| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
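Each record describes one source file: repository metadata for the most-starred, most-issued and most-forked copies of the file, the raw file content, and three derived statistics. The sketch below is illustrative rather than the pipeline that produced this table; the helper name derived_columns is invented here, and the exact upstream definitions of the three statistics may differ slightly.

def derived_columns(content: str) -> dict:
    """Recompute avg_line_length, max_line_length and alphanum_fraction for one record."""
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanumeric = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanumeric / max(len(content), 1),
    }

# Usage: derived_columns(record["content"]) for any row of the table.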
bc8e0795beca857fd768bebf12bbfc80d2f36028 | 1,506 | py | Python | core/storage/question/gae_models_test.py | Panda2498/oppia | fccfd7e89c6904c244deaccdee80cc5658f2520a | ["Apache-2.0"] | null | null | null | core/storage/question/gae_models_test.py | Panda2498/oppia | fccfd7e89c6904c244deaccdee80cc5658f2520a | ["Apache-2.0"] | 1 | 2020-01-26T14:02:43.000Z | 2020-01-26T14:02:43.000Z | core/storage/question/gae_models_test.py | ryanboris/oppia | bc39e54e00d53ea2f00bca906fe02162d0c422ac | ["Apache-2.0"] | null | null | null |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_domain
from core.platform import models
from core.tests import test_utils
(question_models,) = models.Registry.import_models([models.NAMES.question])
class QuestionModelUnitTests(test_utils.GenericTestBase):
"""Tests the QuestionModel class."""
def test_create_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_data_schema_version = 1
language_code = 'en'
question_model = question_models.QuestionModel.create(
question_data, question_data_schema_version,
language_code)
self.assertEqual(question_model.question_data, question_data)
self.assertEqual(
question_model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(question_model.language_code, language_code)
| 38.615385 | 75 | 0.745684 |
774a4ac0a5f1d65bd8288387389a7b937611ba55 | 913 | py | Python | metasdk/examples/media_api/set_auth_user_id.py | devision-io/metasdk | e3340ab4c62a9f6c7317d768bc12ea5ceff01275 | ["MIT"] | 1 | 2020-08-07T02:52:14.000Z | 2020-08-07T02:52:14.000Z | metasdk/examples/media_api/set_auth_user_id.py | devision-io/metasdk | e3340ab4c62a9f6c7317d768bc12ea5ceff01275 | ["MIT"] | 2 | 2019-10-22T09:50:30.000Z | 2020-08-05T14:22:50.000Z | metasdk/examples/media_api/set_auth_user_id.py | devision-io/metasdk | e3340ab4c62a9f6c7317d768bc12ea5ceff01275 | ["MIT"] | 1 | 2020-08-07T02:58:45.000Z | 2020-08-07T02:58:45.000Z |
import base64
from metasdk import MetaApp
from metasdk.utils import pretty_json
META = MetaApp(meta_url="http://localhost:8080")
log = META.log
#
# You can set the ID of the user on whose behalf requests will be made.
# This is useful when you need to generate a private file in the background.
# You can pass this user_id around and read it back from the data field of a task.
#
META.auth_user_id = 3503
YOUR_FILE_CONTENT_BASE64 = base64.b64encode(b'Custom user file').decode("utf-8")
# Get a service instance and make a request to it
result = META.MediaService.persist_one(
file_base64_content=YOUR_FILE_CONTENT_BASE64,
filename="req.txt",
extension="txt",
mime="plain/text"
)
print(u"result = %s" % result)
# The response format is Meta's standard one
first = result['rows'][0]
print(u"result['rows'][0]['url'] = %s" % first['url'])
print(u"first = %s" % first)
print(u"result = %s" % pretty_json(result))
| 30.433333 | 80 | 0.740416 |
f542c4199f005244f68e0965ad2eeaba96db7587 | 2,073 | py | Python | train_exp.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | ["MIT"] | 200 | 2020-11-14T16:23:11.000Z | 2022-03-31T17:40:37.000Z | train_exp.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | ["MIT"] | 36 | 2020-11-15T14:17:51.000Z | 2022-01-04T08:22:43.000Z | train_exp.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | ["MIT"] | 42 | 2020-11-14T16:29:18.000Z | 2022-03-20T01:16:39.000Z |
import time
from options.options import Options
from models import audio_expression_model
from datasets import create_dataset
from utils.visualizer import Visualizer
if __name__ == '__main__':
opt = Options().parse_args() # get training options
dataset = create_dataset(opt)
model = audio_expression_model.AudioExpressionModel(opt)
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
total_iters = 0
for epoch in range(opt.num_epoch):
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
visualizer.display_current_results(model.get_current_visuals(), total_iters)
if total_iters % opt.print_freq == 0: # print training losses
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
visualizer.plot_current_losses(total_iters, losses)
iter_data_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.num_epoch, time.time() - epoch_start_time))
model.save_network()
| 39.865385 | 116 | 0.666184 |
9051a1a1ca678660821931e07eeb266ed868bac0 | 634 | py | Python | consumeraffairs/reviews/urls.py | sqyttles/Django-Backend-Test | c46d5b2d8a1f98bf3ec69524ab7a2e344514e538 | ["MIT"] | null | null | null | consumeraffairs/reviews/urls.py | sqyttles/Django-Backend-Test | c46d5b2d8a1f98bf3ec69524ab7a2e344514e538 | ["MIT"] | null | null | null | consumeraffairs/reviews/urls.py | sqyttles/Django-Backend-Test | c46d5b2d8a1f98bf3ec69524ab7a2e344514e538 | ["MIT"] | null | null | null |
from django.urls import path
from consumeraffairs.reviews.views import (
review_list_view, review_redirect_view, review_create_view,
review_detail_view, review_update_view, review_delete_view)
app_name = "reviews"
urlpatterns = [
path("", view=review_list_view, name="list"),
path("~redirect/", view=review_redirect_view, name="redirect"),
path("create/", view=review_create_view, name="create"),
path("<slug:title>", view=review_detail_view, name="detail"),
path("<slug:title>/update/", view=review_update_view, name="update"),
path("<slug:title>/delete/", view=review_delete_view, name="delete")
]
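Because the module sets app_name = "reviews" and names every route, code elsewhere in the project can reverse these URLs by namespace. A minimal sketch, assuming this URLconf is included in the project's root urls.py (the include prefix is an assumption, not shown above):

from django.urls import reverse

# Resolves against the named patterns above; the exact path depends on the assumed include() prefix.
detail_url = reverse("reviews:detail", kwargs={"title": "my-first-review"})
update_url = reverse("reviews:update", kwargs={"title": "my-first-review"})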
| 39.625 | 73 | 0.731861 |
3a0dcc37a47120f0deefaeda94e5bd31fc33c0ef | 2,032 | py | Python | edx_rest_framework_extensions/auth/jwt/tests/test_cookies.py | Jawayria/edx-drf-extensions | ff138f24f1695f97988356c46fc2f4c4774019a1 | ["Apache-2.0"] | null | null | null | edx_rest_framework_extensions/auth/jwt/tests/test_cookies.py | Jawayria/edx-drf-extensions | ff138f24f1695f97988356c46fc2f4c4774019a1 | ["Apache-2.0"] | null | null | null | edx_rest_framework_extensions/auth/jwt/tests/test_cookies.py | Jawayria/edx-drf-extensions | ff138f24f1695f97988356c46fc2f4c4774019a1 | ["Apache-2.0"] | null | null | null |
"""
Unit tests for jwt cookies module.
"""
import ddt
import mock
from django.test import TestCase, override_settings
from edx_rest_framework_extensions.auth.jwt.decoder import jwt_decode_handler
from edx_rest_framework_extensions.auth.jwt.tests.utils import (
generate_jwt_token,
generate_latest_version_payload,
)
from edx_rest_framework_extensions.tests.factories import UserFactory
from .. import cookies
@ddt.ddt
class TestJwtAuthCookies(TestCase):
@ddt.data(
(cookies.jwt_cookie_name, 'JWT_AUTH_COOKIE', 'custom-jwt-cookie-name'),
(cookies.jwt_cookie_header_payload_name, 'JWT_AUTH_COOKIE_HEADER_PAYLOAD', 'custom-jwt-header-payload-name'),
(cookies.jwt_cookie_signature_name, 'JWT_AUTH_COOKIE_SIGNATURE', 'custom-jwt-signature-name'),
)
@ddt.unpack
def test_get_setting_value(self, jwt_cookie_func, setting_name, setting_value):
with override_settings(JWT_AUTH={setting_name: setting_value}):
self.assertEqual(jwt_cookie_func(), setting_value)
@ddt.data(
(cookies.jwt_cookie_name, 'edx-jwt-cookie'),
(cookies.jwt_cookie_header_payload_name, 'edx-jwt-cookie-header-payload'),
(cookies.jwt_cookie_signature_name, 'edx-jwt-cookie-signature'),
)
@ddt.unpack
def test_get_default_value(self, jwt_cookie_func, expected_default_value):
self.assertEqual(jwt_cookie_func(), expected_default_value)
def test_get_decoded_jwt_from_existing_cookie(self):
user = UserFactory()
payload = generate_latest_version_payload(user)
jwt = generate_jwt_token(payload)
expected_decoded_jwt = jwt_decode_handler(jwt)
mock_request_with_cookie = mock.Mock(COOKIES={'edx-jwt-cookie': jwt})
decoded_jwt = cookies.get_decoded_jwt(mock_request_with_cookie)
self.assertEqual(expected_decoded_jwt, decoded_jwt)
def test_get_decoded_jwt_when_no_cookie(self):
mock_request = mock.Mock(COOKIES={})
self.assertIsNone(cookies.get_decoded_jwt(mock_request))
| 37.62963 | 117 | 0.752461 |
c3a68580661274146b3a0eeabd7858694aff0617 | 309 | py | Python | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-cylq01 | a8ca9e25b3577370ebf0d35da8298ee523f8e46b | ["Apache-2.0"] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-cylq01 | a8ca9e25b3577370ebf0d35da8298ee523f8e46b | ["Apache-2.0"] | null | null | null | test/solution_tests/SUM/test_sum.py | DPNT-Sourcecode/CHK-cylq01 | a8ca9e25b3577370ebf0d35da8298ee523f8e46b | ["Apache-2.0"] | null | null | null |
import unittest
from solutions.SUM import sum_solution
class TestSum(unittest.TestCase):
def test_sum(self):
self.assertEqual(sum_solution.compute(1, 2), 3)
def test_notequal(self):
        self.assertNotEqual(sum_solution.compute(1, 1), 3)
if __name__ == '__main__':
unittest.main()
| 19.3125 | 57 | 0.705502 |
3a4c1ca03c28b85d63f798781fe1ad8421f4f045 | 386 | py | Python | tasks/__init__.py | ttzhou/poetry-workspace-plugin | 078d8ab323b0d33e0b542cac93ae7144bace87e8 | ["MIT"] | 46 | 2021-08-10T09:30:58.000Z | 2022-03-31T16:18:46.000Z | tasks/__init__.py | ttzhou/poetry-workspace-plugin | 078d8ab323b0d33e0b542cac93ae7144bace87e8 | ["MIT"] | 1 | 2021-12-07T11:00:02.000Z | 2021-12-07T11:29:19.000Z | tasks/__init__.py | ttzhou/poetry-workspace-plugin | 078d8ab323b0d33e0b542cac93ae7144bace87e8 | ["MIT"] | 4 | 2021-08-18T02:30:18.000Z | 2022-02-11T23:23:58.000Z |
from invoke import Collection
from tasks.changelog_check import changelog_check
from tasks.lint import lint
from tasks.release import build, release
from tasks.test import coverage, test
from tasks.typecheck import typecheck
from tasks.verify import verify
namespace = Collection(
build,
changelog_check,
coverage,
lint,
    release,
    test,
typecheck,
verify,
)
| 20.315789 | 49 | 0.766839 |
103549d200e973e501c14eb496e73d2d80c9a6ee | 2,428 | py | Python | examples/pipeline/hetero_feature_binning/pipeline-hetero-binning-bucket-binning.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | ["Apache-2.0"] | null | null | null | examples/pipeline/hetero_feature_binning/pipeline-hetero-binning-bucket-binning.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | ["Apache-2.0"] | null | null | null | examples/pipeline/hetero_feature_binning/pipeline-hetero-binning-bucket-binning.py | qixiuai/FATE | 6d50af65b96b5b226afda30dfa8e4a1e5746952d | ["Apache-2.0"] | null | null | null |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_feature_binning import common_tools
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
backend = config.backend
work_mode = config.work_mode
param = {
"name": "hetero_feature_binning_0",
"method": "bucket",
"optimal_binning_param": {
"metric_method": "iv",
"min_bin_pct": 0.05,
"max_bin_pct": 0.8,
"init_bucket_method": "quantile",
"init_bin_nums": 100,
"mixture": True
},
"compress_thres": 10000,
"head_size": 10000,
"error": 0.001,
"bin_num": 10,
"bin_indexes": -1,
"bin_names": None,
"category_indexes": None,
"category_names": None,
"adjustment_factor": 0.5,
"local_only": False,
"transform_param": {
"transform_cols": -1,
"transform_names": None,
"transform_type": "bin_num"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, param)
pipeline.fit(backend=backend, work_mode=work_mode)
# common_tools.prettify(pipeline.get_component("hetero_feature_binning_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 31.128205 | 93 | 0.644152 |
6f3060ee6d617c6c01ac1ea3bcb011c915657ba2 | 13,876 | py | Python | kubernetes/client/models/v1_api_resource.py | palnabarun/Python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_api_resource.py | palnabarun/Python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | ["Apache-2.0"] | 7 | 2020-11-07T10:35:21.000Z | 2022-02-07T03:06:25.000Z | kubernetes/client/models/v1_api_resource.py | palnabarun/python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1APIResource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'categories': 'list[str]',
'group': 'str',
'kind': 'str',
'name': 'str',
'namespaced': 'bool',
'short_names': 'list[str]',
'singular_name': 'str',
'storage_version_hash': 'str',
'verbs': 'list[str]',
'version': 'str'
}
attribute_map = {
'categories': 'categories',
'group': 'group',
'kind': 'kind',
'name': 'name',
'namespaced': 'namespaced',
'short_names': 'shortNames',
'singular_name': 'singularName',
'storage_version_hash': 'storageVersionHash',
'verbs': 'verbs',
'version': 'version'
}
def __init__(self, categories=None, group=None, kind=None, name=None, namespaced=None, short_names=None, singular_name=None, storage_version_hash=None, verbs=None, version=None, local_vars_configuration=None): # noqa: E501
"""V1APIResource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._categories = None
self._group = None
self._kind = None
self._name = None
self._namespaced = None
self._short_names = None
self._singular_name = None
self._storage_version_hash = None
self._verbs = None
self._version = None
self.discriminator = None
if categories is not None:
self.categories = categories
if group is not None:
self.group = group
self.kind = kind
self.name = name
self.namespaced = namespaced
if short_names is not None:
self.short_names = short_names
self.singular_name = singular_name
if storage_version_hash is not None:
self.storage_version_hash = storage_version_hash
self.verbs = verbs
if version is not None:
self.version = version
@property
def categories(self):
"""Gets the categories of this V1APIResource. # noqa: E501
categories is a list of the grouped resources this resource belongs to (e.g. 'all') # noqa: E501
:return: The categories of this V1APIResource. # noqa: E501
:rtype: list[str]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this V1APIResource.
categories is a list of the grouped resources this resource belongs to (e.g. 'all') # noqa: E501
:param categories: The categories of this V1APIResource. # noqa: E501
:type: list[str]
"""
self._categories = categories
@property
def group(self):
"""Gets the group of this V1APIResource. # noqa: E501
group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\". # noqa: E501
:return: The group of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1APIResource.
group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\". # noqa: E501
:param group: The group of this V1APIResource. # noqa: E501
:type: str
"""
self._group = group
@property
def kind(self):
"""Gets the kind of this V1APIResource. # noqa: E501
kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') # noqa: E501
:return: The kind of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1APIResource.
kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') # noqa: E501
:param kind: The kind of this V1APIResource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1APIResource. # noqa: E501
name is the plural name of the resource. # noqa: E501
:return: The name of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1APIResource.
name is the plural name of the resource. # noqa: E501
:param name: The name of this V1APIResource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespaced(self):
"""Gets the namespaced of this V1APIResource. # noqa: E501
namespaced indicates if a resource is namespaced or not. # noqa: E501
:return: The namespaced of this V1APIResource. # noqa: E501
:rtype: bool
"""
return self._namespaced
@namespaced.setter
def namespaced(self, namespaced):
"""Sets the namespaced of this V1APIResource.
namespaced indicates if a resource is namespaced or not. # noqa: E501
:param namespaced: The namespaced of this V1APIResource. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and namespaced is None: # noqa: E501
raise ValueError("Invalid value for `namespaced`, must not be `None`") # noqa: E501
self._namespaced = namespaced
@property
def short_names(self):
"""Gets the short_names of this V1APIResource. # noqa: E501
shortNames is a list of suggested short names of the resource. # noqa: E501
:return: The short_names of this V1APIResource. # noqa: E501
:rtype: list[str]
"""
return self._short_names
@short_names.setter
def short_names(self, short_names):
"""Sets the short_names of this V1APIResource.
shortNames is a list of suggested short names of the resource. # noqa: E501
:param short_names: The short_names of this V1APIResource. # noqa: E501
:type: list[str]
"""
self._short_names = short_names
@property
def singular_name(self):
"""Gets the singular_name of this V1APIResource. # noqa: E501
singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface. # noqa: E501
:return: The singular_name of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._singular_name
@singular_name.setter
def singular_name(self, singular_name):
"""Sets the singular_name of this V1APIResource.
singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface. # noqa: E501
:param singular_name: The singular_name of this V1APIResource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and singular_name is None: # noqa: E501
raise ValueError("Invalid value for `singular_name`, must not be `None`") # noqa: E501
self._singular_name = singular_name
@property
def storage_version_hash(self):
"""Gets the storage_version_hash of this V1APIResource. # noqa: E501
The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates. # noqa: E501
:return: The storage_version_hash of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._storage_version_hash
@storage_version_hash.setter
def storage_version_hash(self, storage_version_hash):
"""Sets the storage_version_hash of this V1APIResource.
The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates. # noqa: E501
:param storage_version_hash: The storage_version_hash of this V1APIResource. # noqa: E501
:type: str
"""
self._storage_version_hash = storage_version_hash
@property
def verbs(self):
"""Gets the verbs of this V1APIResource. # noqa: E501
verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy) # noqa: E501
:return: The verbs of this V1APIResource. # noqa: E501
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""Sets the verbs of this V1APIResource.
verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy) # noqa: E501
:param verbs: The verbs of this V1APIResource. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
self._verbs = verbs
@property
def version(self):
"""Gets the version of this V1APIResource. # noqa: E501
version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\". # noqa: E501
:return: The version of this V1APIResource. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1APIResource.
version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\". # noqa: E501
:param version: The version of this V1APIResource. # noqa: E501
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1APIResource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1APIResource):
return True
return self.to_dict() != other.to_dict()
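A brief usage sketch, not part of the generated module above: it assumes the kubernetes Python client package is installed, constructs the model with its required fields, and serializes it with to_dict(). The field values are only illustrative.

from kubernetes.client.models.v1_api_resource import V1APIResource

# "deployments" is an illustrative resource; any values satisfying the required
# fields (kind, name, namespaced, singular_name, verbs) would work the same way.
resource = V1APIResource(
    kind="Deployment",
    name="deployments",
    namespaced=True,
    singular_name="deployment",
    short_names=["deploy"],
    verbs=["get", "list", "watch", "create", "update", "patch", "delete"],
)
print(resource.to_dict()["name"])  # -> "deployments"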
| 36.515789 | 445 | 0.634765 |
d386e5b8811179974e6f5072947f5f0c653e4d4e | 254 | py | Python | jalali_test/manage.py | ajahansh/django-jalali | 5be8845f500fc18d96d0b624f74b4e83161c4645 | ["BSD-3-Clause"] | null | null | null | jalali_test/manage.py | ajahansh/django-jalali | 5be8845f500fc18d96d0b624f74b4e83161c4645 | ["BSD-3-Clause"] | null | null | null | jalali_test/manage.py | ajahansh/django-jalali | 5be8845f500fc18d96d0b624f74b4e83161c4645 | ["BSD-3-Clause"] | 1 | 2018-11-21T14:31:52.000Z | 2018-11-21T14:31:52.000Z |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jalali_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.090909 | 75 | 0.775591 |
45a9b868ca14f4f48414c003dff9c395080c61d9 | 41,198 | py | Python | reinforcement_learning/common/policies.py | mizolotu/DonkeyCarExperiments | 3d6be742915efe51c0f5abda4c69a4349a555373 | ["MIT"] | null | null | null | reinforcement_learning/common/policies.py | mizolotu/DonkeyCarExperiments | 3d6be742915efe51c0f5abda4c69a4349a555373 | ["MIT"] | null | null | null | reinforcement_learning/common/policies.py | mizolotu/DonkeyCarExperiments | 3d6be742915efe51c0f5abda4c69a4349a555373 | ["MIT"] | null | null | null |
import warnings
from itertools import zip_longest
from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
from reinforcement_learning.gym.spaces import Discrete
from reinforcement_learning.common.tf_util import batch_to_seq, seq_to_batch
from reinforcement_learning.common.tf_layers import conv, linear, conv_to_fc, lstm, mlp
from reinforcement_learning.common.distributions import make_proba_dist_type, CategoricalProbabilityDistribution, MultiCategoricalProbabilityDistribution, DiagGaussianProbabilityDistribution, BernoulliProbabilityDistribution
from reinforcement_learning.common.input import observation_input
def nature_cnn(scaled_images, **kwargs):
"""
CNN from Nature paper.
:param scaled_images: (TensorFlow Tensor) Image input placeholder
:param kwargs: (dict) Extra keywords parameters for the convolutional layers of the CNN
:return: (TensorFlow Tensor) The CNN output layer
"""
activ = tf.nn.relu
layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=8, stride=4, init_scale=np.sqrt(2), **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=4, stride=2, init_scale=np.sqrt(2), **kwargs))
layer_3 = activ(conv(layer_2, 'c3', n_filters=64, filter_size=3, stride=1, init_scale=np.sqrt(2), **kwargs))
layer_3 = conv_to_fc(layer_3)
return activ(linear(layer_3, 'fc1', n_hidden=512, init_scale=np.sqrt(2)))
def mlp_extractor(flat_observations, net_arch, act_fun, batch_norm=True):
"""
Constructs an MLP that receives observations as an input and outputs a latent representation for the policy and
a value network. The ``net_arch`` parameter allows to specify the amount and size of the hidden layers and how many
of them are shared between the policy network and the value network. It is assumed to be a list with the following
structure:
1. An arbitrary length (zero allowed) number of integers each specifying the number of units in a shared layer.
If the number of ints is zero, there will be no shared layers.
2. An optional dict, to specify the following non-shared layers for the value network and the policy network.
It is formatted like ``dict(vf=[<value layer sizes>], pi=[<policy layer sizes>])``.
If it is missing any of the keys (pi or vf), no non-shared layers (empty list) is assumed.
For example to construct a network with one shared layer of size 55 followed by two non-shared layers for the value
network of size 255 and a single non-shared layer of size 128 for the policy network, the following layers_spec
would be used: ``[55, dict(vf=[255, 255], pi=[128])]``. A simple shared network topology with two layers of size 128
would be specified as [128, 128].
:param flat_observations: (tf.Tensor) The observations to base policy and value function on.
:param net_arch: ([int or dict]) The specification of the policy and value networks.
See above for details on its formatting.
:param act_fun: (tf function) The activation function to use for the networks.
:return: (tf.Tensor, tf.Tensor) latent_policy, latent_value of the specified network.
If all layers are shared, then ``latent_policy == latent_value``
"""
latent = flat_observations
policy_only_layers = [] # Layer sizes of the network that only belongs to the policy network
value_only_layers = [] # Layer sizes of the network that only belongs to the value network
# Iterate through the shared layers and build the shared parts of the network
for idx, layer in enumerate(net_arch):
if isinstance(layer, int): # Check that this is a shared layer
layer_size = layer
latent = act_fun(linear(latent, "shared_fc{}".format(idx), layer_size, init_scale=np.sqrt(2)))
if batch_norm:
latent = tf.compat.v1.layers.batch_normalization(latent)
else:
assert isinstance(layer, dict), "Error: the net_arch list can only contain ints and dicts"
if 'pi' in layer:
assert isinstance(layer['pi'], list), "Error: net_arch[-1]['pi'] must contain a list of integers."
policy_only_layers = layer['pi']
if 'vf' in layer:
assert isinstance(layer['vf'], list), "Error: net_arch[-1]['vf'] must contain a list of integers."
value_only_layers = layer['vf']
break # From here on the network splits up in policy and value network
# Build the non-shared part of the network
latent_policy = latent
latent_value = latent
for idx, (pi_layer_size, vf_layer_size) in enumerate(zip_longest(policy_only_layers, value_only_layers)):
if pi_layer_size is not None:
assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers."
latent_policy = act_fun(linear(latent_policy, "pi_fc{}".format(idx), pi_layer_size, init_scale=np.sqrt(2)))
if vf_layer_size is not None:
assert isinstance(vf_layer_size, int), "Error: net_arch[-1]['vf'] must only contain integers."
latent_value = act_fun(linear(latent_value, "vf_fc{}".format(idx), vf_layer_size, init_scale=np.sqrt(2)))
return latent_policy, latent_value
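# Illustrative note (not part of the original module): with, for example,
#     net_arch = [128, dict(pi=[64], vf=[64])]
# mlp_extractor builds one shared 128-unit layer, then a separate 64-unit layer
# for the policy branch and a separate 64-unit layer for the value branch, so
# latent_policy and latent_value differ; with net_arch = [128, 128] every layer
# is shared and latent_policy is the same tensor as latent_value.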
class BasePolicy(ABC):
"""
The base policy object
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batches to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param scale: (bool) whether or not to scale the input
:param obs_phs: (TensorFlow Tensor, TensorFlow Tensor) a tuple containing an override for observation placeholder
and the processed observation placeholder respectively
:param add_action_ph: (bool) whether or not to create an action placeholder
"""
recurrent = False
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, scale=False,
obs_phs=None, add_action_ph=False):
self.n_env = n_env
self.n_steps = n_steps
self.n_batch = n_batch
with tf.compat.v1.variable_scope("input", reuse=False):
if obs_phs is None:
self._obs_ph, self._processed_obs = observation_input(ob_space, n_batch, scale=scale)
else:
self._obs_ph, self._processed_obs = obs_phs
self._action_ph = None
if add_action_ph:
self._action_ph = tf.compat.v1.placeholder(dtype=ac_space.dtype, shape=(n_batch,) + ac_space.shape,
name="action_ph")
self.sess = sess
self.reuse = reuse
self.ob_space = ob_space
self.ac_space = ac_space
@property
def is_discrete(self):
"""bool: is action space discrete."""
return isinstance(self.ac_space, Discrete)
@property
def initial_state(self):
"""
The initial state of the policy. For feedforward policies, None. For a recurrent policy,
a NumPy array of shape (self.n_env, ) + state_shape.
"""
assert not self.recurrent, "When using recurrent policies, you must overwrite `initial_state()` method"
return None
@property
def obs_ph(self):
"""tf.Tensor: placeholder for observations, shape (self.n_batch, ) + self.ob_space.shape."""
return self._obs_ph
@property
def processed_obs(self):
"""tf.Tensor: processed observations, shape (self.n_batch, ) + self.ob_space.shape.
The form of processing depends on the type of the observation space, and the parameters
whether scale is passed to the constructor; see observation_input for more information."""
return self._processed_obs
@property
def action_ph(self):
"""tf.Tensor: placeholder for actions, shape (self.n_batch, ) + self.ac_space.shape."""
return self._action_ph
@staticmethod
def _kwargs_check(feature_extraction, kwargs):
"""
Ensure that the user is not passing wrong keywords
when using policy_kwargs.
:param feature_extraction: (str)
:param kwargs: (dict)
"""
# When using policy_kwargs parameter on model creation,
# all keywords arguments must be consumed by the policy constructor except
# the ones for the cnn_extractor network (cf nature_cnn()), where the keywords arguments
# are not passed explicitly (using **kwargs to forward the arguments)
# that's why there should be not kwargs left when using the mlp_extractor
# (in that case the keywords arguments are passed explicitly)
if feature_extraction == 'mlp' and len(kwargs) > 0:
raise ValueError("Unknown keywords for policy: {}".format(kwargs))
@abstractmethod
def step(self, obs, state=None, mask=None):
"""
Returns the policy for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:return: ([float], [float], [float], [float]) actions, values, states, neglogp
"""
raise NotImplementedError
@abstractmethod
def proba_step(self, obs, state=None, mask=None):
"""
Returns the action probability for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:return: ([float]) the action probability
"""
raise NotImplementedError
class ActorCriticPolicy(BasePolicy):
"""
Policy object that implements actor critic
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param scale: (bool) whether or not to scale the input
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, scale=False):
super(ActorCriticPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
scale=scale)
self._pdtype = make_proba_dist_type(ac_space)
self._policy = None
self._proba_distribution = None
self._value_fn = None
self._action = None
self._deterministic_action = None
def _setup_init(self):
"""Sets up the distributions, actions, and value."""
with tf.compat.v1.variable_scope("output", reuse=True):
assert self.policy is not None and self.proba_distribution is not None and self.value_fn is not None
self._action = self.proba_distribution.sample()
self._deterministic_action = self.proba_distribution.mode()
self._neglogp = self.proba_distribution.neglogp(self.action)
if isinstance(self.proba_distribution, CategoricalProbabilityDistribution):
self._policy_proba = tf.nn.softmax(self.policy)
elif isinstance(self.proba_distribution, DiagGaussianProbabilityDistribution):
self._policy_proba = [self.proba_distribution.mean, self.proba_distribution.std]
elif isinstance(self.proba_distribution, BernoulliProbabilityDistribution):
self._policy_proba = tf.nn.sigmoid(self.policy)
elif isinstance(self.proba_distribution, MultiCategoricalProbabilityDistribution):
self._policy_proba = [tf.nn.softmax(categorical.flatparam())
for categorical in self.proba_distribution.categoricals]
else:
self._policy_proba = [] # it will return nothing, as it is not implemented
self._value_flat = self.value_fn[:, 0]
@property
def pdtype(self):
"""ProbabilityDistributionType: type of the distribution for stochastic actions."""
return self._pdtype
@property
def policy(self):
"""tf.Tensor: policy output, e.g. logits."""
return self._policy
@property
def proba_distribution(self):
"""ProbabilityDistribution: distribution of stochastic actions."""
return self._proba_distribution
@property
def value_fn(self):
"""tf.Tensor: value estimate, of shape (self.n_batch, 1)"""
return self._value_fn
@property
def value_flat(self):
"""tf.Tensor: value estimate, of shape (self.n_batch, )"""
return self._value_flat
@property
def action(self):
"""tf.Tensor: stochastic action, of shape (self.n_batch, ) + self.ac_space.shape."""
return self._action
@property
def deterministic_action(self):
"""tf.Tensor: deterministic action, of shape (self.n_batch, ) + self.ac_space.shape."""
return self._deterministic_action
@property
def neglogp(self):
"""tf.Tensor: negative log likelihood of the action sampled by self.action."""
return self._neglogp
@property
def policy_proba(self):
"""tf.Tensor: parameters of the probability distribution. Depends on pdtype."""
return self._policy_proba
@abstractmethod
def step(self, obs, state=None, mask=None, deterministic=False):
"""
Returns the policy for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:param deterministic: (bool) Whether or not to return deterministic actions.
:return: ([float], [float], [float], [float]) actions, values, states, neglogp
"""
raise NotImplementedError
@abstractmethod
def value(self, obs, state=None, mask=None):
"""
Returns the value for a single step
:param obs: ([float] or [int]) The current observation of the environment
:param state: ([float]) The last states (used in recurrent policies)
:param mask: ([float]) The last masks (used in recurrent policies)
:return: ([float]) The associated value of the action
"""
raise NotImplementedError
class RecurrentActorCriticPolicy(ActorCriticPolicy):
"""
Actor critic policy object uses a previous state in the computation for the current step.
NOTE: this class is not limited to recurrent neural network policies,
see https://github.com/hill-a/stable-baselines/issues/241
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param state_shape: (tuple<int>) shape of the per-environment state space.
:param reuse: (bool) If the policy is reusable or not
:param scale: (bool) whether or not to scale the input
"""
recurrent = True
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
state_shape, reuse=False, scale=False):
super(RecurrentActorCriticPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps,
n_batch, reuse=reuse, scale=scale)
with tf.compat.v1.variable_scope("input", reuse=False):
self._dones_ph = tf.compat.v1.placeholder(tf.float32, (n_batch, ), name="dones_ph") # (done t-1)
state_ph_shape = (self.n_env, ) + tuple(state_shape)
self._states_ph = tf.compat.v1.placeholder(tf.float32, state_ph_shape, name="states_ph")
initial_state_shape = (self.n_env, ) + tuple(state_shape)
self._initial_state = np.zeros(initial_state_shape, dtype=np.float32)
@property
def initial_state(self):
return self._initial_state
@property
def dones_ph(self):
"""tf.Tensor: placeholder for whether episode has terminated (done), shape (self.n_batch, ).
Internally used to reset the state before the next episode starts."""
return self._dones_ph
@property
def states_ph(self):
"""tf.Tensor: placeholder for states, shape (self.n_env, ) + state_shape."""
return self._states_ph
@abstractmethod
def value(self, obs, state=None, mask=None):
"""
Cf base class doc.
"""
raise NotImplementedError
class MemoryPolicy(RecurrentActorCriticPolicy):
recurrent = True
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_mem=64, reuse=False, layers=None, net_arch=None, act_fun=tf.tanh, layer_norm=False, feature_extraction="mlp", **kwargs):
assert len(ob_space.shape) == 1
nfeatures = ob_space.shape[0]
super(MemoryPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, state_shape=(n_mem * nfeatures,), reuse=reuse, scale=(feature_extraction == "cnn"))
if net_arch is None: # Legacy mode
if layers is None:
layers = [64, 64]
else:
warnings.warn("The layers parameter is deprecated. Use the net_arch parameter instead.")
with tf.compat.v1.variable_scope("model", reuse=reuse):
extracted_features = tf.compat.v1.layers.flatten(self.obs_ph)
input_sequence = batch_to_seq(extracted_features, self.n_env, n_steps)
masks = batch_to_seq(self.dones_ph, self.n_env, n_steps)
output, self.snew = mlpstack(input_sequence, masks, self.states_ph, 'mem1', n_hidden=64, layer_norm=layer_norm, act_fun=act_fun)
output = seq_to_batch(output)
value_fn = linear(output, 'vf', 1)
self._proba_distribution, self._policy, self.q_value = self.pdtype.proba_distribution_from_latent(output, output)
self._value_fn = value_fn
else:
raise NotImplementedError
self._setup_init()
def step(self, obs, state=None, mask=None, deterministic=False):
if deterministic:
return self.sess.run([self.deterministic_action, self.value_flat, self.snew, self.neglogp],
{self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
else:
return self.sess.run([self.action, self.value_flat, self.snew, self.neglogp],
{self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
def value(self, obs, state=None, mask=None):
return self.sess.run(self.value_flat, {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
class LstmPolicy(RecurrentActorCriticPolicy):
"""
Policy object that implements actor critic, using LSTMs.
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_lstm: (int) The number of LSTM cells (for recurrent policies)
:param reuse: (bool) If the policy is reusable or not
:param layers: ([int]) The size of the Neural network before the LSTM layer (if None, default to [64, 64])
:param net_arch: (list) Specification of the actor-critic policy network architecture. Notation similar to the
format described in mlp_extractor but with additional support for a 'lstm' entry in the shared network part.
:param act_fun: (tf.func) the activation function to use in the neural network.
:param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
:param layer_norm: (bool) Whether or not to use layer normalizing LSTMs
:param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
:param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
recurrent = True
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=64, reuse=False, layers=None,
net_arch=None, act_fun=tf.tanh, cnn_extractor=nature_cnn, layer_norm=False, feature_extraction="cnn",
**kwargs):
# state_shape = [n_lstm * 2] dim because of the cell and hidden states of the LSTM
super(LstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch,
state_shape=(2 * n_lstm, ), reuse=reuse,
scale=(feature_extraction == "cnn"))
self._kwargs_check(feature_extraction, kwargs)
if net_arch is None: # Legacy mode
if layers is None:
layers = [64]
else:
warnings.warn("The layers parameter is deprecated. Use the net_arch parameter instead.")
with tf.compat.v1.variable_scope("model", reuse=reuse):
if feature_extraction == "cnn":
extracted_features = cnn_extractor(self.processed_obs, **kwargs)
else:
extracted_features = tf.compat.v1.layers.flatten(self.processed_obs)
for i, layer_size in enumerate(layers):
extracted_features = act_fun(linear(extracted_features, 'pi_fc' + str(i), n_hidden=layer_size, init_scale=np.sqrt(2)))
input_sequence = batch_to_seq(extracted_features, self.n_env, n_steps)
masks = batch_to_seq(self.dones_ph, self.n_env, n_steps)
rnn_output, self.snew = lstm(input_sequence, masks, self.states_ph, 'lstm1', n_hidden=n_lstm,
layer_norm=layer_norm)
rnn_output = seq_to_batch(rnn_output)
value_fn = linear(rnn_output, 'vf', 1)
self._proba_distribution, self._policy, self.q_value = \
self.pdtype.proba_distribution_from_latent(rnn_output, rnn_output)
self._value_fn = value_fn
else: # Use the new net_arch parameter
if layers is not None:
warnings.warn("The new net_arch parameter overrides the deprecated layers parameter.")
if feature_extraction == "cnn":
raise NotImplementedError()
with tf.compat.v1.variable_scope("model", reuse=reuse):
latent = tf.compat.v1.layers.flatten(self.processed_obs)
policy_only_layers = [] # Layer sizes of the network that only belongs to the policy network
value_only_layers = [] # Layer sizes of the network that only belongs to the value network
# Iterate through the shared layers and build the shared parts of the network
lstm_layer_constructed = False
for idx, layer in enumerate(net_arch):
if isinstance(layer, int): # Check that this is a shared layer
layer_size = layer
latent = act_fun(linear(latent, "shared_fc{}".format(idx), layer_size, init_scale=np.sqrt(2)))
elif layer == "lstm":
if lstm_layer_constructed:
raise ValueError("The net_arch parameter must only contain one occurrence of 'lstm'!")
input_sequence = batch_to_seq(latent, self.n_env, n_steps)
masks = batch_to_seq(self.dones_ph, self.n_env, n_steps)
rnn_output, self.snew = lstm(input_sequence, masks, self.states_ph, 'lstm1', n_hidden=n_lstm,
layer_norm=layer_norm)
latent = seq_to_batch(rnn_output)
lstm_layer_constructed = True
else:
assert isinstance(layer, dict), "Error: the net_arch list can only contain ints and dicts"
if 'pi' in layer:
assert isinstance(layer['pi'],
list), "Error: net_arch[-1]['pi'] must contain a list of integers."
policy_only_layers = layer['pi']
if 'vf' in layer:
assert isinstance(layer['vf'],
list), "Error: net_arch[-1]['vf'] must contain a list of integers."
value_only_layers = layer['vf']
break # From here on the network splits up in policy and value network
# Build the non-shared part of the policy-network
latent_policy = latent
for idx, pi_layer_size in enumerate(policy_only_layers):
if pi_layer_size == "lstm":
raise NotImplementedError("LSTMs are only supported in the shared part of the policy network.")
assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers."
latent_policy = act_fun(
linear(latent_policy, "pi_fc{}".format(idx), pi_layer_size, init_scale=np.sqrt(2)))
# Build the non-shared part of the value-network
latent_value = latent
for idx, vf_layer_size in enumerate(value_only_layers):
if vf_layer_size == "lstm":
raise NotImplementedError("LSTMs are only supported in the shared part of the value function "
"network.")
assert isinstance(vf_layer_size, int), "Error: net_arch[-1]['vf'] must only contain integers."
latent_value = act_fun(
linear(latent_value, "vf_fc{}".format(idx), vf_layer_size, init_scale=np.sqrt(2)))
if not lstm_layer_constructed:
raise ValueError("The net_arch parameter must contain at least one occurrence of 'lstm'!")
self._value_fn = linear(latent_value, 'vf', 1)
# TODO: why not init_scale = 0.001 here like in the feedforward
self._proba_distribution, self._policy, self.q_value = \
self.pdtype.proba_distribution_from_latent(latent_policy, latent_value)
self._setup_init()
def step(self, obs, state=None, mask=None, deterministic=False):
if deterministic:
return self.sess.run([self.deterministic_action, self.value_flat, self.snew, self.neglogp],
{self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
else:
return self.sess.run([self.action, self.value_flat, self.snew, self.neglogp],
{self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
def value(self, obs, state=None, mask=None):
return self.sess.run(self.value_flat, {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask})
class FeedForwardPolicy(ActorCriticPolicy):
"""
Policy object that implements actor critic, using a feed forward neural network.
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param layers: ([int]) (deprecated, use net_arch instead) The size of the Neural network for the policy
(if None, default to [64, 64])
:param net_arch: (list) Specification of the actor-critic policy network architecture (see mlp_extractor
documentation for details).
:param act_fun: (tf.func) the activation function to use in the neural network.
:param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
:param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
:param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, layers=None, net_arch=None,
act_fun=tf.tanh, cnn_extractor=nature_cnn, feature_extraction="cnn", **kwargs):
super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
scale=(feature_extraction == "cnn"))
self._kwargs_check(feature_extraction, kwargs)
if layers is not None:
warnings.warn("Usage of the `layers` parameter is deprecated! Use net_arch instead "
"(it has a different semantics though).", DeprecationWarning)
if net_arch is not None:
warnings.warn("The new `net_arch` parameter overrides the deprecated `layers` parameter!",
DeprecationWarning)
if net_arch is None:
if layers is None:
layers = [64, 64]
net_arch = [dict(vf=layers, pi=layers)]
with tf.compat.v1.variable_scope("model", reuse=reuse):
if feature_extraction == "cnn":
pi_latent = vf_latent = cnn_extractor(self.processed_obs, **kwargs)
else:
pi_latent, vf_latent = mlp_extractor(tf.compat.v1.layers.flatten(self.processed_obs), net_arch, act_fun)
self._value_fn = linear(vf_latent, 'vf', 1)
self._proba_distribution, self._policy, self.q_value = \
self.pdtype.proba_distribution_from_latent(pi_latent, vf_latent, init_scale=0.01)
self._setup_init()
def step(self, obs, state=None, mask=None, deterministic=False):
if deterministic:
action, value, neglogp = self.sess.run([self.deterministic_action, self.value_flat, self.neglogp],
{self.obs_ph: obs})
else:
action, value, neglogp = self.sess.run([self.action, self.value_flat, self.neglogp],
{self.obs_ph: obs})
return action, value, self.initial_state, neglogp
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, {self.obs_ph: obs})
def value(self, obs, state=None, mask=None):
return self.sess.run(self.value_flat, {self.obs_ph: obs})
class CnnPolicy(FeedForwardPolicy):
"""
Policy object that implements actor critic, using a CNN (the nature CNN)
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
:param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
super(CnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="cnn", **_kwargs)
class CnnLstmPolicy(LstmPolicy):
"""
Policy object that implements actor critic, using LSTMs with a CNN feature extraction
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_lstm: (int) The number of LSTM cells (for recurrent policies)
:param reuse: (bool) If the policy is reusable or not
:param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
super(CnnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
layer_norm=False, feature_extraction="cnn", **_kwargs)
class CnnLnLstmPolicy(LstmPolicy):
"""
    Policy object that implements actor critic, using layer-normalized LSTMs with a CNN feature extraction
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_lstm: (int) The number of LSTM cells (for recurrent policies)
:param reuse: (bool) If the policy is reusable or not
:param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
super(CnnLnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
layer_norm=True, feature_extraction="cnn", **_kwargs)
class MlpPolicyDefault(FeedForwardPolicy):
"""
    Policy object that implements actor critic, using an MLP (2 layers of 64)
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=False, **_kwargs):
super(MlpPolicyDefault, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
feature_extraction="mlp", **_kwargs)
class MlpPolicy(FeedForwardPolicy):
    """MLP actor-critic policy: one shared 256-unit layer, then separate 256-unit value and policy branches."""
    def __init__(self, *args, **kwargs):
        super(MlpPolicy, self).__init__(*args, **kwargs, net_arch=[256, dict(vf=[256], pi=[256])], feature_extraction="mlp")
class policy_0(FeedForwardPolicy):
    """MLP actor-critic policy: no shared layers, separate [256, 256] value and policy branches."""
    def __init__(self, *args, **kwargs):
        super(policy_0, self).__init__(*args, **kwargs, net_arch=[dict(vf=[256, 256], pi=[256, 256])], feature_extraction="mlp")
class policy_1(FeedForwardPolicy):
    """MLP actor-critic policy: one shared 256-unit layer, then separate 256-unit branches (same architecture as MlpPolicy)."""
    def __init__(self, *args, **kwargs):
        super(policy_1, self).__init__(*args, **kwargs, net_arch=[256, dict(vf=[256], pi=[256])], feature_extraction="mlp")
class policy_2(FeedForwardPolicy):
    """MLP actor-critic policy: two 256-unit layers fully shared between the value and policy networks."""
    def __init__(self, *args, **kwargs):
        super(policy_2, self).__init__(*args, **kwargs, net_arch=[256, 256], feature_extraction="mlp")
class MlpLstmPolicy(LstmPolicy):
"""
    Policy object that implements actor critic, using LSTMs with an MLP feature extraction
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_lstm: (int) The number of LSTM cells (for recurrent policies)
:param reuse: (bool) If the policy is reusable or not
    :param kwargs: (dict) Extra keyword arguments for the feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
super(MlpLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
layer_norm=False, feature_extraction="mlp", **_kwargs)
class MlpLnLstmPolicy(LstmPolicy):
"""
    Policy object that implements actor critic, using layer-normalized LSTMs with an MLP feature extraction
:param sess: (TensorFlow session) The current TensorFlow session
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param n_env: (int) The number of environments to run
:param n_steps: (int) The number of steps to run for each environment
:param n_batch: (int) The number of batch to run (n_envs * n_steps)
:param n_lstm: (int) The number of LSTM cells (for recurrent policies)
:param reuse: (bool) If the policy is reusable or not
    :param kwargs: (dict) Extra keyword arguments for the feature extraction
"""
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm=256, reuse=False, **_kwargs):
super(MlpLnLstmPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, n_lstm, reuse,
layer_norm=True, feature_extraction="mlp", **_kwargs)
_policy_registry = {
ActorCriticPolicy: {
"CnnPolicy": CnnPolicy,
"CnnLstmPolicy": CnnLstmPolicy,
"CnnLnLstmPolicy": CnnLnLstmPolicy,
"MlpPolicy": MlpPolicy,
"MlpLstmPolicy": MlpLstmPolicy,
"MlpLnLstmPolicy": MlpLnLstmPolicy,
}
}
def get_policy_from_name(base_policy_type, name):
"""
    returns the registered policy from the base type and name
:param base_policy_type: (BasePolicy) the base policy object
:param name: (str) the policy name
:return: (base_policy_type) the policy
"""
if base_policy_type not in _policy_registry:
raise ValueError("Error: the policy type {} is not registered!".format(base_policy_type))
if name not in _policy_registry[base_policy_type]:
raise ValueError("Error: unknown policy type {}, the only registed policy type are: {}!"
.format(name, list(_policy_registry[base_policy_type].keys())))
return _policy_registry[base_policy_type][name]
def register_policy(name, policy):
"""
    registers a policy under the given name, so it can later be retrieved with get_policy_from_name
:param name: (str) the policy name
:param policy: (subclass of BasePolicy) the policy
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError("Error: the policy {} is not of any known subclasses of BasePolicy!".format(policy))
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
raise ValueError("Error: the name {} is alreay registered for a different policy, will not override."
.format(name))
_policy_registry[sub_class][name] = policy
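# Usage sketch (illustrative only): registering a custom policy makes it resolvable
# by its string name via get_policy_from_name. The class name "CustomMlpPolicy" and
# its layer sizes are hypothetical, not part of this module.
if __name__ == "__main__":
    class CustomMlpPolicy(FeedForwardPolicy):
        """Example MLP policy with two 128-unit layers shared by the policy and value networks."""
        def __init__(self, *args, **kwargs):
            super(CustomMlpPolicy, self).__init__(*args, **kwargs, net_arch=[128, 128],
                                                  feature_extraction="mlp")

    register_policy("CustomMlpPolicy", CustomMlpPolicy)
    assert get_policy_from_name(ActorCriticPolicy, "CustomMlpPolicy") is CustomMlpPolicy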
| 50.180268
| 224
| 0.656998
|
9c9b7dffec761731570a26a55c96024e33cf3229
| 2,105
|
py
|
Python
|
dandi/support/tests/test_iterators.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | 10
|
2020-07-27T02:34:28.000Z
|
2022-02-18T19:36:17.000Z
|
dandi/support/tests/test_iterators.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | 908
|
2019-08-16T20:40:09.000Z
|
2022-03-31T19:38:51.000Z
|
dandi/support/tests/test_iterators.py
|
TheChymera/dandi-cli
|
9a42b1fa2f9af3be01254f7457f5a21d834c1864
|
[
"Apache-2.0"
] | 10
|
2019-10-08T03:12:23.000Z
|
2022-02-18T19:36:22.000Z
|
from time import sleep
import pytest
from ..iterators import IteratorWithAggregation
def sleeping_range(n, secs=0.01, thr=None):
"""Fast generator based on range
Parameters
----------
n : int
Number to pass to `range`
secs : float, optional
Seconds to sleep between iterations
thr : int, optional
If specified, will cause loop to raise ValueError when it
reaches that value
Yields
------
int
Integers like range does
"""
for i in range(n):
yield i
sleep(secs)
if thr and i >= thr:
raise ValueError(i)
def test_IteratorWithAggregation():
def sumup(v, t=0):
return v + t
it = IteratorWithAggregation(sleeping_range(3, 0.0001), agg=sumup)
# we should get our summary available after 2nd iteration and before it finishes
slow_machine = False
for t, i in enumerate(it):
sleep(0.005) # 0.0003 should be sufficient but to deal with Windows failures,
# making it longer
assert t == i # it is just a range after all
if i:
if not it.finished:
# give considerably more time for poor Windows VM
slow_machine = True
sleep(0.1)
assert it.finished
# If there is an exception thrown, it would be raised only by the end
it = IteratorWithAggregation(sleeping_range(5, 0.0001, thr=2), agg=sumup)
got = []
with pytest.raises(ValueError):
for i in it:
got.append(i)
sleep(0.001 if not slow_machine else 0.1)
assert got == [0, 1, 2]
assert it.finished
# If there is an exception thrown, it would be raised immediately
it = IteratorWithAggregation(
sleeping_range(5, 0.0001, thr=2), agg=sumup, reraise_immediately=True
)
got = []
with pytest.raises(ValueError):
for i in it:
got.append(i)
# sleep long enough to trigger exception before next iteration
sleep(0.02 if not slow_machine else 0.1)
assert got in ([], [0])
assert it.finished
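def test_IteratorWithAggregation_minimal_sketch():
    """Minimal illustrative sketch of the contract exercised above: the wrapped
    iterator yields the original items unchanged while ``agg`` folds them into a
    running total in the background (here a plain sum starting from 0)."""
    it = IteratorWithAggregation(iter([1, 2, 3]), agg=lambda v, t=0: v + t)
    assert list(it) == [1, 2, 3]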
| 28.445946
| 86
| 0.610451
|
54780ffc04060cacaf9139840829ffc66e979477
| 11,011
|
py
|
Python
|
data/cro_transformer_models.py
|
dafrie/fin-disclosures-nlp
|
aa89013a1b3c621df261d70cad16aeef15c34d25
|
[
"MIT"
] | 2
|
2020-06-08T17:07:17.000Z
|
2020-06-25T16:04:38.000Z
|
data/cro_transformer_models.py
|
dafrie/fin-disclosures-nlp
|
aa89013a1b3c621df261d70cad16aeef15c34d25
|
[
"MIT"
] | 2
|
2020-05-11T17:37:59.000Z
|
2020-05-11T17:41:12.000Z
|
data/cro_transformer_models.py
|
dafrie/fin-disclosures-nlp
|
aa89013a1b3c621df261d70cad16aeef15c34d25
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from sklearn.metrics import average_precision_score, accuracy_score, balanced_accuracy_score, precision_recall_fscore_support, roc_auc_score, matthews_corrcoef
from sklearn.utils.class_weight import compute_class_weight
from transformers import logging, AutoModelForSequenceClassification, TrainingArguments, Trainer, AutoTokenizer
from transformers.trainer_pt_utils import nested_detach
import torch
from torch.cuda.amp import autocast  # used by the fp16 branch in prediction_step_
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

# Flag referenced in prediction_step_ below; mirrors the module-level flag used by
# transformers.Trainer (native AMP is available once the autocast import succeeds).
_use_native_amp = True
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class CroTrainer(Trainer):
"""Custom implementation of the "Trainer" class that adds default behaviour such as custom metrics,
weights in the loss function and tokenization and initialization
"""
def __init__(self, model_checkpoint=None, dataset=None, task=None, avg_strategy=None, max_token_size=256, should_weight=True, **kwargs):
"""Custom initialisation that calls the super class method of the base Trainer class.
kwargs contains the TrainerArguments, so any additional parameter (that is not used in the base class) must be a named argument.
Dataset doesn't need to be tokenized.
"""
# Initialize custom stuff
self.model_checkpoint = model_checkpoint
if task == 'binary':
self.num_labels = 2
elif task == 'multi-class':
self.num_labels = dataset['train'].features['labels'].num_classes
else:
self.num_labels = dataset['train'].features['labels'].feature.num_classes
self.dataset = dataset
self.task = task
self.avg_strategy = avg_strategy
self.weights = None
if should_weight:
self.weights = self.get_pos_weights()
print(f"Using weights: {self.weights}")
# Set and load the tokenizer.
self.tokenizer = AutoTokenizer.from_pretrained(self.model_checkpoint)
# Tokenize the dataset
print("Tokenization....")
self.dataset = self.dataset.map(lambda ds: self.tokenizer(
ds["text"], truncation=True, padding='max_length', max_length=max_token_size), batched=True)
# Setup the trainer arguments
self.training_args = TrainingArguments(**kwargs)
# Initialize actual trainer
super().__init__(
model_init=self.my_model_init, # Notice the custom implementation
args=self.training_args,
train_dataset=self.dataset["train"],
eval_dataset=self.dataset["valid"],
tokenizer=self.tokenizer,
compute_metrics=self.my_compute_metrics # Notice the custom implementation
)
def my_model_init(self):
"""Custom model initialization. Disabels logging temporarily to avoid spamming messages and loads the pretrained or fine-tuned model"""
logging.set_verbosity_error() # Workaround to hide warnings that the model weights are randomly set and fine-tuning is necessary (which we do later...)
model = AutoModelForSequenceClassification.from_pretrained(
# Load from model checkpoint, i.e. the pretrained model or a previously saved fine-tuned model
self.model_checkpoint,
num_labels=self.num_labels, # The number of different categories/labels
# Whether the model returns attentions weights.
output_attentions=False,
output_hidden_states=False, # Whether the model returns all hidden-states.)
            return_dict=False,  # TODO: Change this back and amend the custom functions
)
logging.set_verbosity_warning()
return model
def prediction_step_(self, model, inputs, prediction_loss_only, ignore_keys):
"""Custom method overwriting the provided method to include "compute_loss()" which lead to an error when evaluating
See previous PR: https://github.com/huggingface/transformers/pull/7074/files
"""
        # Fall back to the inherited method unless the multi-label handling is needed
if self.task != 'multi-label':
return super().prediction_step(model, inputs, prediction_loss_only, ignore_keys)
has_labels = all(inputs.get(k) is not None for k in self.label_names)
# These two lines are added
if has_labels:
labels = tuple(inputs.get(name) for name in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(
self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
with torch.no_grad():
if self.args.fp16 and _use_native_amp:
with autocast():
# These two lines are added
if has_labels:
loss = self.compute_loss(model, inputs)
outputs = model(**inputs)
else:
# These two lines are added
if has_labels:
loss = self.compute_loss(model, inputs)
outputs = model(**inputs)
if has_labels:
if isinstance(outputs, dict):
# loss = outputs["loss"].mean().detach()
loss = loss.mean().detach()
logits = tuple(v for k, v in outputs.items()
if k not in ignore_keys + ["loss"])
else:
# loss = outputs[0].mean().detach()
loss = loss.mean().detach()
# logits = outputs[1:]
logits = outputs[0:]
else:
loss = None
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items()
if k not in ignore_keys)
else:
logits = outputs
# TODO: Remove
# Slicing so we get a tuple even if `outputs` is a `ModelOutput`.
# logits = outputs[:]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
# TODO: Remove
# Remove the past from the logits.
# logits = logits[: self.args.past_index - 1] + \
# logits[self.args.past_index:]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
if has_labels:
labels = nested_detach(labels)
# ORIG: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
return (loss, logits, labels)
def get_pos_weights(self):
"""Calculates weights for each class that relates to the ratio of positive to negative sample in each class"""
if self.task == 'multi-label':
pos_counts = np.sum(self.dataset['train']['labels'], axis=0)
neg_counts = [self.dataset['train'].num_rows -
pos_count for pos_count in pos_counts]
pos_weights = neg_counts / pos_counts
else:
y = self.dataset['train']['labels']
pos_weights = compute_class_weight(
class_weight='balanced', classes=np.unique(y), y=y)
tensor = torch.as_tensor(pos_weights, dtype=torch.float)
if torch.cuda.is_available():
return tensor.to(device="cuda")
return tensor
def compute_loss(self, model, inputs, return_outputs=False):
"""Implements a BinaryCrossEntropyWithLogits activation and loss function
to support multi-label cases
"""
labels = inputs.pop("labels")
outputs = model(**inputs)
# logits = outputs[0]
logits = outputs.logits
###############################
# Note: The multi-label conditional is added since we want to use a different loss function for this case
###############################
# Multi-Label: Example: [0 1 1]
if self.task == 'multi-label':
            # To adjust for imbalanced data, pos_weight up-weights the positive samples of each class
loss_func = BCEWithLogitsLoss(pos_weight=self.weights)
#labels = labels.float()
loss = loss_func(logits.view(-1, self.model.config.num_labels), # model.num_labels), # The logits
labels.float().view(-1, self.model.config.num_labels) # model.num_labels)
) # The labels
# Binary or multi-class
else:
if model.num_labels == 1:
loss_fct = MSELoss() # Doing regression
loss = loss_fct(logits.view(-1), labels.view(-1))
# Multi-class (Example: [0 0 1]):
else:
loss_fct = CrossEntropyLoss(weight=self.weights)
loss = loss_fct(
logits.view(-1, self.model.config.num_labels), labels.view(-1))
return (loss, outputs) if return_outputs else loss
def my_compute_metrics(self, pred):
"""Computes classification task metric. Supports both multi-class and multi-label"""
labels = pred.label_ids
preds = pred.predictions
# Multi-label
if self.task == 'multi-label':
roc_auc = roc_auc_score(labels, preds, average=self.avg_strategy)
pr_auc = average_precision_score(
labels, preds, average=self.avg_strategy)
# TODO: Also add some threshold aware eval metrics, such as accuracy, ...
# Problem: How do we set the threshold?
return {
'roc_auc': roc_auc,
'pr_auc': pr_auc
}
# Binary/Multi-class task
else:
preds_bool = preds.argmax(-1)
# Threshold unaware metrics
# roc_auc = roc_auc_score(labels, preds, average=self.avg_strategy)
# pr_auc = average_precision_score(labels, preds, average = self.avg_strategy)
preds = preds.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(
labels, preds_bool, average=self.avg_strategy)
acc = accuracy_score(labels, preds_bool)
balanced_accuracy = balanced_accuracy_score(
labels, preds_bool, adjusted=True)
matthews_corr = matthews_corrcoef(labels, preds_bool)
return {
# 'roc_auc': roc_auc,
# 'pr_auc': pr_auc,
'accuracy': acc,
'balanced_accuracy': balanced_accuracy,
'f1': f1,
'precision': precision,
'recall': recall,
'matthews_correlation': matthews_corr
}
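# Construction sketch (illustrative; the checkpoint name, output directory and the
# `dataset` variable below are placeholders, not values used by this project).
# `dataset` is assumed to be a datasets.DatasetDict with "train"/"valid" splits, a
# "text" column and a "labels" feature; the remaining keyword arguments are passed
# through to TrainingArguments, as described in CroTrainer.__init__.
#
#     trainer = CroTrainer(
#         model_checkpoint="distilbert-base-uncased",
#         dataset=dataset,
#         task="binary",
#         avg_strategy="macro",
#         output_dir="./results",
#         num_train_epochs=3,
#         per_device_train_batch_size=16,
#     )
#     trainer.train()
#     print(trainer.evaluate())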
| 42.513514
| 160
| 0.592226
|
308e63c76843dc2c83c6e852f8d1de8f5c88dc24
| 236
|
py
|
Python
|
thirdweb/modules/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | 1
|
2022-02-18T16:59:12.000Z
|
2022-02-18T16:59:12.000Z
|
thirdweb/modules/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | null | null | null |
thirdweb/modules/__init__.py
|
princetonwong/python-sdk
|
f35181d97620e29d055498fca75f3702f3bb2449
|
[
"Apache-2.0"
] | null | null | null |
"""All Modules"""
from .nft import NftModule as NftModuleV2
from .nft_v1 import *
from .nft_types import *
from .currency import *
from .market import *
from .pack import *
from .collection import CollectionModule
from .bundle import *
| 23.6
| 41
| 0.762712
|
cf963af522d3404b66c4873948913ee959b74f72
| 10,945
|
py
|
Python
|
plants_disease_classify_pytorch/main.py
|
wanghao15536870732/plants_disease_classify
|
6d0d1d39f0ec15fc2bd523142c5c403a1577da84
|
[
"Apache-2.0"
] | null | null | null |
plants_disease_classify_pytorch/main.py
|
wanghao15536870732/plants_disease_classify
|
6d0d1d39f0ec15fc2bd523142c5c403a1577da84
|
[
"Apache-2.0"
] | null | null | null |
plants_disease_classify_pytorch/main.py
|
wanghao15536870732/plants_disease_classify
|
6d0d1d39f0ec15fc2bd523142c5c403a1577da84
|
[
"Apache-2.0"
] | null | null | null |
import os
import random
import time
import json
import torch
import torchvision
import numpy as np
import pandas as pd
import warnings
from datetime import datetime
from torch import nn, optim
from config import config
from collections import OrderedDict
from torch.autograd import Variable
from torch.utils.data import DataLoader
from dataset.dataloader import *
from sklearn.model_selection import train_test_split, StratifiedKFold
from timeit import default_timer as timer
from models.model import *
from utils import *
# 1. set random.seed and cudnn performance
random.seed(config.seed)
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus
torch.backends.cudnn.benchmark = True
warnings.filterwarnings('ignore')
# 2. evaluate func
def evaluate(val_loader, model, criterion):
# 2.1 define meters
losses = AverageMeter()
top1 = AverageMeter()
top2 = AverageMeter()
# 2.2 switch to evaluate mode and confirm model has been transfered to cuda
model.cuda()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
input = Variable(input).cuda()
target = Variable(torch.from_numpy(np.array(target)).long()).cuda()
# target = Variable(target).cuda()
# 2.2.1 compute output
output = model(input)
loss = criterion(output, target)
# 2.2.2 measure accuracy and record loss
precision1, precision2 = accuracy(output, target, topk=(1, 2))
losses.update(loss.item(), input.size(0))
top1.update(precision1[0], input.size(0))
top2.update(precision2[0], input.size(0))
return [losses.avg, top1.avg, top2.avg]
# 3. test model on public dataset and save the probability matrix
def predict(test_loader, model, folds):
# 3.1 confirm the model converted to cuda
csv_map = OrderedDict({"filename": [], "probability": []})
model.cuda()
model.eval()
with open("./submit/baseline.json", "w", encoding="utf-8") as f:
submit_results = []
for i, (input, filepath) in enumerate(tqdm(test_loader)):
# 3.2 change everything to cuda and get only basename
filepath = [os.path.basename(x) for x in filepath]
with torch.no_grad():
image_var = Variable(input).cuda()
# 3.3.output
# print(filepath)
# print(input,input.shape)
y_pred = model(image_var)
# print(y_pred.shape)
smax = nn.Softmax(1)
smax_out = smax(y_pred)
# 3.4 save probability to csv files
csv_map["filename"].extend(filepath)
for output in smax_out:
prob = ";".join([str(i) for i in output.data.tolist()])
csv_map["probability"].append(prob)
result = pd.DataFrame(csv_map)
result["probability"] = result["probability"].map(lambda x: [float(i) for i in x.split(";")])
for index, row in result.iterrows():
pred_label = np.argmax(row['probability'])
pred_acc = row['probability'][pred_label]
if pred_label > 43:
pred_label = pred_label + 2
submit_results.append({"image_id": row['filename'], "disease_class": config.LABEL_NAMES[pred_label],
"identify_accuracy": pred_acc})
json.dump(submit_results, f, ensure_ascii=False, cls=MyEncoder)
# 4. more details to build main function
def main():
fold = 0
# 4.1 mkdirs
if not os.path.exists(config.submit):
os.mkdir(config.submit)
if not os.path.exists(config.weights):
os.mkdir(config.weights)
if not os.path.exists(config.best_models):
os.mkdir(config.best_models)
if not os.path.exists(config.logs):
os.mkdir(config.logs)
if not os.path.exists(config.weights + config.model_name + os.sep + str(fold) + os.sep):
os.makedirs(config.weights + config.model_name + os.sep + str(fold) + os.sep)
if not os.path.exists(config.best_models + config.model_name + os.sep + str(fold) + os.sep):
os.makedirs(config.best_models + config.model_name + os.sep + str(fold) + os.sep)
# 4.2 get model and optimizer
model = get_net()
# model = torch.nn.DataParallel(model)
model.cuda()
# optimizer = optim.SGD(model.parameters(),lr = config.lr,momentum=0.9,weight_decay=config.weight_decay)
optimizer = optim.Adam(model.parameters(), lr=config.lr, amsgrad=True, weight_decay=config.weight_decay)
criterion = nn.CrossEntropyLoss().cuda()
# criterion = FocalLoss().cuda()
log = Logger()
log.open(config.logs + "log_train.txt", mode="a")
log.write("\n----------------------------------------------- [START %s] %s\n\n" % (
datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '-' * 51))
# 4.3 some parameters for K-fold and restart model
start_epoch = 0
best_precision1 = 0
best_precision_save = 0
resume = True
# 4.4 restart the training process
if resume:
checkpoint = torch.load(config.weights + config.model_name + os.sep + str(fold) + "/_checkpoint.pth.tar")
start_epoch = checkpoint["epoch"]
fold = checkpoint["fold"]
best_precision1 = checkpoint["best_precision1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
# 4.5 get files and split for K-fold dataset
# 4.5.1 read files
train_ = get_files(config.train_data, "train")
# val_data_list = get_files(config.val_data,"val")
test_files = get_files(config.test_one_data, "test")
"""
#4.5.2 split
split_fold = StratifiedKFold(n_splits=3)
folds_indexes = split_fold.split(X=origin_files["filename"],y=origin_files["label"])
folds_indexes = np.array(list(folds_indexes))
fold_index = folds_indexes[fold]
#4.5.3 using fold index to split for train data and val data
train_data_list = pd.concat([origin_files["filename"][fold_index[0]],origin_files["label"][fold_index[0]]],axis=1)
val_data_list = pd.concat([origin_files["filename"][fold_index[1]],origin_files["label"][fold_index[1]]],axis=1)
"""
train_data_list, val_data_list = train_test_split(train_, test_size=0.15, stratify=train_["label"])
# 4.5.4 load dataset
train_dataloader = DataLoader(ChaojieDataset(train_data_list), batch_size=config.batch_size, shuffle=True,
collate_fn=collate_fn, pin_memory=True)
val_dataloader = DataLoader(ChaojieDataset(val_data_list, train=False), batch_size=config.batch_size, shuffle=True,
collate_fn=collate_fn, pin_memory=False)
test_dataloader = DataLoader(ChaojieDataset(test_files, test=True), batch_size=1, shuffle=False, pin_memory=False)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,"max",verbose=1,patience=3)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
# 4.5.5.1 define metrics
train_losses = AverageMeter()
train_top1 = AverageMeter()
train_top2 = AverageMeter()
valid_loss = [np.inf, 0, 0]
model.train()
# logs
log.write('** start training here! **\n')
log.write(
' |------------ VALID -------------|----------- TRAIN -------------|------Accuracy------|------------|\n')
log.write(
'lr iter epoch | loss top-1 top-2 | loss top-1 top-2 | Current Best | time |\n')
log.write(
'-------------------------------------------------------------------------------------------------------------------------------\n')
# 4.5.5 train
start = timer()
for epoch in range(start_epoch, config.epochs):
scheduler.step(epoch)
# train
# global iter
for iter, (input, target) in enumerate(train_dataloader):
# 4.5.5 switch to continue train process
model.train()
input = Variable(input).cuda()
target = Variable(torch.from_numpy(np.array(target)).long()).cuda()
# target = Variable(target).cuda()
torch.cuda.empty_cache()
output = model(input)
loss = criterion(output, target)
precision1_train, precision2_train = accuracy(output, target, topk=(1, 2))
train_losses.update(loss.item(), input.size(0))
train_top1.update(precision1_train[0], input.size(0))
train_top2.update(precision2_train[0], input.size(0))
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr = get_learning_rate(optimizer)
print('\r', end='', flush=True)
print(
'%0.4f %5.1f %6.1f | %0.3f %0.3f %0.3f | %0.3f %0.3f %0.3f | %s | %s' % ( \
lr, iter / len(train_dataloader) + epoch, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_losses.avg, train_top1.avg, train_top2.avg, str(best_precision_save),
time_to_str((timer() - start), 'min'))
, end='', flush=True)
# evaluate
lr = get_learning_rate(optimizer)
# evaluate every half epoch
valid_loss = evaluate(val_dataloader, model, criterion)
is_best = valid_loss[1] > best_precision1
best_precision1 = max(valid_loss[1], best_precision1)
try:
best_precision_save = best_precision1.cpu().data.numpy()
        except AttributeError:  # best_precision1 may already be a plain float
pass
save_checkpoint({
"epoch": epoch + 1,
"model_name": config.model_name,
"state_dict": model.state_dict(),
"best_precision1": best_precision1,
"optimizer": optimizer.state_dict(),
"fold": fold,
"valid_loss": valid_loss,
}, is_best, fold)
        # learning rate is already adjusted by the StepLR scheduler.step(epoch) call at
        # the top of the epoch loop; the metric-based step below belonged to the
        # commented-out ReduceLROnPlateau scheduler and is therefore not called
        # scheduler.step(valid_loss[1])
print("\r", end="", flush=True)
log.write(
'%0.4f %5.1f %6.1f | %0.3f %0.3f %0.3f | %0.3f %0.3f %0.3f | %s | %s' % ( \
lr, 0 + epoch, epoch,
valid_loss[0], valid_loss[1], valid_loss[2],
train_losses.avg, train_top1.avg, train_top2.avg, str(best_precision_save),
time_to_str((timer() - start), 'min'))
)
log.write('\n')
time.sleep(0.01)
best_model = torch.load(
config.best_models + os.sep + config.model_name + os.sep + str(fold) + os.sep + 'model_best.pth.tar')
model.load_state_dict(best_model["state_dict"])
predict(test_dataloader, model, fold)
if __name__ == "__main__":
main()
| 42.921569
| 140
| 0.59863
|
28cec6f2704a461d573f458dd9f5544f37d39c91
| 1,111
|
py
|
Python
|
src/python/dependencies.py
|
yinghsienwu/cloud
|
9f6d0c0a2b26f0cab307492ef137353fdac842b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/dependencies.py
|
yinghsienwu/cloud
|
9f6d0c0a2b26f0cab307492ef137353fdac842b3
|
[
"Apache-2.0"
] | null | null | null |
src/python/dependencies.py
|
yinghsienwu/cloud
|
9f6d0c0a2b26f0cab307492ef137353fdac842b3
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration of TensorFlow Cloud client-side library."""
def make_required_install_packages():
return [
"absl-py",
"docker",
"google-api-python-client",
"google-auth",
"google-cloud-storage",
"keras-tuner",
"tensorflow>=1.15.0,<3.0",
"tensorflow_datasets<3.1.0",
]
def make_required_test_packages():
return [
"absl-py",
"flake8",
"mock",
"numpy",
"nbconvert",
]
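# Sketch of how these helpers would typically feed a setuptools setup() call in
# setup.py (the package name and version below are placeholders, not the project's
# actual metadata):
#
#     from setuptools import find_packages, setup
#
#     setup(
#         name="tensorflow-cloud",       # placeholder
#         version="0.0.0",               # placeholder
#         packages=find_packages(),
#         install_requires=make_required_install_packages(),
#         extras_require={"tests": make_required_test_packages()},
#     )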
| 28.487179
| 74
| 0.660666
|
95a29421407ab33bb64509191acee4b401870cba
| 623
|
py
|
Python
|
backend/Techfesia2019/events/permissions.py
|
Sayam753/Techfesia2019
|
40b497a1d98fc27910bba4ad2aad039acdea81f9
|
[
"MIT"
] | null | null | null |
backend/Techfesia2019/events/permissions.py
|
Sayam753/Techfesia2019
|
40b497a1d98fc27910bba4ad2aad039acdea81f9
|
[
"MIT"
] | null | null | null |
backend/Techfesia2019/events/permissions.py
|
Sayam753/Techfesia2019
|
40b497a1d98fc27910bba4ad2aad039acdea81f9
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IsStaffUser(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method == 'GET':
return True
else:
if request.user.is_staff or request.user.is_superuser:
return True
else:
return False
def has_permission(self, request, view):
if request.method == 'GET':
return True
else:
if request.user.is_staff or request.user.is_superuser:
return True
else:
return False
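# Usage sketch (illustrative; "EventListView", Event and EventSerializer are
# hypothetical names, not part of this app): attaching the permission class to a
# DRF view keeps GET public while write methods require staff or superuser accounts.
#
#     from rest_framework import generics
#
#     class EventListView(generics.ListCreateAPIView):
#         queryset = Event.objects.all()
#         serializer_class = EventSerializer
#         permission_classes = (IsStaffUser,)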
| 28.318182
| 66
| 0.571429
|
7caf358bc7133176fd4fd867f42e095f51a8e575
| 447
|
py
|
Python
|
jsonschematypes/tests/fixtures.py
|
locationlabs/jsonschema-types
|
82491390ddfed3b17ff6e5fc8d62b4cb92ab05c7
|
[
"Apache-2.0"
] | 2
|
2015-05-27T16:40:48.000Z
|
2015-05-27T17:25:50.000Z
|
jsonschematypes/tests/fixtures.py
|
locationlabs/jsonschema-types
|
82491390ddfed3b17ff6e5fc8d62b4cb92ab05c7
|
[
"Apache-2.0"
] | 2
|
2015-06-03T17:43:42.000Z
|
2022-02-10T08:26:00.000Z
|
jsonschematypes/tests/fixtures.py
|
locationlabs/jsonschema-types
|
82491390ddfed3b17ff6e5fc8d62b4cb92ab05c7
|
[
"Apache-2.0"
] | 2
|
2015-06-03T17:12:10.000Z
|
2021-09-08T09:49:45.000Z
|
"""
Common test fixtures.
"""
from os.path import dirname, join
ADDRESS = dict(
street="1600 Pennsylvania Ave",
city="Washington",
state="DC",
)
NAME = dict(
first="George",
last="Washington",
)
RECORD = dict(
name=NAME,
address=ADDRESS,
)
ADDRESS_ID = "http://x.y.z/bar/address"
NAME_ID = "http://x.y.z/foo/name"
RECORD_ID = "http://x.y.z/record"
def schema_for(name):
return join(dirname(__file__), name)
| 14.419355
| 40
| 0.63311
|
fd197682937f8d9d7cb9145c73bbb0ea2eb2714a
| 1,548
|
py
|
Python
|
migrations/versions/2efa047fe343_fix_issue_increase_string_length_of_.py
|
Bubblbu/fhe-collector
|
b587a952eec318eab6cf430383fe83ca85277895
|
[
"MIT"
] | null | null | null |
migrations/versions/2efa047fe343_fix_issue_increase_string_length_of_.py
|
Bubblbu/fhe-collector
|
b587a952eec318eab6cf430383fe83ca85277895
|
[
"MIT"
] | null | null | null |
migrations/versions/2efa047fe343_fix_issue_increase_string_length_of_.py
|
Bubblbu/fhe-collector
|
b587a952eec318eab6cf430383fe83ca85277895
|
[
"MIT"
] | null | null | null |
"""fix issue: increase string length of source in Import() class to 500
Revision ID: 2efa047fe343
Revises: 4a3a663ce25d
Create Date: 2019-03-05 00:30:15.707276
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2efa047fe343'
down_revision = '4a3a663ce25d'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('import', 'source',
existing_type=sa.VARCHAR(length=32),
type_=sa.String(length=500),
existing_nullable=True)
op.alter_column('url', 'url',
existing_type=sa.VARCHAR(length=256),
type_=sa.String(length=512))
op.alter_column('url', 'url_type',
existing_type=sa.VARCHAR(length=64),
type_=sa.String(length=32),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('url', 'url_type',
existing_type=sa.String(length=32),
type_=sa.VARCHAR(length=64),
existing_nullable=True)
op.alter_column('url', 'url',
existing_type=sa.String(length=512),
type_=sa.VARCHAR(length=256))
op.alter_column('import', 'source',
existing_type=sa.String(length=500),
type_=sa.VARCHAR(length=32),
existing_nullable=True)
# ### end Alembic commands ###
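# Applying this revision with the Alembic CLI (sketch, assuming an alembic.ini
# configured to point at the target database):
#
#     alembic upgrade 2efa047fe343    # or: alembic upgrade head
#     alembic downgrade 4a3a663ce25d  # revert to the previous revision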
| 31.591837
| 71
| 0.614987
|
86d50490a6ad74e8526814f464613628ef1c88ea
| 2,367
|
py
|
Python
|
tests/unit/api/test_config.py
|
amenezes/discovery-client
|
9c41456d1cc14f4aab34628ad4e13423e00bc4be
|
[
"Apache-2.0"
] | 2
|
2019-07-18T22:43:49.000Z
|
2020-03-09T03:27:41.000Z
|
tests/unit/api/test_config.py
|
amenezes/discovery-client
|
9c41456d1cc14f4aab34628ad4e13423e00bc4be
|
[
"Apache-2.0"
] | 20
|
2019-02-27T19:08:03.000Z
|
2021-06-22T16:47:32.000Z
|
tests/unit/api/test_config.py
|
amenezes/discovery-client
|
9c41456d1cc14f4aab34628ad4e13423e00bc4be
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from discovery import api
def sample_payload():
return {
"Kind": "service-defaults",
"Name": "web",
"Protocol": "http",
}
def config_response():
return {
"Kind": "service-defaults",
"Name": "web",
"Protocol": "http",
"CreateIndex": 15,
"ModifyIndex": 35,
}
def list_response():
return [
{
"Kind": "service-defaults",
"Name": "db",
"Protocol": "tcp",
"CreateIndex": 16,
"ModifyIndex": 16,
},
{
"Kind": "service-defaults",
"Name": "web",
"Protocol": "http",
"CreateIndex": 13,
"ModifyIndex": 13,
},
]
@pytest.fixture
@pytest.mark.asyncio
def config(consul_api):
return api.Config(client=consul_api)
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_apply(config, expected):
config.client.expected = expected
response = await config.apply(sample_payload())
assert response.status == 200
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [config_response()])
async def test_get_success(config, expected):
config.client.expected = expected
response = await config.get("service-defaults", "web")
response = await response.json()
assert response == config_response()
@pytest.mark.asyncio
async def test_get_value_error(config):
with pytest.raises(ValueError):
await config.get("service", "web")
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [list_response()])
async def test_list_success(config, expected):
config.client.expected = expected
response = await config.list("service-defaults")
response = await response.json()
assert response == list_response()
@pytest.mark.asyncio
async def test_list_value_error(config):
with pytest.raises(ValueError):
await config.list("service")
@pytest.mark.asyncio
@pytest.mark.parametrize("expected", [200])
async def test_delete_success(config, expected):
config.client.expected = expected
response = await config.delete("service-defaults", "web")
assert response.status == 200
@pytest.mark.asyncio
async def test_delete_value_error(config):
with pytest.raises(ValueError):
await config.delete("service", "web")
| 23.909091
| 61
| 0.637938
|
416e9acb820f091da6e50a785a6436e2e0edc9b9
| 801
|
py
|
Python
|
app/main/forms.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
app/main/forms.py
|
ruthjelimo/Pitch-app
|
c70258bd5dfc99520ed662276ef405137597cb1f
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField,SelectField
from wtforms.validators import DataRequired
class PitchForm(FlaskForm):
details = TextAreaField('Add your pitch', validators=[DataRequired()])
category = SelectField("Choose Category",choices=[('interview','interview'),('Adverts','Adverts'),('products','products')])
pitch = TextAreaField('Your Pitch',validators=[DataRequired()])
title = TextAreaField('Add your pitch', validators=[DataRequired()])
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
comment = TextAreaField('Your Comment')
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about yourself.',validators = [DataRequired()])
submit = SubmitField('Submit')
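# Usage sketch (illustrative; the blueprint, template and Pitch model below are
# hypothetical stand-ins, not part of this module): the form is validated on POST
# and its fields are read before persisting a new pitch.
#
#     @main.route('/pitch/new', methods=['GET', 'POST'])
#     @login_required
#     def new_pitch():
#         form = PitchForm()
#         if form.validate_on_submit():
#             pitch = Pitch(title=form.title.data,
#                           category=form.category.data,
#                           pitch=form.pitch.data,
#                           user=current_user)
#             pitch.save_pitch()
#             return redirect(url_for('.index'))
#         return render_template('new_pitch.html', form=form)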
| 42.157895
| 125
| 0.755306
|
bb1d9d55e5935ef59f7cc6aabf762d99f032dd54
| 4,304
|
py
|
Python
|
update_rundeck_ldap_info.py
|
dsumike/scripts
|
2bbf631248ec6478f1558e54f887b18db7dcacee
|
[
"MIT"
] | null | null | null |
update_rundeck_ldap_info.py
|
dsumike/scripts
|
2bbf631248ec6478f1558e54f887b18db7dcacee
|
[
"MIT"
] | null | null | null |
update_rundeck_ldap_info.py
|
dsumike/scripts
|
2bbf631248ec6478f1558e54f887b18db7dcacee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Mike Johnson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# TODO: Add some argparse stuff later
#import argparse
import ldap
import mysql.connector
import sys
################################################################
# !! BEGIN: user configuration
################################################################
# Exclusion List: local accounts, do not check for updates
# -------------------------------------------------------------
exclusion = ['admin', 'administrator']
# LDAP Credentials
# --------------------------------------------------------------
ldapserver = "ldap://<server>:<port>"
binddn = "CN=<bind user>,CN=Users,dc=<domain>,dc=<tld>"
bindpw = "<bind user pass>"
basedn = "CN=Users,DC=<domain>,DC=<tld>"
# MySQL Credentials
# --------------------------------------------------------------
mysqlsvr = "localhost"
mysql_db = "<rundeck mysql database>"
mysqlusr = "<rundeck mysql username>"
mysqlpwd = "<rundeck mysql password>"
################################################################
# !! END: user configuration
################################################################
def ldap_search(username):
# LDAP Search
searchFilter = "(&(objectclass=User)(sAMAccountName=%s))" % username
searchAttribute = ["givenName","sn","mail"]
l = ldap.initialize(ldapserver) # Initialize LDAP
searchScope = ldap.SCOPE_SUBTREE # this will scope the entire subtree under UserUnits
# Bind to the server
try:
l.protocol_version = ldap.VERSION3
l.simple_bind_s(binddn, bindpw)
except ldap.INVALID_CREDENTIALS:
sys.exit(0)
except ldap.LDAPError, e:
if type(e.message) == dict and e.message.has_key('desc'):
sys.exit(0)
else:
sys.exit(0)
try:
ldap_result_id = l.search(basedn, searchScope, searchFilter, searchAttribute)
result_set = []
result_type, result_data = l.result(ldap_result_id, 0)
if (result_data == []):
# aww, no data found
data = None
else:
# yay, we found some data
if result_type == ldap.RES_SEARCH_ENTRY:
result_set.append(result_data)
cn = result_data[0][0] # cn Returned first
data = result_data[0][1] # searchAttributes second
# Clean up the data items for easy access
for (i, j) in data.items():
if len(j) == 1:
data[i] = j[0]
return data
except ldap.LDAPError, e:
sys.exit(0)
finally:
l.unbind_s()
return 0
def mysql_update(cursor, username, userdata):
query = "UPDATE rduser SET first_name='{}', last_name='{}', email='{}' WHERE login='{}'".format(
userdata["givenName"], userdata["sn"], userdata["mail"], username)
cursor.execute(query)
def mysql_search():
cnx = mysql.connector.connect(host=mysqlsvr, user=mysqlusr, password=mysqlpwd, database=mysql_db)
cur = cnx.cursor()
query = "SELECT login from rduser where email is NULL and login <> 'admin'"
for login in exclusion:
query += " and login <> '{}'".format(login)
    cur.execute(query)
    result = cur.fetchall()
for login in result:
userdata = ldap_search(login[0])
mysql_update(cur, login[0], userdata)
cur.close()
cnx.commit()
cnx.close()
def main():
# TODO: Add some argparse
# --full-update ?
mysql_search()
if __name__ == "__main__":
main()
| 30.524823
| 98
| 0.639173
|
e8474f23122c62d914311431ee3b2d054ec78f33
| 11,253
|
py
|
Python
|
tensorflow/python/saved_model/load.py
|
KulieX/tensorflow
|
0064ad1e1c0fe79832cd8cbb588338683b38c937
|
[
"Apache-2.0"
] | 1
|
2019-02-07T13:58:03.000Z
|
2019-02-07T13:58:03.000Z
|
tensorflow/python/saved_model/load.py
|
KulieX/tensorflow
|
0064ad1e1c0fe79832cd8cbb588338683b38c937
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/saved_model/load.py
|
KulieX/tensorflow
|
0064ad1e1c0fe79832cd8cbb588338683b38c937
|
[
"Apache-2.0"
] | 2
|
2019-12-27T07:48:05.000Z
|
2020-06-13T19:20:14.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a checkpointable object from a SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_deserialization
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import saved_object_graph_pb2
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import graph_view
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import compat
class _Loader(object):
"""Helper class to load an object-based SavedModel."""
def __init__(self, object_graph_proto, saved_model_proto, export_dir):
meta_graph = saved_model_proto.meta_graphs[0]
self._asset_file_def = meta_graph.asset_file_def
self._operation_attributes = {
node.name: node.attr for node in meta_graph.graph_def.node}
self._proto = object_graph_proto
self._export_dir = export_dir
self._concrete_functions = (
function_deserialization.load_function_def_library(
meta_graph.graph_def.library))
self._load_all()
self._setup_functions()
self._restore_checkpoint()
def _setup_concrete_function(self, proto, concrete_function, coder):
"""Setup captured tensors and outputs for a single concrete function."""
bound_inputs = [
self._get_tensor_from_node(node_id)
for node_id in proto.bound_inputs]
bound_variables = [
self._nodes[node_id]
for node_id in proto.bound_inputs
if self._proto.nodes[node_id].WhichOneof("kind") == "variable"
]
# TODO(andresp): This is only injecting the captured inputs into the
# concrete function, note that we did not modify the FuncGraph
# itself.
concrete_function._captured_inputs = bound_inputs # pylint: disable=protected-access
concrete_function._func_graph.variables = bound_variables # pylint: disable=protected-access
# By setting the structured_outputs directly, we can rely on this
# function_lib.ConcreteFunction object to perform the output repacking
# logic. The only limitation of that logic is that it only works
# with output that is convertible to Tensors and the conversion
# always happens. For example tf.TensorShape([2, 3]) will be
# converted to Tensor representing [2, 3].
original_outputs = coder.decode_proto(proto.output_signature)
# The original_outputs here had Tensors converted to TensorSpecs, so
# the restored function's structured_outputs field will not be
# exactly the same. Fortunately the repacking logic cares only about
# the structure.
# TODO(vbardiovsky): Should we just replicate the structures, with
# Nones instead of real objects?
concrete_function._func_graph.structured_outputs = original_outputs # pylint: disable=protected-access
concrete_function._func_graph.structured_input_signature = ( # pylint: disable=protected-access
coder.decode_proto(proto.canonicalized_input_signature))
def _setup_functions(self):
"""Setup captures and output structure in restored functions."""
coder = nested_structure_coder.StructureCoder()
for name, concrete_function_proto in self._proto.concrete_functions.items():
self._setup_concrete_function(
concrete_function_proto,
self._concrete_functions[name],
coder)
def _get_tensor_from_node(self, node_id):
obj = self._nodes[node_id]
if resource_variable_ops.is_resource_variable(obj):
return obj.handle
elif isinstance(obj, tracking.TrackableAsset):
return obj.asset_path.handle
elif tensor_util.is_tensor(obj):
return obj
raise ValueError("Can't convert node %s to tensor" % (type(obj)))
def _load_all(self):
"""Load all saved objects and wire their properties."""
self._nodes = []
node_setters = []
for proto in self._proto.nodes:
node, setter = self._recreate(proto)
self._nodes.append(node)
node_setters.append(setter)
# After creating the objects, construct the edges between the objects.
for obj, object_proto, setter in zip(self._nodes, self._proto.nodes,
node_setters):
for reference in object_proto.children:
setter(obj, reference.local_name, self._nodes[reference.node_id])
# Note: if an object has an attribute `__call__` add a class method
# that allows `obj()` syntax to work. This is done per-instance to
# allow `callable` to be used to find out if an object is callable.
if reference.local_name == "__call__":
setattr(type(obj), "__call__", _call_attribute)
def _restore_checkpoint(self):
"""Load state from checkpoint into the deserialized objects."""
variables_path = saved_model_utils.get_variables_path(self._export_dir)
# TODO(andresp): Clean use of private methods of CheckpointableSaver.
# pylint: disable=protected-access
saver = util.CheckpointableSaver(graph_view.ObjectGraphView(self.get(0)))
saver._file_prefix_placeholder = constant_op.constant(variables_path)
load_status = saver.restore(variables_path)
load_status.assert_existing_objects_matched()
checkpoint = load_status._checkpoint
# When running in eager mode, the `restore` call above has already run and
# restored the state of checkpointables, call `position.restore_ops()` will
# return an empty list as there is nothing left to do. In graph mode, that
# will return the list of ops that must run to restore the object on that
# position. We have to wire them in the initializers of the objects so that
# they get initialized properly when using common practices (e.g. the ones
# used by ManagedSession) without further user action.
for object_id, obj in dict(checkpoint.object_by_proto_id).items():
position = base.CheckpointPosition(checkpoint=checkpoint,
proto_id=object_id)
restore_ops = position.restore_ops()
if restore_ops:
if resource_variable_ops.is_resource_variable(obj):
obj._initializer_op = restore_ops
else:
raise NotImplementedError(
("Missing functionality to restore state of object "
"%r from the checkpoint." % obj))
def get(self, node_id):
return self._nodes[node_id]
def _recreate(self, proto):
"""Creates a Python object from a SavedObject protocol buffer."""
factory = {
"user_object": lambda: self._recreate_user_object(proto.user_object),
"asset": lambda: self._recreate_asset(proto.asset),
"function": lambda: self._recreate_function(proto.function),
"bare_concrete_function": functools.partial(
self._recreate_bare_concrete_function,
proto.bare_concrete_function),
"variable": lambda: self._recreate_variable(proto.variable),
"constant": lambda: self._recreate_constant(proto.constant),
}
kind = proto.WhichOneof("kind")
if kind not in factory:
raise ValueError("Unknown SavedObject type: %r" % kind)
return factory[kind]()
def _recreate_user_object(self, proto):
"""Instantiates a SavedUserObject."""
looked_up = revived_types.deserialize(proto)
if looked_up is None:
# Note: each user object has its own class. This allows to make each one
# individually callable by adding a `__call__` method to the classes of
# the objects instances that have a `__call__` property.
class _UserObject(tracking.AutoCheckpointable):
pass
return _UserObject(), setattr
return looked_up
def _recreate_asset(self, proto):
filename = os.path.join(
saved_model_utils.get_assets_dir(self._export_dir),
self._asset_file_def[proto.asset_file_def_index].filename)
return tracking.TrackableAsset(filename), setattr
def _recreate_function(self, proto):
return function_deserialization.recreate_function(
proto, self._concrete_functions), setattr
def _recreate_bare_concrete_function(self, proto):
return function_deserialization.setup_bare_concrete_function(
proto, self._concrete_functions), setattr
def _recreate_variable(self, proto):
# TODO(andresp): Can we use the checkpointed value as initializer?
dummy_value = init_ops.Zeros(dtype=proto.dtype)(shape=proto.shape)
return variables.Variable(dummy_value, trainable=proto.trainable), setattr
def _recreate_constant(self, proto):
tensor_proto = self._operation_attributes[proto.operation]["value"].tensor
imported_constant = constant_op.constant(
tensor_util.MakeNdarray(tensor_proto))
return imported_constant, setattr
def _call_attribute(instance, *args, **kwargs):
return instance.__call__(*args, **kwargs)
def _load_saved_object_graph_proto(filename):
with file_io.FileIO(filename, "rb") as f:
contents = f.read()
return saved_object_graph_pb2.SavedObjectGraph.FromString(contents)
def load(export_dir):
"""Load a SavedModel from `export_dir`."""
saved_model_proto = loader_impl.parse_saved_model(export_dir)
object_graph_filename = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY),
compat.as_bytes("object_graph.pb"))
if file_io.file_exists(object_graph_filename):
object_graph_proto = _load_saved_object_graph_proto(object_graph_filename)
with ops.init_scope():
loader = _Loader(object_graph_proto,
saved_model_proto,
export_dir)
root = loader.get(0)
else:
raise NotImplementedError(
"Currently only SavedModels exported with `tf.saved_model.save` may be "
"imported. Other SavedModels may eventually be supported via load().")
return root
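# Usage sketch (illustrative; the export directory is a placeholder path): an object
# saved with tf.saved_model.save can be re-imported here, after which the variables,
# functions and child objects tracked at save time are reachable on the returned root.
#
#     imported = load("/tmp/exported_model")   # placeholder path
#     # e.g. imported.f(...) for a saved tf.function, or imported.v for a saved variable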
| 44.832669
| 107
| 0.734915
|
7b214e6cfab86941e910dd6e1287556f9fb80953
| 10,119
|
py
|
Python
|
notebooks/helpers.py
|
leouieda/case-studies-gravity
|
f1b0b3c18b752ee67b515afb93ebac03f11ea2b4
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 3
|
2018-08-31T05:39:57.000Z
|
2020-06-24T08:32:37.000Z
|
notebooks/helpers.py
|
GeophysicsLibrary/gravity-case-studies
|
f1b0b3c18b752ee67b515afb93ebac03f11ea2b4
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 2
|
2018-04-13T03:35:40.000Z
|
2018-04-13T21:32:26.000Z
|
notebooks/helpers.py
|
GeophysicsLibrary/gravity-case-studies
|
f1b0b3c18b752ee67b515afb93ebac03f11ea2b4
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
"""
Helper functions for displaying data and making interactive plots.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import ipywidgets as widgets
from IPython.display import display, clear_output
import cmocean
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
def minmax(data, fields):
"""
Get the minimum and maximum data values out of all fields given.
Returns them in a dictionary with the 'vmin' and 'vmax' keys.
"""
vmin = min(data[field].min() for field in fields)
vmax = max(data[field].max() for field in fields)
return dict(vmin=vmin, vmax=vmax)
def plot_field(ax, data, field, cmap=None, gridline_spacing=3, cb_pad=0.03,
cb_aspect=50, cb_shrink=0.8, ticks=True, title=True, **kwargs):
"""
Make a pcolormesh plot of the given data field.
    Sets the plot extent and includes ticks in longitude and latitude.
"""
if title:
ax.set_title(field)
if 'add_colorbar' not in kwargs:
kwargs['cbar_kwargs'] = dict(orientation='horizontal',
aspect=cb_aspect, pad=cb_pad,
shrink=cb_shrink)
data[field].plot.pcolormesh(ax=ax, add_labels=False, cmap=cmap, **kwargs)
ax.coastlines()
w, e, s, n = [data.longitude.values.min(), data.longitude.values.max(),
data.latitude.values.min(), data.latitude.values.max()]
ax.set_extent([w, e, s, n])
xlocs = np.arange(w, e + 0.01, gridline_spacing)
ylocs = np.arange(s, n + 0.01, gridline_spacing)
if ticks:
ax.set_xticks(xlocs)
ax.set_yticks(ylocs)
ax.xaxis.set_major_formatter(LongitudeFormatter())
ax.yaxis.set_major_formatter(LatitudeFormatter())
ax.gridlines(color="#cccccc55", xlocs=xlocs, ylocs=ylocs)
def plot_hawaii_data(data, field, **kwargs):
"""
Plot a given field from our Hawai'i dataset.
"""
fig = plt.figure(figsize=(12, 13))
ax = plt.axes(projection=ccrs.PlateCarree())
plot_field(ax, data, field, **kwargs)
plt.tight_layout(pad=0)
def plot_japan_data(data, field, **kwargs):
"""
Plot a given field from our Japan dataset.
"""
fig = plt.figure(figsize=(12, 13))
ax = plt.axes(projection=ccrs.PlateCarree())
plot_field(ax, data, field, gridline_spacing=5, **kwargs)
plt.tight_layout(pad=0)
def plot_himalayas_data(data, field, **kwargs):
"""
Plot a given field from our Himalayas dataset.
"""
fig = plt.figure(figsize=(12, 13))
ax = plt.axes(projection=ccrs.PlateCarree())
plot_field(ax, data, field, gridline_spacing=5, **kwargs)
plt.tight_layout(pad=0)
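# Usage sketch (illustrative; the file name and field are placeholders): the plotting
# helpers expect an xarray.Dataset with "longitude"/"latitude" coordinates and the
# named data variable.
#
#     import xarray as xr
#     data = xr.open_dataset("hawaii-gravity.nc")   # placeholder file name
#     plot_hawaii_data(data, "topography_ell", cmap=cmocean.cm.delta)
#     plt.show()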
class ProfileSelector(object):
"""
Define a widget for selecting and plotting profiles from a dataset.
Use the ``interact`` method to insert an interactive widget to control
the profile location.
Use the ``plot`` method to plot a static profile figure.
Parameters
----------
data : xarray.Dataset
The data grid.
fields : list of str
The fields to plot in the upper profile
figsize : tuple
The size of the profile figure
projection : cartopy CRS
A cartopy projection to apply to the data maps
"""
def __init__(self, data, fields, projection, figsize=(15, 9),
profile_interval=10, dimension='latitude'):
self.data = data
self.fields = fields
self._plot_initiated = False
self.projection = projection
self.figsize = figsize
self.profile_interval = profile_interval
self.default_dimension = dimension
def plot(self, location, dimension):
"""
Plot a figure of the profile at location along dimension.
"""
if not self._plot_initiated:
# Setup the figure and subplot grid
self.fig = plt.figure(figsize=self.figsize)
grid = GridSpec(2, 4, hspace=0, wspace=0)
self.ax_data = self.fig.add_subplot(grid[0,:-1])
self.ax_topo = self.fig.add_subplot(grid[1,:-1])
self.ax_data_map = self.fig.add_subplot(grid[0,-1],
projection=self.projection)
self.ax_topo_map = self.fig.add_subplot(grid[1,-1],
projection=self.projection)
# The y axis limits for the profiles
self._topo_base = -10000
ylim_topo = [self._topo_base, self.data.topography_ell.max()*1.1]
ylim_data = list(sorted(minmax(self.data, self.fields).values()))
# Set labels and dimensions
self.ax_data.set_ylim(ylim_data)
self.ax_data.set_ylabel('mGal')
self.ax_topo.set_ylim(ylim_topo)
            self.ax_topo.set_ylabel('Topography (m)')
self.ax_data.grid(True)
self.ax_data.set_xticklabels([])
# Draw the profile lines
self._data_lines = {}
for field in self.fields:
self._data_lines[field], = self.ax_data.plot([0], [0], '-',
label=field)
self.ax_data.legend(loc='upper right')
# Place holders for the topography polygons
self._water_fill = None
self._topo_fill = None
# Plot the maps
plot_field(self.ax_data_map, self.data, self.fields[0],
ticks=False, add_colorbar=False, title=False,
cmap='RdBu_r')
plot_field(self.ax_topo_map, self.data, 'topography_ell',
ticks=False, add_colorbar=False, title=False,
cmap=cmocean.cm.delta)
# Draw on the maps showing the profiles
self._datamap_profile, = self.ax_data_map.plot([0, 0], [0, 0],
'--k')
self._topomap_profile, = self.ax_topo_map.plot([0, 0], [0, 0],
'--k')
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
self._plot_initiated = True
# Get the name of the other dimension
dim_comp = set(self.data.dims).difference({dimension}).pop()
# Get the profile
x = self.data[dimension]
xlim = [x.min(), x.max()]
profile = self.data.loc[{dim_comp: location}]
# Update the data plots
for field in self.fields:
self._data_lines[field].set_data(x, profile[field])
# Update the topography plot
if self._topo_fill is not None:
self._topo_fill.remove()
if self._water_fill is not None:
self._water_fill.remove()
self._water_fill = self.ax_topo.fill_between(xlim, [0, 0],
self._topo_base,
color='#2780E3')
self._topo_fill = self.ax_topo.fill_between(x, profile.topography_ell,
self._topo_base,
color='#333333')
# Update the profile location plot
profile_location = [xlim, [location, location]]
if dimension.lower() == 'latitude':
profile_location = profile_location[::-1]
self._datamap_profile.set_data(*profile_location)
self._topomap_profile.set_data(*profile_location)
# Make sure the plots are tight
self.ax_data.set_xlim(xlim)
self.ax_topo.set_xlim(xlim)
self.ax_topo.set_xlabel(dimension.capitalize())
plt.show()
def interact(self):
"""
Display an interactive widget for choosing the profile.
"""
# Setup the initial value options for the location
dim = self.default_dimension
dim2 = set(self.data.dims).difference({dim}).pop()
options = self.data[dim2].values.tolist()[::self.profile_interval]
mid = options[len(options)//2]
# Make the slider for choosing the location
slider_label = widgets.Label("at {} value".format(dim2))
slider = widgets.SelectionSlider(options=options, value=mid,
layout=widgets.Layout(width="350px"))
# Make a menu for choosing the profile direction
dimension_chooser = widgets.Dropdown(
options=self.data.dims.keys(), value=dim,
description="Profile along")
def displayer(location, dimension):
"Update and display the plot with given arguments"
self.plot(location, dimension)
display(self.fig)
def handle_dimension_change(change):
"Change the location options when dimension changes"
dim2 = set(self.data.dims).difference({change.new}).pop()
slider_label.value = "at {} value".format(dim2)
options = self.data[dim2].values.tolist()[::self.profile_interval]
slider.options = options
slider.value = options[len(options)//2]
# Connect the dimension change to the slider
dimension_chooser.observe(handle_dimension_change, names='value')
# Make the output display and connect it to the callback
output = widgets.interactive_output(
displayer, {'location': slider, 'dimension': dimension_chooser})
# Make a title for the widget
title = widgets.HTML(
'<strong style="font-size: 1.5em;">Profile selector</strong>')
# Layout the widgets
layout = widgets.VBox(
[title,
widgets.HBox([dimension_chooser, slider_label, slider]),
output],
layout=widgets.Layout(align_items="center"))
        # For some reason, the initial plot() call inserts a figure in the output
# Call clear_output to get rid of it.
with output:
clear_output(wait=True)
display(self.fig)
return layout
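# A minimal usage sketch (not part of the original module). The dataset and the
# field name below are hypothetical; they assume an xarray.Dataset with
# 'longitude'/'latitude' dims and a 'topography_ell' variable, as described in
# the ProfileSelector docstring.
def _example_profile_selector(grav_data):
    # Build the selector and return the interactive widget for a notebook cell.
    selector = ProfileSelector(grav_data, fields=['gravity_disturbance'],
                               projection=ccrs.PlateCarree())
    return selector.interact()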
| 38.329545
| 79
| 0.591066
|
b399dffd7981ce9c428a48573cc758582b6d208e
| 1,793
|
py
|
Python
|
test/unit/applications/lang/python.py
|
matuhinal/unit
|
e019b7113a533b96e1e0fd5acc7b7e28101300d8
|
[
"Apache-2.0"
] | null | null | null |
test/unit/applications/lang/python.py
|
matuhinal/unit
|
e019b7113a533b96e1e0fd5acc7b7e28101300d8
|
[
"Apache-2.0"
] | null | null | null |
test/unit/applications/lang/python.py
|
matuhinal/unit
|
e019b7113a533b96e1e0fd5acc7b7e28101300d8
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
from urllib.parse import quote
import pytest
from unit.applications.proto import TestApplicationProto
from unit.option import option
class TestApplicationPython(TestApplicationProto):
application_type = "python"
load_module = "wsgi"
def load(self, script, name=None, module=None, **kwargs):
if name is None:
name = script
if module is None:
module = self.load_module
if script[0] == '/':
script_path = script
else:
script_path = option.test_dir + '/python/' + script
if kwargs.get('isolation') and kwargs['isolation'].get('rootfs'):
rootfs = kwargs['isolation']['rootfs']
if not os.path.exists(rootfs + '/app/python/'):
os.makedirs(rootfs + '/app/python/')
if not os.path.exists(rootfs + '/app/python/' + name):
shutil.copytree(script_path, rootfs + '/app/python/' + name)
script_path = '/app/python/' + name
app = {
"type": self.get_application_type(),
"processes": kwargs.pop('processes', {"spare": 0}),
"path": script_path,
"working_directory": script_path,
"module": module,
}
for attr in (
'callable',
'home',
'limits',
'path',
'protocol',
'targets',
'threads',
):
if attr in kwargs:
app[attr] = kwargs.pop(attr)
self._load_conf(
{
"listeners": {
"*:7080": {"pass": "applications/" + quote(name, '')}
},
"applications": {name: app},
},
**kwargs
)
| 27.166667
| 76
| 0.50251
|
641c39c468914770962fe5653c41cb8f6ec4c29c
| 605
|
py
|
Python
|
reverse-vowels-of-a-string/Solution.62349711.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | null | null | null |
reverse-vowels-of-a-string/Solution.62349711.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | 1
|
2016-09-11T22:26:17.000Z
|
2016-09-13T01:49:48.000Z
|
reverse-vowels-of-a-string/Solution.62349711.py
|
rahul-ramadas/leetcode
|
6c84c2333a613729361c5cdb63dc3fc80203b340
|
[
"MIT"
] | null | null | null |
class Solution:
def is_vowel(self, c):
vowels = {"a", "e", "i", "o", "u"}
return c.lower() in vowels
def reverseVowels(self, s):
left = 0
right = len(s) - 1
s = list(s)
while left < right:
while left < right and not self.is_vowel(s[left]):
left += 1
while left < right and not self.is_vowel(s[right]):
right -= 1
s[left], s[right] = s[right], s[left]
left += 1
right -= 1
return "".join(s)
| 26.304348
| 64
| 0.406612
|
dba7b4c06c721fb49908d94d268f3689fe5b763a
| 2,569
|
py
|
Python
|
kedro/templates/project/{{ cookiecutter.repo_name }}/src/setup.py
|
monkeyclass/kedro
|
65e786bd737de66d3977712dfde6ecca9e81c320
|
[
"Apache-2.0"
] | 1
|
2022-01-26T04:50:53.000Z
|
2022-01-26T04:50:53.000Z
|
kedro/templates/project/{{ cookiecutter.repo_name }}/src/setup.py
|
monkeyclass/kedro
|
65e786bd737de66d3977712dfde6ecca9e81c320
|
[
"Apache-2.0"
] | null | null | null |
kedro/templates/project/{{ cookiecutter.repo_name }}/src/setup.py
|
monkeyclass/kedro
|
65e786bd737de66d3977712dfde6ecca9e81c320
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
entry_point = (
"{{ cookiecutter.repo_name }} = {{ cookiecutter.python_package }}.run:run_package"
)
# get the dependencies and installs
with open("requirements.txt", "r", encoding="utf-8") as f:
# Make sure we strip all comments and options (e.g "--extra-index-url")
    # that arise from a modified pip.conf file configuring global options
# when running kedro build-reqs
requires = []
for line in f:
req = line.split("#", 1)[0].strip()
if req and not req.startswith("--"):
requires.append(req)
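# Worked example of the filtering above (hypothetical requirements.txt content):
#
#   pandas>=1.3  # core dependency       -> kept as "pandas>=1.3"
#   --extra-index-url https://pypi.org/  -> dropped (starts with "--")
#   # a pure comment line                -> dropped (empty after stripping)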
setup(
name="{{ cookiecutter.python_package }}",
version="0.1",
packages=find_packages(exclude=["tests"]),
entry_points={"console_scripts": [entry_point]},
install_requires=requires,
extras_require={
"docs": [
"sphinx~=3.4.3",
"sphinx_rtd_theme==0.5.1",
"nbsphinx==0.8.1",
"nbstripout==0.3.3",
"recommonmark==0.7.1",
"sphinx-autodoc-typehints==1.11.1",
"sphinx_copybutton==0.3.1",
"ipykernel~=5.3",
]
},
)
| 38.924242
| 86
| 0.694044
|
1968bec034599b962d4f66d4f93b70769b10f7d9
| 2,931
|
py
|
Python
|
tests/clients/test_dapr_grpc_response.py
|
karishma-chawla/python-sdk
|
15e018d48418cca5e1660c8afe37403b91f6298d
|
[
"MIT"
] | null | null | null |
tests/clients/test_dapr_grpc_response.py
|
karishma-chawla/python-sdk
|
15e018d48418cca5e1660c8afe37403b91f6298d
|
[
"MIT"
] | null | null | null |
tests/clients/test_dapr_grpc_response.py
|
karishma-chawla/python-sdk
|
15e018d48418cca5e1660c8afe37403b91f6298d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
import unittest
from google.protobuf.any_pb2 import Any as GrpcAny
from dapr.clients.grpc._response import DaprResponse, InvokeMethodResponse, BindingResponse
from dapr.proto import common_v1
class DaprResponseTests(unittest.TestCase):
test_headers = (
('key1', 'value1'),
('key2', 'value2'),
('key3', 'value3'),
)
def test_convert_metadata(self):
# act
resp = DaprResponse(self.test_headers)
# assert
self.assertEqual(3, len(resp.headers))
for k, v in self.test_headers:
self.assertEqual(resp.headers[k], [v])
class InvokeMethodResponseTests(unittest.TestCase):
def test_non_protobuf_message(self):
with self.assertRaises(ValueError):
resp = InvokeMethodResponse(data=123)
self.assertIsNone(resp, 'This should not be reached.')
def test_is_proto_for_non_protobuf(self):
test_data = GrpcAny(value=b'hello dapr')
resp = InvokeMethodResponse(
data=test_data,
content_type='application/json')
self.assertFalse(resp.is_proto())
def test_is_proto_for_protobuf(self):
fake_req = common_v1.InvokeRequest(method="test")
test_data = GrpcAny()
test_data.Pack(fake_req)
resp = InvokeMethodResponse(data=test_data)
self.assertTrue(resp.is_proto())
def test_proto(self):
fake_req = common_v1.InvokeRequest(method="test")
resp = InvokeMethodResponse(data=fake_req)
self.assertIsNotNone(resp.proto)
def test_data(self):
test_data = GrpcAny(value=b'hello dapr')
resp = InvokeMethodResponse(
data=test_data,
content_type='application/json')
self.assertEqual(b'hello dapr', resp.data)
self.assertEqual('hello dapr', resp.text())
self.assertEqual('application/json', resp.content_type)
def test_unpack(self):
# arrange
fake_req = common_v1.InvokeRequest(method="test")
# act
resp = InvokeMethodResponse(data=fake_req)
resp_proto = common_v1.InvokeRequest()
resp.unpack(resp_proto)
# assert
self.assertEqual("test", resp_proto.method)
class InvokeBindingResponseTests(unittest.TestCase):
def test_bytes_message(self):
resp = BindingResponse(data=b'data', binding_metadata={})
self.assertEqual({}, resp.binding_metadata)
self.assertEqual(b'data', resp.data)
self.assertEqual('data', resp.text())
def test_metadata(self):
resp = BindingResponse(data=b'data', binding_metadata={'status': 'ok'})
self.assertEqual({'status': 'ok'}, resp.binding_metadata)
self.assertEqual(b'data', resp.data)
self.assertEqual('data', resp.text())
if __name__ == '__main__':
unittest.main()
| 30.53125
| 91
| 0.655408
|
da453df8a5d5738769f2aeb89e3ad132453f19ff
| 4,197
|
py
|
Python
|
utils/selenium_browser.py
|
Xiao233-q/JDMemberCloseAccount
|
2617ef1e9fc332a7bffceb02c8d11c8596d22b2b
|
[
"MIT"
] | 1
|
2021-06-06T08:33:21.000Z
|
2021-06-06T08:33:21.000Z
|
utils/selenium_browser.py
|
Xiao233-q/JDMemberCloseAccount
|
2617ef1e9fc332a7bffceb02c8d11c8596d22b2b
|
[
"MIT"
] | null | null | null |
utils/selenium_browser.py
|
Xiao233-q/JDMemberCloseAccount
|
2617ef1e9fc332a7bffceb02c8d11c8596d22b2b
|
[
"MIT"
] | 1
|
2021-07-05T10:13:35.000Z
|
2021-07-05T10:13:35.000Z
|
import os
import sys
from utils.config import get_file
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
def get_browser(_config):
"""
    Get the browser object.
:return:
"""
browser_type = _config['browserType']
headless = _config['headless']
binary = _config['binary']
try:
if browser_type == 'Chrome':
chrome_options = webdriver.ChromeOptions()
            # Prevent errors in some environments
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_experimental_option("excludeSwitches", ['enable-automation', 'enable-logging'])
if binary != "":
                # If the browser binary cannot be found automatically, set its path in the config
chrome_options.binary_location = binary
if headless:
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
if sys.platform == 'linux':
_browser = webdriver.Chrome(executable_path=get_file("./drivers/chromedriver"), desired_capabilities={},
options=chrome_options)
elif sys.platform == 'darwin':
_browser = webdriver.Chrome(executable_path=get_file("./drivers/chromedriver"), desired_capabilities={},
options=chrome_options)
elif sys.platform == 'win32':
_browser = webdriver.Chrome(executable_path=get_file("./drivers/chromedriver"), desired_capabilities={},
options=chrome_options)
elif browser_type == 'Edge':
from msedge.selenium_tools import Edge, EdgeOptions
edge_options = EdgeOptions()
edge_options.use_chromium = True
edge_options.add_argument('--no-sandbox')
edge_options.add_argument('--disable-dev-shm-usage')
edge_options.add_experimental_option("excludeSwitches", ['enable-automation', 'enable-logging'])
if binary != "":
edge_options.binary_location = binary
if headless:
edge_options.add_argument('--headless')
edge_options.add_argument('--disable-gpu')
if sys.platform == 'linux':
_browser = Edge(executable_path=get_file("./drivers/msedgedriver"), options=edge_options,
capabilities={})
elif sys.platform == 'darwin':
_browser = Edge(executable_path=get_file("./drivers/msedgedriver"), capabilities={},
options=edge_options)
elif sys.platform == 'win32':
_browser = Edge(executable_path=get_file("./drivers/msedgedriver"), capabilities={},
options=edge_options)
elif browser_type == 'Firefox':
            # Clear the previous log first
if not os.path.exists(get_file("./logs")):
os.mkdir(get_file("./logs/"))
open(get_file("./logs/geckodriver.log"), "w").close()
firefox_options = webdriver.FirefoxOptions()
firefox_options.log.level = "fatal"
if binary != "":
firefox_options.binary_location = binary
if headless:
firefox_options.add_argument('--headless')
firefox_options.add_argument('--disable-gpu')
if sys.platform == 'linux':
_browser = webdriver.Firefox(executable_path=get_file('./drivers/geckodriver'), options=firefox_options,
service_log_path=get_file("./logs/geckodriver.log"))
elif sys.platform == 'darwin':
_browser = webdriver.Firefox(executable_path=get_file('./drivers/geckodriver'), options=firefox_options)
elif sys.platform == 'win32':
_browser = webdriver.Firefox(executable_path=get_file('./drivers/geckodriver'), options=firefox_options)
else:
raise WebDriverException
return _browser
except WebDriverException:
        # Driver problem
        print("ERROR", "Browser error", "Please check your driver and configuration")
| 48.241379
| 120
| 0.582797
|
37f1baa109cd1c5a4c60bb8347e3407aea032ed7
| 6,718
|
py
|
Python
|
pidi_mpris/screens.py
|
chme/pidi-mpris
|
beb96d5221fc6d033333e831624847ba402d05ce
|
[
"MIT"
] | 1
|
2020-11-26T13:11:43.000Z
|
2020-11-26T13:11:43.000Z
|
pidi_mpris/screens.py
|
chme/pidi-mpris
|
beb96d5221fc6d033333e831624847ba402d05ce
|
[
"MIT"
] | 1
|
2020-11-26T15:06:18.000Z
|
2020-11-28T07:19:43.000Z
|
pidi_mpris/screens.py
|
chme/pidi-mpris
|
beb96d5221fc6d033333e831624847ba402d05ce
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2020 Christian Meffert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import csv
import logging
from PIL import Image, ImageFont
import threading
from .buttons import Button
from .display import TextImage
from .util import color_hex_to_rgb
log = logging.getLogger(__name__)
class Screen:
def activate(self):
pass
def deactivate(self):
pass
def onButtonPressed(self, button):
pass
def onButtonLongPress(self, button, secondsPressed):
pass
def onButtonReleased(self, button, secondsPressed):
pass
def onPlayerUpdate(self):
pass
class GifScreen(Screen):
def __init__(self, conf, display):
self._display = display
self._conf = conf['GifScreen']
parser = csv.reader([self._conf['image']])
self._images = [item for sublist in parser for item in sublist]
self._activeImage = 0
self._numImages = len(self._images)
self._irq = threading.Event()
self._thread = threading.Thread(name='gif', target=self._showGif)
def activate(self):
self._irq.clear()
self._thread = threading.Thread(name='gif', target=self._showGif)
self._thread.start()
def deactivate(self):
self._irq.set()
self._thread.join()
self._thread = None
def onButtonPressed(self, button):
if button == Button.Y:
if self._numImages > 1:
self.deactivate()
self._activeImage = (self._activeImage + 1) % self._numImages
self.activate()
def _showGif(self):
image = Image.open(self._images[self._activeImage])
run = True
frame = 0
while run:
try:
image.seek(frame)
self._display.image(image)
frame += 1
duration = 0.05
if 'duration' in image.info:
duration = image.info['duration'] / 1000
run = not self._irq.wait(duration)
except EOFError:
frame = 0
class NowPlayingInfoScreen(Screen):
def __init__(self, conf, display, mprisPlayer):
self._display = display
self._mprisPlayer = mprisPlayer
self._conf = conf['NowPlayingInfoScreen']
self._fonts = []
self._fonts.append(ImageFont.truetype(
self._conf['line1_font_face'], int(self._conf['line1_font_size'])))
self._fonts.append(ImageFont.truetype(
self._conf['line2_font_face'], int(self._conf['line2_font_size'])))
self._fonts.append(ImageFont.truetype(
self._conf['line3_font_face'], int(self._conf['line3_font_size'])))
self._fonts.append(ImageFont.truetype(
self._conf['line4_font_face'], int(self._conf['line4_font_size'])))
self._bgColor = color_hex_to_rgb(self._conf['background'])
self._colors = []
self._colors.append(color_hex_to_rgb(self._conf['line1_font_color']))
self._colors.append(color_hex_to_rgb(self._conf['line2_font_color']))
self._colors.append(color_hex_to_rgb(self._conf['line3_font_color']))
self._colors.append(color_hex_to_rgb(self._conf['line4_font_color']))
log.debug('Text colors: %s', self._colors)
self._texts = []
self._texts.append(self._conf['line1_text'])
self._texts.append(self._conf['line2_text'])
self._texts.append(self._conf['line3_text'])
self._texts.append(self._conf['line4_text'])
log.debug('Text templates: %s', self._texts)
self._txtImage = TextImage(
self._display.width, self._display.height, bgColor=self._bgColor)
def activate(self):
self._showInfo()
def deactivate(self):
pass
def onButtonPressed(self, button):
if button == Button.A:
self._mprisPlayer.previous()
elif button == Button.X:
self._mprisPlayer.next()
elif button == Button.Y:
self._mprisPlayer.playPause()
def onPlayerUpdate(self):
self._showInfo()
def _showInfo(self):
self._txtImage.reset()
artist = ', '.join(self._mprisPlayer.artist())
album = self._mprisPlayer.album()
title = self._mprisPlayer.title()
for i, t in enumerate(self._texts):
if len(t) > 0:
log.debug(t.format(artist=artist, album=album, title=title))
self._txtImage.add(
t.format(artist=artist, album=album, title=title), self._fonts[i], color=self._colors[i])
self._display.image(self._txtImage.draw())
class ArtworkScreen(Screen):
def __init__(self, conf, display, mprisPlayer):
self._conf = conf['ArtworkScreen']
self._defaultImage = self._conf['fallback_image']
self._display = display
self._mprisPlayer = mprisPlayer
self._artUrl = None
def activate(self):
self._showArtwork()
def deactivate(self):
self._artUrl = None
def onButtonPressed(self, button):
if button == Button.A:
self._mprisPlayer.previous()
elif button == Button.X:
self._mprisPlayer.next()
elif button == Button.Y:
self._mprisPlayer.playPause()
def onPlayerUpdate(self):
self._showArtwork()
def _showArtwork(self):
artUrl = self._mprisPlayer.artUrl()
if artUrl.startswith('file://'):
artUrl = artUrl[len('file://'):]
else:
artUrl = self._defaultImage
if self._artUrl is None or self._artUrl != artUrl:
self._artUrl = artUrl
self._display.imageFile(artUrl)
| 31.539906
| 109
| 0.638434
|
3f9884f52a99a1f66bf4b73f3731ae2436246d6b
| 5,934
|
py
|
Python
|
src/sentry/features/__init__.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | 1
|
2020-02-27T02:46:25.000Z
|
2020-02-27T02:46:25.000Z
|
src/sentry/features/__init__.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/features/__init__.py
|
lauryndbrown/sentry
|
c5304e303966566386f5e61df1b72624a30803b4
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from .base import * # NOQA
from .handler import * # NOQA
from .manager import * # NOQA
# The feature flag system provides a way to turn on or off features of Sentry.
#
# Registering a new feature:
#
# - Determine what scope your feature falls under. By convention we have an
#   organization and a project scope, which map to the OrganizationFeature and
# ProjectFeature feature objects. Scoping will provide the feature with
# context.
#
# Organization and Project scoped features will automatically be added into
# the Organization and Project serialized representations.
#
# Additional feature contexts can be found under the features.base module,
# but you will typically deal with the organization or project.
#
# NOTE: There is no currently established convention for features which do not
# fall under these scopes. Use your best judgment for these.
#
# - Set a default for your features.
#
# Feature defaults are configured in the sentry.conf.server.SENTRY_FEATURES
# module variable. This is the DEFAULT value for a feature; the default may be
# overridden by the logic in the exposed feature.manager.FeatureManager
# instance. See the ``has`` method here for a detailed understanding of how
# the default value is overridden.
#
# - Use your feature.
#
# You can check if a feature is enabled using the following call:
#
# >>> features.has('organization:my-feature', organization, actor=user)
#
# NOTE: The second parameter is used to provide the feature context, again
# organization and project are the most common, but it is possible that
# other Feature objects may require more arguments.
#
# NOTE: The actor kwarg should be passed when it's expected that the handler
# needs context of the user.
#
# NOTE: Features that require Snuba to function, add to the
# `requires_snuba` tuple.
default_manager = FeatureManager() # NOQA
# Unscoped features
default_manager.add('auth:register')
default_manager.add('organizations:create')
# Organization scoped features
default_manager.add('organizations:advanced-search', OrganizationFeature) # NOQA
default_manager.add('organizations:boolean-search', OrganizationFeature) # NOQA
default_manager.add('organizations:api-keys', OrganizationFeature) # NOQA
default_manager.add('organizations:discover', OrganizationFeature) # NOQA
default_manager.add('organizations:events', OrganizationFeature) # NOQA
default_manager.add('organizations:events-v2', OrganizationFeature) # NOQA
default_manager.add('organizations:event-attachments', OrganizationFeature) # NOQA
default_manager.add('organizations:symbol-sources', OrganizationFeature) # NOQA
default_manager.add('organizations:global-views', OrganizationFeature) # NOQA
default_manager.add('organizations:incidents', OrganizationFeature) # NOQA
default_manager.add('organizations:integrations-issue-basic', OrganizationFeature) # NOQA
default_manager.add('organizations:integrations-issue-sync', OrganizationFeature) # NOQA
default_manager.add('organizations:integrations-event-hooks', OrganizationFeature) # NOQA
default_manager.add('organizations:internal-catchall', OrganizationFeature) # NOQA
default_manager.add('organizations:sentry-apps', OrganizationFeature) # NOQA
default_manager.add('organizations:invite-members', OrganizationFeature) # NOQA
default_manager.add('organizations:large-debug-files', OrganizationFeature) # NOQA
default_manager.add('organizations:monitors', OrganizationFeature) # NOQA
default_manager.add('organizations:onboarding', OrganizationFeature) # NOQA
default_manager.add('organizations:org-saved-searches', OrganizationFeature) # NOQA
default_manager.add('organizations:relay', OrganizationFeature) # NOQA
default_manager.add('organizations:require-2fa', OrganizationFeature) # NOQA
default_manager.add('organizations:sentry10', OrganizationFeature) # NOQA
default_manager.add('organizations:sso-basic', OrganizationFeature) # NOQA
default_manager.add('organizations:sso-rippling', OrganizationFeature) # NOQA
default_manager.add('organizations:sso-saml2', OrganizationFeature) # NOQA
default_manager.add('organizations:grouping-info', OrganizationFeature) # NOQA
default_manager.add('organizations:tweak-grouping-config', OrganizationFeature) # NOQA
default_manager.add('organizations:set-grouping-config', OrganizationFeature) # NOQA
# Project scoped features
default_manager.add('projects:custom-inbound-filters', ProjectFeature) # NOQA
default_manager.add('projects:data-forwarding', ProjectFeature) # NOQA
default_manager.add('projects:discard-groups', ProjectFeature) # NOQA
default_manager.add('projects:minidump', ProjectFeature) # NOQA
default_manager.add('projects:rate-limits', ProjectFeature) # NOQA
default_manager.add('projects:sample-events', ProjectFeature) # NOQA
default_manager.add('projects:servicehooks', ProjectFeature) # NOQA
default_manager.add('projects:similarity-view', ProjectFeature) # NOQA
default_manager.add('projects:similarity-indexing', ProjectFeature) # NOQA
# Project plugin features
default_manager.add('projects:plugins', ProjectPluginFeature) # NOQA
# This is a gross hardcoded list, but there's no
# other sensible way to manage this right now without augmenting
# features themselves in the manager with detections like this.
requires_snuba = (
'organizations:discover',
'organizations:events',
'organizations:events-v2',
'organizations:global-views',
'organizations:incidents',
'organizations:sentry10',
)
# NOTE: Don't add features down here! Add them to their specific group and sort
# them alphabetically! The order features are registered is not important.
# expose public api
add = default_manager.add
get = default_manager.get
has = default_manager.has
all = default_manager.all
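# A hedged usage sketch (the `organization` and `user` objects are assumed to
# come from the caller; the flag name is one of those registered above):
#
#   from sentry import features
#   if features.has('organizations:discover', organization, actor=user):
#       ...  # expose Discover for this organization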
| 49.041322
| 90
| 0.786653
|
c0359db92138e0b43ec02dce90ab9966d4de8fad
| 5,281
|
py
|
Python
|
vqc_qiskit/qdata.py
|
QML-HEP/ae_qml
|
f9061fe4eca76aaf0945255c48e13ced2fb6c24c
|
[
"MIT"
] | 7
|
2021-11-17T12:52:08.000Z
|
2022-02-03T14:49:44.000Z
|
vqc_qiskit/qdata.py
|
QML-HEP/ae_qml
|
f9061fe4eca76aaf0945255c48e13ced2fb6c24c
|
[
"MIT"
] | 2
|
2022-01-21T15:18:37.000Z
|
2022-02-11T13:24:34.000Z
|
vqc_qiskit/qdata.py
|
QML-HEP/ae_qml
|
f9061fe4eca76aaf0945255c48e13ced2fb6c24c
|
[
"MIT"
] | 1
|
2021-12-01T09:22:38.000Z
|
2021-12-01T09:22:38.000Z
|
# Loads the data and an autoencoder model. The original data is passed
# through the AE and the latent space is fed to the qsvm network.
import sys
import os
import numpy as np
sys.path.append("..")
from .terminal_colors import tcols
from autoencoders import data as aedata
from autoencoders import util as aeutil
class qdata:
def __init__(
self,
data_folder,
norm_name,
nevents,
model_path,
train_events=-1,
valid_events=-1,
test_events=-1,
kfolds=0,
):
device = "cpu"
model_folder = os.path.dirname(model_path)
hp_file = os.path.join(model_folder, "hyperparameters.json")
hp = aeutil.import_hyperparams(hp_file)
print(tcols.OKCYAN + "\nLoading training data:" + tcols.ENDC)
self.ae_data = aedata.AE_data(
data_folder,
norm_name,
nevents,
train_events,
valid_events,
test_events,
)
self.model = aeutil.choose_ae_model(hp["ae_type"], device, hp)
self.model.load_model(model_path)
self.ntrain = self.ae_data.trdata.shape[0]
self.nvalid = self.ae_data.vadata.shape[0]
self.ntest = self.ae_data.tedata.shape[0]
if kfolds > 0:
print(tcols.OKCYAN + "Loading k-folded valid data:" + tcols.ENDC)
self.kfolds = kfolds
self.ae_kfold_data = aedata.AE_data(
data_folder,
norm_name,
nevents,
0,
kfolds * valid_events,
kfolds * test_events,
)
def get_latent_space(self, datat) -> np.ndarray:
"""
Get the latent space depending on the data set you want.
@datat :: String of the data type.
returns :: Output of the ae depending on the given data type.
"""
if datat == "train":
return self.model.predict(self.ae_data.trdata)[0]
if datat == "valid":
return self.model.predict(self.ae_data.vadata)[0]
if datat == "test":
return self.model.predict(self.ae_data.tedata)[0]
raise TypeError("Given data type does not exist!")
def get_kfold_latent_space(self, datat) -> np.ndarray:
"""
Get the kfolded latent space for validation or testing data.
@datat :: String of the data type.
returns :: The kfolded output of the ae depending on the data.
"""
if datat == "valid":
return self.model.predict(self.ae_kfold_data.vadata)[0]
if datat == "test":
return self.model.predict(self.ae_kfold_data.tedata)[0]
raise TypeError("Given data type does not exist!")
def fold(self, data, target, events_per_kfold) -> np.ndarray:
"""
Fold the data, given a number of events you want per fold.
All data that is not folded is then discarded.
@data :: Numpy array of the data to be folded.
@target :: Numpy array of the target corresponding to the data.
@events_per_kfold :: The number of events wanted per fold.
returns :: Folded data set with a certain number of events
per fold.
"""
data_sig, data_bkg = self.ae_data.split_sig_bkg(data, target)
data_sig = data_sig.reshape(-1, int(events_per_kfold / 2), data_sig.shape[1])
data_bkg = data_bkg.reshape(-1, int(events_per_kfold / 2), data_bkg.shape[1])
return np.concatenate((data_sig, data_bkg), axis=1)
def get_kfolded_data(self, datat) -> np.ndarray:
"""
Get the kfolded data for either the validation or testing data.
@datat :: String of the data type.
returns :: Folded data set with a certain number of events
                   per fold.
"""
if datat == "valid":
return self.fold(
self.get_kfold_latent_space(datat),
self.ae_kfold_data.vatarget,
self.nvalid,
)
if datat == "test":
return self.fold(
self.get_kfold_latent_space(datat),
self.ae_kfold_data.tetarget,
self.ntest,
)
raise TypeError("Given data type does not exist!")
@staticmethod
def batchify(data, batch_size):
"""
        Reshape the training data into an array of arrays, each sub-array
        containing the number of events in one batch.
@data :: Array of data to be split.
@batch_size :: Int of the batch size.
"""
if len(data.shape) == 1:
return data.reshape(-1, batch_size)
elif len(data.shape) == 2:
return data.reshape(-1, batch_size, data.shape[1])
else:
raise RuntimeError(
"Batchify does not cover arrays with dimension larger than 2."
)
@staticmethod
def to_onehot(target):
"""
        Reshape the target such that it follows one-hot encoding.
@target :: Numpy array with target data.
"""
onehot_target = np.zeros((target.size, int(target.max() + 1)))
onehot_target[np.arange(target.size), target.astype(int)] = 1
return onehot_target
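# A small illustration of the two static helpers above (values are made up):
#
#   qdata.to_onehot(np.array([0, 1, 1, 0]))
#   # -> [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]
#   qdata.batchify(np.arange(8), batch_size=4).shape
#   # -> (2, 4)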
| 33.636943
| 85
| 0.587199
|
3cefce3965d6bb441e91a8b958de490b05540c0d
| 1,414
|
py
|
Python
|
projects/oldProyects/EWESim/LightBulb.py
|
GGP00/soba
|
c193f323f26eccf579a454b8bb4bec4e80644444
|
[
"MIT"
] | 1
|
2017-03-06T12:33:02.000Z
|
2017-03-06T12:33:02.000Z
|
projects/oldProyects/EWESim/LightBulb.py
|
GGP00/soba
|
c193f323f26eccf579a454b8bb4bec4e80644444
|
[
"MIT"
] | 3
|
2017-04-26T08:57:35.000Z
|
2019-04-24T08:28:24.000Z
|
projects/oldProyects/EWESim/LightBulb.py
|
GGP00/soba
|
c193f323f26eccf579a454b8bb4bec4e80644444
|
[
"MIT"
] | 1
|
2019-01-20T17:39:00.000Z
|
2019-01-20T17:39:00.000Z
|
from transitions import Machine
from mesa import Agent, Model
from transitions import State
class Bulb(Agent):
states = [
State(name='off', on_enter=['set_off']),
State(name='on', on_enter=['set_on'])
]
def __init__(self, unique_id, model, room):
super().__init__(unique_id, model)
self.room = room
self.machine = Machine(model=self, states=Bulb.states, initial='off')
self.machine.add_transition('switch_on', '*', 'on')
self.machine.add_transition('switch_off', '*', 'off')
def sensorCheck(self):
userInRoom = self.model.ThereIsUserInRoom(self.room)
if userInRoom == True:
if self.state == 'on':
pass
else:
actions = self.model.actionsNear
if actions['channel'] == 'Bulb' and actions['action'] == 'SwitchOn':
self.switch_on()
else:
if self.state == 'off':
pass
else:
actions = self.model.actionsFar
if actions['channel'] == 'Bulb' and actions['action'] == 'SwitchOff':
self.switch_off()
else:
pass
def step(self):
print(self.unique_id, self.state)
self.sensorCheck()
def set_off(self):
pass
def set_on(self):
pass
| 30.085106
| 85
| 0.522631
|
be1a04c59c7bba10777a3e202050426595e1de10
| 3,330
|
py
|
Python
|
web2py/applications/rip/modules/VMPrac.py
|
2spmohanty/vcenter-automation
|
1d10b765ef335087902b0194ed12a61e53807987
|
[
"Apache-2.0"
] | 1
|
2019-10-02T13:25:03.000Z
|
2019-10-02T13:25:03.000Z
|
web2py/applications/rip/modules/VMPrac.py
|
2spmohanty/vcenter-automation
|
1d10b765ef335087902b0194ed12a61e53807987
|
[
"Apache-2.0"
] | null | null | null |
web2py/applications/rip/modules/VMPrac.py
|
2spmohanty/vcenter-automation
|
1d10b765ef335087902b0194ed12a61e53807987
|
[
"Apache-2.0"
] | 1
|
2021-11-05T09:51:02.000Z
|
2021-11-05T09:51:02.000Z
|
import pyVmomi
from pyVmomi import vim, vmodl
def find_obj(si, name, vimtype, threaded=False):
"""
    Find an object in vSphere by its name and return it
"""
content = si.content
obj_view = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
obj_list = obj_view.view
for obj in obj_list:
if threaded:
print('THREAD %s - Checking Object "%s"' % (name, obj.name))
else:
print('Checking object "%s"' % obj.name)
if obj.name == name:
if threaded:
print('THREAD %s - Found object %s' % (name, obj.name))
else:
print('Found object %s' % obj.name)
return obj
return None
def get_container_view(service_instance, obj_type, container=None):
"""
Get a vSphere Container View reference to all objects of type 'obj_type'
It is up to the caller to take care of destroying the View when no longer
needed.
Args:
obj_type (list): A list of managed object types
Returns:
A container view ref to the discovered managed objects
"""
if not container:
container = service_instance.content.rootFolder
view_ref = service_instance.content.viewManager.CreateContainerView(
container=container,
type=obj_type,
recursive=True
)
return view_ref
def collect_properties(service_instance, view_ref, obj_type, path_set=None,
include_mors=False,desired_vm=None):
"""
Collect properties for managed objects from a view ref
Returns:
        A dict mapping the matching property values to their managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
    # Identify the properties to be retrieved
property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
properties = {}
try:
for obj in props:
for prop in obj.propSet:
if desired_vm in prop.val :
properties[prop.val] = obj.obj
else:
pass
return properties
except Exception, e:
print "The exception inside collector_properties " + str(e)
return properties
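# A hedged usage sketch (Python 2, matching the module above; the service
# instance `si` and the VM name are made up):
#
#   view = get_container_view(si, obj_type=[vim.VirtualMachine])
#   vms = collect_properties(si, view, vim.VirtualMachine,
#                            path_set=['name'], desired_vm='test-vm-01')
#   # vms maps each matching VM name to its managed object reference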
| 32.970297
| 90
| 0.643844
|
bcc5878b0a8eca7943e48f63940555d09efaf0e6
| 778
|
py
|
Python
|
drnalpha/sic_codes/models.py
|
UKGovernmentBEIS/BRE_DigitalRegulationNavigator_Alpha
|
bfa6d08212bc18034b20b9c922a554a6e1ddd0f1
|
[
"MIT"
] | null | null | null |
drnalpha/sic_codes/models.py
|
UKGovernmentBEIS/BRE_DigitalRegulationNavigator_Alpha
|
bfa6d08212bc18034b20b9c922a554a6e1ddd0f1
|
[
"MIT"
] | null | null | null |
drnalpha/sic_codes/models.py
|
UKGovernmentBEIS/BRE_DigitalRegulationNavigator_Alpha
|
bfa6d08212bc18034b20b9c922a554a6e1ddd0f1
|
[
"MIT"
] | 1
|
2021-04-21T09:41:43.000Z
|
2021-04-21T09:41:43.000Z
|
from django.db import models
class CodeQuerySet(models.QuerySet):
def related_to_food(self):
return self.filter(
models.Q(code__startswith="10")
| models.Q(code__startswith="11")
| models.Q(code__startswith="56")
)
class Code(models.Model):
code = models.PositiveIntegerField(unique=True)
title = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = CodeQuerySet.as_manager()
autocomplete_search_field = "title"
class Meta:
ordering = ["code"]
def __str__(self):
return f"{self.code}: {self.title}"
def autocomplete_label(self):
return str(self)
| 25.096774
| 59
| 0.661954
|
d86c3b04fb475bc8c62b45803487b9e4e19cac4e
| 5,264
|
py
|
Python
|
cnns/nnlib/robustness/batch_attack/raw_pgd.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | 1
|
2018-03-25T13:19:46.000Z
|
2018-03-25T13:19:46.000Z
|
cnns/nnlib/robustness/batch_attack/raw_pgd.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | null | null | null |
cnns/nnlib/robustness/batch_attack/raw_pgd.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from typing import Union
def project(x: torch.Tensor, x_adv: torch.Tensor, norm: Union[str, int],
eps: float) -> torch.Tensor:
"""Projects x_adv into the l_norm ball around x
Assumes x and x_adv are 4D Tensors representing batches of images
Args:
x: Batch of natural images
x_adv: Batch of adversarial images
norm: Norm of ball around x
eps: Radius of ball
Returns:
x_adv: Adversarial examples projected to be at most eps
distance from x under a certain norm
"""
if x.shape != x_adv.shape:
raise ValueError('Input Tensors must have the same shape')
if norm == 'inf':
# Workaround as PyTorch doesn't have elementwise clip
x_adv = torch.max(torch.min(x_adv, x + eps), x - eps)
else:
delta = x_adv - x
# Assume x and x_adv are batched tensors where the first dimension is
# a batch dimension
mask = delta.view(delta.shape[0], -1).norm(norm, dim=1) <= eps
scaling_factor = delta.view(delta.shape[0], -1).norm(norm, dim=1)
scaling_factor[mask] = eps
# .view() assumes batched images as a 4D Tensor
delta *= eps / scaling_factor.view(-1, 1, 1, 1)
x_adv = x + delta
return x_adv
def random_perturbation(x: torch.Tensor,
norm: Union[str, int],
eps: float) -> torch.Tensor:
"""Applies a random l_norm bounded perturbation to x
Assumes x is a 4D Tensor representing a batch of images
Args:
x: Batch of images
norm: Norm to measure size of perturbation
eps: Size of perturbation
Returns:
x_perturbed: Randomly perturbed version of x
"""
perturbation = torch.normal(torch.zeros_like(x), torch.ones_like(x))
if norm == 'inf':
perturbation = torch.sign(perturbation) * eps
else:
perturbation = project(torch.zeros_like(x), perturbation, norm, eps)
return x + perturbation
class RAW_PGD:
def __init__(self,
model,
num_steps: int = 120,
step_size: int = 0.01,
eps: float = 2.0,
norm=2,
clamp=(0, 1),
y_target=None,
loss_fn=nn.CrossEntropyLoss(),
random: bool = True):
self.model = model
self.loss_fn = loss_fn
self.num_steps = num_steps
self.step_size = step_size
self.norm = norm
self.eps = eps
self.clamp = clamp
self.y_target = y_target
self.random = random
def projected_gradient_descent(self, x, y):
"""Performs the projected gradient descent attack on a batch of images."""
x_adv = x.clone().detach().requires_grad_(True).to(x.device)
targeted = self.y_target is not None
num_channels = x.shape[1]
if self.random:
x_adv = random_perturbation(x_adv, self.norm, self.eps)
for i in range(self.num_steps):
_x_adv = x_adv.clone().detach().requires_grad_(True)
prediction = self.model(_x_adv)
loss = self.loss_fn(prediction, self.y_target if targeted else y)
loss.backward()
with torch.no_grad():
# Force the gradient step to be a fixed size in a certain norm
if self.norm == 'inf':
gradients = _x_adv.grad.sign() * self.step_size
else:
# Note .view() assumes batched image data as 4D tensor
gradients = _x_adv.grad * self.step_size / _x_adv.grad.view(
_x_adv.shape[0], -1) \
.norm(self.norm, dim=-1) \
.view(-1, num_channels, 1, 1)
if targeted:
# Targeted: Gradient descent with on the loss of the (incorrect) target label
# w.r.t. the image data
x_adv -= gradients
else:
# Untargeted: Gradient ascent on the loss of the correct label w.r.t.
# the model parameters
x_adv += gradients
# Project back into l_norm ball and correct range
if self.norm == 'inf':
# Workaround as PyTorch doesn't have elementwise clip
x_adv = torch.max(torch.min(x_adv, x + self.eps), x - self.eps)
else:
delta = x_adv - x
# Assume x and x_adv are batched tensors where the first dimension is
# a batch dimension
mask = delta.view(delta.shape[0], -1).norm(self.norm,
dim=1) <= self.eps
scaling_factor = delta.view(delta.shape[0], -1).norm(self.norm,
dim=1)
scaling_factor[mask] = self.eps
# .view() assumes batched images as a 4D Tensor
delta *= self.eps / scaling_factor.view(-1, 1, 1, 1)
x_adv = x + delta
x_adv = x_adv.clamp(*self.clamp)
return x_adv.detach()
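# A hedged usage sketch (the model, images `x`, and labels `y` are assumed to
# be provided by the caller; the hyperparameters are illustrative only):
#
#   attack = RAW_PGD(model, num_steps=40, step_size=0.01, eps=2.0, norm=2)
#   x_adv = attack.projected_gradient_descent(x, y)
#   # x_adv stays inside the L2 ball of radius eps around x and within [0, 1]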
| 34.631579
| 97
| 0.542553
|
6242474447ce0d60548535d3478479418e111873
| 21,356
|
py
|
Python
|
client/software/resources/modules/MU_Musica.py
|
MU-Software/Project_Musica_for_Contest
|
c1f63f381b167d6bfe701a36e998727cf4ed3bec
|
[
"MIT"
] | null | null | null |
client/software/resources/modules/MU_Musica.py
|
MU-Software/Project_Musica_for_Contest
|
c1f63f381b167d6bfe701a36e998727cf4ed3bec
|
[
"MIT"
] | null | null | null |
client/software/resources/modules/MU_Musica.py
|
MU-Software/Project_Musica_for_Contest
|
c1f63f381b167d6bfe701a36e998727cf4ed3bec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Project Musica main definition module
import json
try:
from . import *
except:
pass
from __init__ import *
from MU_GUI import *
COMBO_TEXT = \
"""\
COMBO
{0}\
"""
def randomID(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def toggleInterval(interval):
if interval.isPlaying():
interval.pause()
else:
interval.resume()
class MU_Musica(MU_GUI):
def __init__(self):
base.cTrav = CollisionTraverser()
self.collHandEvent = CollisionHandlerEvent()
self.collHandEvent.addInPattern('into-%in')
self.collHandEvent.addOutPattern('outof-%in')
MU_GUI.__init__(self)
self.song_info = json.load(open('resources/song/song_info.json', 'rb'))
self.select.song_info_dict = self.song_info
self.keyStat = {
'collBox_1' : False,
'collBox_2' : False,
'collBox_3' : False,
'collBox_4' : False
}
self.startup_musica()
base.accept("o", self.setup_in_game, ["MUSICA_0002"])
#GUI Controller
def GUI_mgr(self, mode):
if mode == 't2s':
self.title.unload_title_GUI()
taskMgr.doMethodLater(1, self.select.setup_select_GUI, "GUI_select_create")
taskMgr.doMethodLater(1, self.give_select_control, "give_select_control")
taskMgr.doMethodLater(1.5, self.timerTask, "timerTask")
base.ignore('tab')
elif mode == 's2p':
(self.select.unload_select_GUI())
elif mode == '':
pass
def title_coin_text_mgr(self, val):
self.credit_updater(val)
try:
if self.title.credit_return() >= 3 and self.title.title_str_int_list != [15,17,4,18,18,' ',0,13,24,' ',10,4,24]:
self.title.text_animation_change()
base.accept('tab', self.GUI_mgr, ['t2s'])
elif self.title.credit_return() < 3 and self.title.title_str_int_list != [8,13,18,4,17,19,' ',2,14,8,13,'(',18,')']:
self.title.text_animation_change('INSERT COIN(S)')
base.ignore('tab')
except:
pass
def give_select_control(self, task=None):
base.accept("arrow_up" , self.mode_select_movement_arrow, [-1])
base.accept("arrow_down", self.mode_select_movement_arrow, [+1])
base.accept("time-arrow_up-repeat" , self.mode_select_movement_arrow, [-1])
base.accept("time-arrow_down-repeat", self.mode_select_movement_arrow, [+1])
base.accept("arrow_left", self.menu_to_song, ['S2M'])
base.accept("arrow_right", self.menu_to_song, ['M2S'])
def mode_select_movement_arrow(self, pos, Long_Hold_Dummy=None):
pos = 3 if (self.select.m_pos < 1 and pos < 0)\
else -3 if (self.select.m_pos > 3-1 and pos > 0)\
else pos
self.mode_select_movement(self.select.m_pos + pos)
def mode_select_movement(self, num):
select_movement_seq_list = Parallel(name="select_movement_seq")
main_target = self.select.m_target_list[num]
main_target_pos = main_target.getPos()
not_target = [i for i in self.select.m_target_list if not i == main_target]
select_movement_seq_list.append(LerpColorScaleInterval(main_target, .25, VBase4(1, 1, 1, 1)))
select_movement_seq_list.append(
self.select.m_select_frame.posInterval(.1, (main_target_pos[0], 0, main_target_pos[1]+.035)))
select_movement_seq_list.append(
self.select.s_select_frame.posInterval(.1, (main_target_pos[0], 0, main_target_pos[1]+.035)))
for i in not_target:
select_movement_seq_list.append(LerpColorScaleInterval(i, .1, VBase4(1, 1, 1, .25)))
self.select.m_pos = num
select_movement_seq_list.start()
def menu_to_song(self, way):
main_target = self.select.m_target_list[self.select.m_pos]
main_target_pos = main_target.getPos()
seq = Parallel(name="menu_to_song")
if way == 'M2S':
self.select.a_Inf_STR["text"] = u"노래를 선택해 주세요"
pos = (-.05, 0, .05)
scale = (.55, 0, .125)
base.ignore("arrow_up")
base.ignore("arrow_down")
base.ignore("time-arrow_up-repeat")
base.ignore("time-arrow_down-repeat")
base.accept("arrow_up" , self.song_select_movement, [-1])
base.accept("arrow_down", self.song_select_movement, [+1])
base.accept("time-arrow_up-repeat" , self.song_select_movement, [-1])
base.accept("time-arrow_down-repeat", self.song_select_movement, [+1])
base.accept("arrow_left", self.menu_to_song, ['S2M'])
base.accept("arrow_right", self.select_to_play)
else:
self.select.a_Inf_STR["text"] = u"모드를 선택해 주세요"
pos = self.select.m_select_frame.getPos()
scale = self.select.m_select_frame.getScale()
base.ignore("arrow_up")
base.ignore("arrow_down")
base.ignore("time-arrow_up-repeat")
base.ignore("time-arrow_down-repeat")
base.accept("arrow_up" , self.mode_select_movement_arrow, [-1])
base.accept("arrow_down", self.mode_select_movement_arrow, [+1])
base.accept("time-arrow_up-repeat" , self.mode_select_movement_arrow, [-1])
base.accept("time-arrow_down-repeat", self.mode_select_movement_arrow, [+1])
base.accept("arrow_left", self.menu_to_song, ['S2M'])
base.accept("arrow_right", self.menu_to_song, ['M2S'])
seq.append(self.select.s_select_frame.posInterval(.1, pos))
seq.append(LerpScaleInterval(self.select.s_select_frame, .1, scale))
seq.start()
def song_select_movement(self, pos, Long_Hold_Dummy=None):
#if 7, return 3; if 8, return 3.
center_num = int(len(self.select.s_Lst_Lst)/2.1)
center_num += 1 if len(self.select.s_Lst_Lst)%2 else 0
if pos > 0:
self.select.s_Lst_Lst.append(self.select.s_Lst_Lst[0])
self.select.s_Lst_Lst = self.select.s_Lst_Lst[1:]
for i in range(len(self.select.s_Lst_Lst))[::-1]:
if i != 0:
self.select.s_Lst_Lst[i-1].getPos()[1]
self.select.s_Lst_Lst[i].setPos(0, self.select.s_Lst_Lst[i-1].getPos()[1])
else:
self.select.s_Lst_Lst[i].setPos(0, .5)
if not i in range(center_num-2, center_num+3):
self.select.s_Lst_Lst[i].setColorScale(1, 1, 1, 0)
else:
self.select.s_Lst_Lst[i].setColorScale(1, 1, 1, 1)
elif pos < 0:
self.select.s_Lst_Lst.insert(0, self.select.s_Lst_Lst[-1])
self.select.s_Lst_Lst = self.select.s_Lst_Lst[:-1]
for i in range(len(self.select.s_Lst_Lst))[::1]:
if i != len(self.select.s_Lst_Lst)-1:
self.select.s_Lst_Lst[i].setPos(0, self.select.s_Lst_Lst[i+1].getPos()[1])
else:
self.select.s_Lst_Lst[i].setPos(0, -.5)
if not i in range(center_num-2, center_num+3):
self.select.s_Lst_Lst[i].setColorScale(1, 1, 1, 0)
else:
self.select.s_Lst_Lst[i].setColorScale(1, 1, 1, 1)
else: return;
self.select.set_music_info(self.select.song_info_dict[self.select.s_Lst_Lst[center_num].getTag("songID")]['song_info'])
self.select.s_Pos += pos;
if self.select.s_Pos > len(self.select.s_Lst_Lst) - 1:
self.select.s_Pos = 0
elif self.select.s_Pos < 0:
self.select.s_Pos = len(self.select.s_Lst_Lst) -1
def select_to_play(self):
unload_returner = self.select.unload_select_GUI()
if unload_returner.strip():
self.setup_in_game(unload_returner)
def startup_musica(self):
camera.setPosHpr(0, 12.5, 10, 0, -113, 0)
#First, create hitbox that detects miss.
self.Hitbox_missWall = loader.loadModel('hitbox_missWall')
self.createHitbox(self.Hitbox_missWall, self.event_missWall)
self.Hitbox_missWall.setHpr(90, 0, 270)
self.Hitbox_missWall.setPos(0, -1, -5)
self.Hitbox_missWall.setScale(1, 1.25, .25)
self.Hitbox_missWall.reparentTo(hidden)
#Second, create hitbox that will be controlled by player.
self.Hitbox_user_list = list()
for i in range(4):
self.Hitbox_user_list.append(loader.loadModel('hitbox_user.egg'))
self.Hitbox_user_list[i].setTag('name', ('collBox_{0}'.format(i+1)))
self.createHitbox(self.Hitbox_user_list[i], self.event_success, self.event_success)
self.Hitbox_user_list[i].reparentTo(hidden)
self.Hitbox_user_list[i].setPos((.6*(-1+i)-.275)*5, -2, -15)
self.Hitbox_user_list[i].setScale(2, .3, 3)
self.Hitbox_user_list[i].setColor(0, 0, 0, 1)
self.Hitbox_music_start = loader.loadModel('hitbox_missWall')
self.createHitbox(self.Hitbox_music_start, self.event_startMusic)
self.Hitbox_music_start.setHpr(90, 0, 270)
self.Hitbox_music_start.setTransparency(1)
self.Hitbox_music_start.setPos(0, -1, -16)
self.Hitbox_music_start.setScale(1, 1.25, 1)
self.Hitbox_music_start.setColor(0, 0, 0, 0)
self.Hitbox_music_start.reparentTo(render)
#Third, load SFX.
self.play_input = loader.loadSfx('resources/se/play_input_short.wav')
self.play_input.setVolume(0.1)
base.accept('a', self.keyToBool, ['collBox_1', True])
base.accept('d', self.keyToBool, ['collBox_2', True])
base.accept('j', self.keyToBool, ['collBox_3', True])
base.accept('l', self.keyToBool, ['collBox_4', True])
base.accept('a-up', self.keyToBool, ['collBox_1', False])
base.accept('d-up', self.keyToBool, ['collBox_2', False])
base.accept('j-up', self.keyToBool, ['collBox_3', False])
base.accept('l-up', self.keyToBool, ['collBox_4', False])
#IN-Game function
def loadBackground(self):
self.background = OnscreenImage(parent=render2dp, image="galaxy.png")
self.background.setTransparency(1)
self.background.setColorScale(.5, .5, .5, .75)
base.cam2dp.node().getDisplayRegion(0).setSort(-20)
def setup_in_game(self, songID):
self.loadBackground()
self.pattern = self.patternLoader('resources/song/{0}/pattern.json'.format(self.song_info['id_list'][songID][0]))
self.music = loader.loadMusic('resources/song/{0}/music.wav'.format(self.song_info['id_list'][songID][0]))
self.music.stop()
for i in self.Hitbox_user_list:
i.reparentTo(render)
self.Hitbox_missWall.reparentTo(render)
self.select.unload_select_GUI()
self.play.setup_play_GUI()
LerpColorScaleInterval(self.title.GUI_BGI, .5, (0, 0, 0, 0)).start()
self.tunnel = Tunnel(self.song_info[songID]['tunnel']['path'],
self.song_info[songID]['tunnel']['speed'])
self.tunnel.start()
self.score.score_A_deadline = self.song_info[songID]['score_deadline']['A']
self.score.score_B_deadline = self.song_info[songID]['score_deadline']['B']
self.score.score_C_deadline = self.song_info[songID]['score_deadline']['C']
self.score.score_D_deadline = self.song_info[songID]['score_deadline']['D']
def unload_in_game(self):
for i in self.Hitbox_user_list:
i.reparentTo(hidden)
self.Hitbox_missWall.reparentTo(hidden)
self.play.unload_play_GUI()
self.tunnel.close()
LerpColorScaleInterval(self.title.GUI_BGI, .5, (1, 1, 1, 1)).start()
self.score.score_target = self.play.score_val
self.score.hit_count = self.play.highest_combo_count
self.score.miss_count = self.play.miss_count
self.score.combo_count = self.play.combo_count
self.score.unload_score_GUI()
self.score.setup_score_GUI()
def score_to_select():
if self.title.credit_return():
self.score.unload_score_GUI()
self.select.setup_select_GUI()
self.give_select_control()
self.title_coin_text_mgr(self.title.credit_return()-1)
base.ignore('tab')
else:
self.end.setup_end_GUI()
base.accept('tab', score_to_select)
def patternLoader(self, path=None):
pattern_open = json.load(open(path, 'r'))
pattern_seq = Sequence(name='pattern_sequence')
pattern = sorted(pattern_open, key=lambda x: float(x.keys()[0]))
pattern_seq.append(Func(self.createShortNote, 2, None, 2.5, 'note', (0, 0, 0, 0)))
current_time = 0.
for i, val in enumerate(pattern):
if current_time == float(val.keys()[0]):
pass
else:
pattern_seq.append(Wait(float(val.keys()[0]) - current_time))
current_time = float(val.keys()[0])
if val.values()[0].keys()[0] == 'shortNote':
pattern_seq.append(Func(self.createShortNote, val.values()[0][val.values()[0].keys()[0]]))
pattern_seq.start()
print pattern_seq
return pattern_seq
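    # Illustration of the pattern.json layout expected by patternLoader above
    # (times and lane numbers are made up): a list of one-entry dicts keyed by
    # the note time in seconds, e.g.
    #   [{"0.50": {"shortNote": 1}}, {"1.25": {"shortNote": 3}}]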
def keyToBool(self, mapKey, status):
self.keyStat[mapKey] = status
if status:
self.Hitbox_user_list[int(mapKey[-1:])-1].setColor(0, 0, 1, 1)
def mk_false_task(target):
self.keyStat[target] = False
self.Hitbox_user_list[int(target[-1:])-1].setColor(0, 0, 0, 1)
taskMgr.doMethodLater(.5, mk_false_task, 'mk_false_task_{0}'.format(mapKey), extraArgs = [mapKey])
else:
self.Hitbox_user_list[int(mapKey[-1:])-1].setColor(0, 0, 0, 1)
taskMgr.remove('mk_false_task_{0}'.format(mapKey))
self.play_input.play() if status else None
def stage_ender(self, task):
if self.music.status() == self.music.READY:
print("END")
self.unload_in_game()
return task.done
return task.cont
#Universal
def createHitbox(self, object, InFunction=None, OutFunction=None, name='note', show=False):
bound = object.getTightBounds()
box = CollisionBox(bound[0],bound[1])
collName = 'Collision_{0}_{1}'.format(name, randomID())
cnodePath = object.attachNewNode(CollisionNode(collName))
cnodePath.node().addSolid(box)
base.cTrav.addCollider(cnodePath, self.collHandEvent)
if InFunction:
base.accept('into-' + collName, InFunction)
if OutFunction:
base.accept('outof-' + collName, OutFunction)
if show:
cnodePath.show()
#Note
def createShortNote(self, line_num, function=None, speed=2.5, model_path='note', color=(1,0,1,1)):
        # Find the note node; if it already exists, reuse it instead of creating a new one.
if render.find("note_node"):
note_node = render.find("note_node")
else:
note_node = render.attachNewNode("note_node")
line = -4.7 if line_num == 1 else -1.5 if line_num == 2 else 1.7 if line_num == 3 else 4.9
pos_start = Point3(line, -2, -200)
pos_end = Point3(line, -2, 50)
note = loader.loadModel(model_path)
if function:
self.createHitbox(note, function)
else:
self.createHitbox(note, None)
note.setTag('note', 'short')
note.reparentTo(note_node)
note.setHpr(90, 0, 270)
note.setPos(pos_start)
note.setColorScale(1, 0, 1, 1)
note.setScale(1.45)
note.setTransparency(TransparencyAttrib.MAlpha)
note.setColorScale(color)
self.note_interval = note.posInterval(speed, pos_end)
Sequence(self.note_interval, name=('note_short_'+randomID())).start()
def createLongNote(self, line_num, start_function=None, mid_function=None, end_function=None, speed=2.5, model_path='note', color=(1,1,1,1)):
if render.find("note_node"):
note_node = render.find("note_node")
else:
note_node = render.attachNewNode("note_node")
line = -4.5 if line_num == 1 else -1.5 if line_num == 2 else 1.5 if line_num == 3 else 4.5
pos_start = Point3(line, -2, -200)
pos_end = Point3(line,-2, 50)
note_head = loader.loadModel('note_head')
note_body = loader.loadModel('note_body')
note_tail = loader.loadModel('note_tail')
if start_function:
self.createHitbox(note_head, start_function)
else:
self.createHitbox(note_head, self.event_dummy)
if mid_function:
self.createHitbox(note_body, mid_function)
else:
pass
if end_function:
self.createHitbox(note_tail, end_function)
else:
self.createHitbox(note_tail, self.event_dummy)
self.createHitbox(note_head, self.event_dummy)
self.createHitbox(note_tail, self.event_dummy)
note_head.setTag('note', 'head_{0}'.format(line_num))
note_body.setTag('note', 'body_{0}'.format(line_num))
note_tail.setTag('note', 'tail_{0}'.format(line_num))
note_head.reparentTo(note_node)
note_body.reparentTo(note_node)
note_tail.reparentTo(note_node)
note_head.setHpr(90, 0, 270)
note_body.setHpr(90, 0, 270)
note_tail.setHpr(90, 0, 270)
note_head.setTransparency(TransparencyAttrib.MAlpha)
note_body.setTransparency(TransparencyAttrib.MAlpha)
note_tail.setTransparency(TransparencyAttrib.MAlpha)
note_head.setPos(pos_start)
note_body.setPos(pos_start)
note_tail.setPos(pos_start)
note_head.setColorScale(color)
note_body.setColorScale(color)
note_tail.setColorScale(color)
note_head.setScale(.5)
note_body.setScale(.5)
note_tail.setScale(.5)
head_interval = Parallel(note_head.posInterval(speed, pos_end),
name=('note_long_head_{0}_{1}'.format(line_num, randomID())))
body_interval = Parallel(note_body.posInterval(speed*2, pos_end),
note_body.scaleInterval(speed*2, (499, 0.5, 0.5)),
name=('note_long_body_create_{0}_{1}'.format(line_num, randomID())))
head_interval.start()
body_interval.start()
def endLongNote(self, line_num):
body_interval_list = ivalMgr.getIntervalsMatching('note_long_body_create_{0}_*'.format(line_num))
for i in body_interval_list:
i.pause()
i = None
note_node = render.find("note_node")
# head = note_node.find('=note=head_*')
body = note_node.find('=note=body_{0}'.format(line_num))
tail = note_node.find('=note=tail_{0}'.format(line_num))
body_interval = Parallel(body.posInterval(self.note_speed, (body.getX(), body.getY(), body.getZ()+250)),
name=('note_long_body_{0}_{1}'.format(line_num, randomID())))
tail_interval = Parallel(tail.posInterval(self.note_speed, Point3(tail.getX(),-1, 50)),
name=('note_long_tail_{0}_{1}'.format(line_num, randomID())))
body_interval.start()
tail_interval.start()
#Events
def event_dummy(self, collEntry):
pass
def event_missWall(self, collEntry):
model = collEntry.getFromNodePath().getParent()
collName = collEntry.getFromNodePath().getNode(0).getName()
self.play.combo_count = 0
self.play.miss_updater(1)
self.play.set_combo_text('MISS')
base.ignore('into-' + collName)
model.removeNode()
def event_startMusic(self, collEntry):
Hitbox_node = collEntry.getIntoNodePath().getParent()
Note_node = collEntry.getFromNodePath().getParent()
if Note_node.getTag('note') == 'short':
Hitbox_node.removeNode()
Note_node.removeNode()
self.music.play()
taskMgr.add(self.stage_ender, 'stage_ender')
def event_success(self, collEntry):
Hitbox_node = collEntry.getIntoNodePath().getParent()
Hitbox_tag = Hitbox_node.getTag('name')
if self.keyStat[Hitbox_tag]:
model = collEntry.getFromNodePath().getParent()
collName = collEntry.getFromNodePath().getNode(0).getName()
self.play.combo_count += 1
self.play.hit_count += 1
self.play.score_updater(200)
self.play.set_combo_text(COMBO_TEXT.format(self.play.combo_count))
if model.getTag('note') == 'short':
base.ignore('into-' + collName)
model.removeNode()
#MIDI to JSON exporter
class midi2pattern:
def convert(self, path=None, enable_long_using_tick=False):
noteSeq = list()
def event(time, event_func, args=None):
return {str(time) : {str(event_func) : args}}
event_templete = dict()
file = midi.read_midifile(path)
bpm = 0
current_timeline = 0.
pattern = list(file)
resolution = file.resolution
for i1 in pattern:
for i2 in i1:
if i2.name == 'Set Tempo':
bpm = i2.get_bpm()
tick_sec = ((60.0 * 1000000.0 / bpm) / resolution) / 1000000.0
elif i2.name == 'Note On':
if i2.tick:
current_timeline += tick_sec * i2.tick
note = i2.data[0]
if (note in [67, 69, 71, 72]) or (i2.tick > 60 and enable_long_using_tick):
print "LONG"
line = 4 if note == 72 else 3 if note == 71 else 2 if note == 69 else 1
noteSeq.append(event(current_timeline, 'longNote_Start', line))
else:
line = 4 if note == 65 else 3 if note == 64 else 2 if note == 62 else 1 if note == 60 else random.randrange(1, 5)
noteSeq.append(event(current_timeline, 'shortNote', line))
elif i2.name == 'Note Off':
if i2.tick:
current_timeline += tick_sec * i2.tick
note = i2.data[0]
if note in [67, 69, 71, 72]:
noteSeq.append(event(current_timeline, 'longNote_End'))
return noteSeq
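# Quick sanity check of the tick-to-seconds conversion used in convert() above,
# with assumed values bpm = 120 and resolution = 480 ticks per beat:
#
#   tick_sec = ((60.0 * 1000000.0 / 120) / 480) / 1000000.0   # ~0.00104 s per tick
#
# so a Note On event with tick == 480 advances current_timeline by 0.5 seconds.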
class Tunnel:
def __init__(self, path='tunnel_default.egg', speed=1, length=50, quantity=8):
self.tunnel_model_value = quantity
self.tunnel_time = speed
self.tunnel_segment_length = length
self.path = path
#for pausing tunnel
self.pause = (lambda:toggleInterval(self.tunnelMove))
def start(self):
self.defineFog()
self.initTunnel(path=self.path)
def close(self):
if self.tunnelMove.isPlaying():
self.tunnelMove.finish()
else:
pass
model_list= render.findAllMatches("=tunnel")
for m in model_list:
m.removeNode()
#Tunnel related Func
def defineFog(self):
self.fog = Fog('distanceFog')
self.fog.setColor(0)
self.fog.setExpDensity(.01)
render.setFog(self.fog)
def initTunnel(self, path, tag='normal'):
self.tunnel = [None] * (self.tunnel_model_value + 1)
for x in range(self.tunnel_model_value + 1):
self.tunnel[x] = loader.loadModel(path)
self.tunnel[x].setTag('tunnel',tag)
self.tunnel[x].setColor(1, 1, 1)
self.tunnel[x].setTransparency(True)
self.tunnel[x].setColorScale(1, 1, 1, .99)
#self.tunnel[x].setRenderModeWireframe(True)
if x == 0:
self.tunnel[x].reparentTo(render)
else:
self.tunnel[x].reparentTo(self.tunnel[x - 1])
self.tunnel[x].setPos(0, 0, -self.tunnel_segment_length)
self.contTunnel()
def contTunnel(self):
self.tunnel = self.tunnel[1:] + self.tunnel[0:1]
self.tunnel[0].setZ(0)
#self.tunnel[0].setP(90)
self.tunnel[0].reparentTo(render)
self.tunnel[0].setScale(.155, .155, .305)
for i in range(self.tunnel_model_value, 0, -1):
self.tunnel[i].reparentTo(self.tunnel[i - 1])
self.tunnel[self.tunnel_model_value].setZ(-self.tunnel_segment_length)
self.tunnel[self.tunnel_model_value].setScale(1)
self.tunnelMove = Sequence(
LerpFunc(self.tunnel[0].setZ,
duration=self.tunnel_time,
fromData=0,
toData=self.tunnel_segment_length * .305),
# Func(self.contTunnel),
name='note_tunnel')
self.tunnelMove.loop()
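    # contTunnel recycles its segment list by rotation; a minimal sketch of the
    # same idea with plain strings (the real list holds loaded models):
    #
    #   segments = ['A', 'B', 'C', 'D']
    #   segments = segments[1:] + segments[0:1]   # -> ['B', 'C', 'D', 'A']
    #
    # The new front segment is re-parented to render at Z = 0 and the old front
    # segment is pushed to the far end, which keeps the tunnel endless while
    # tunnelMove loops.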
| 37.865248
| 142
| 0.708653
|
1fd52e657e719d5aa5e86e8f3b96c3512bd00e6a
| 2,536
|
py
|
Python
|
lib/django-1.4/django/contrib/sites/tests.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
lib/django-1.4/django/contrib/sites/tests.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 4
|
2016-02-28T05:53:54.000Z
|
2017-01-03T07:39:50.000Z
|
lib/django-1.4/django/contrib/sites/tests.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 13
|
2016-02-28T00:14:23.000Z
|
2021-05-03T15:47:36.000Z
|
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
class SitesFrameworkTests(TestCase):
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
self.old_Site_meta_installed = Site._meta.installed
Site._meta.installed = True
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
def test_save_another(self):
# Regression for #17415
        # On some backends the sequence needs to be reset after saving with an explicit ID.
        # Test that there are no sequence collisions by saving another site.
Site(domain="example2.com", name="example2.com").save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertTrue(isinstance(s, Site))
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual(u"example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual(u"Example site", site.name)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertTrue(isinstance(site, Site))
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
Site._meta.installed = False
site = get_current_site(request)
self.assertTrue(isinstance(site, RequestSite))
self.assertEqual(site.name, u"example.com")
| 39.015385
| 82
| 0.680205
|
354acac3f4650bc15132c32b15b69b644fd966eb
| 4,630
|
py
|
Python
|
models/import_products.py
|
hedra-digital/SOC
|
36a99dca15424ff8c06d51eaad2bb21afbfdace5
|
[
"MIT"
] | null | null | null |
models/import_products.py
|
hedra-digital/SOC
|
36a99dca15424ff8c06d51eaad2bb21afbfdace5
|
[
"MIT"
] | null | null | null |
models/import_products.py
|
hedra-digital/SOC
|
36a99dca15424ff8c06d51eaad2bb21afbfdace5
|
[
"MIT"
] | null | null | null |
from odoo import models, fields, api, _
from odoo.exceptions import except_orm
# StringIO / csv / base64 are required by process_csv_file() below
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO
import csv
import base64
class import_wizard_product_adj(models.Model):
_name = 'import.wizard.product.adj'
csv_file = fields.Binary("CSV File", required=True)
log = fields.Char()
@api.multi
def process_csv_file(self):
input = StringIO.StringIO(base64.decodestring(self.csv_file))
reader = csv.reader(input, delimiter=',')
product_obj = self.env['product.product']
sil_obj = self.env['stock.inventory.line']
stock_inv = self.env['stock.inventory'].browse(self._context.get('active_id'))
line_vals = []
log = ''
if stock_inv.state not in ['draft','confirm']:
raise except_orm(_('Please Try in Draft State'),
_('Please try in draft state only'))
if stock_inv.filter != 'partial':
stock_inv.filter = 'partial'
stock_inv.prepare_inventory()
default_location_id = stock_inv.location_id.id
ctx = dict(self._context)
ctx.update({'default_location_id':default_location_id})
line_no=0
# for line in reader:
# line_no += 1
# if len(line) >= 4:
# product = product_obj.search(['|',('ean13','=',line[2]),('name','=',line[1])], limit=1)
# if not product:
# log += 'No product found for Barcode "%s" on Line %s \n'%(str(line[0]), str(line_no))
# continue
# try:
# qty = float(line[1])
# except:
# log += 'Improper Qty "%s" for Product "%s" on Line %s \n'%(str(line[1]), product.name,str(line_no))
# continue
# existing_line = sil_obj.search([('product_id','=',product.id),
# ('inventory_id', '=', stock_inv.id)], limit=1)
# # Check if that products line already exist, if exist, then just update the quantity,
# # else create new line
# if existing_line:
# existing_line.product_qty = qty
# updated_vals = existing_line.onchange_createline( location_id=existing_line.location_id and existing_line.location_id.id or False,
# product_id=existing_line.product_id and existing_line.product_id.id or False,
# uom_id=existing_line.product_uom_id and existing_line.product_uom_id.id or False,
# package_id=existing_line.package_id and existing_line.package_id.id or False,
# prod_lot_id=existing_line.prod_lot_id and existing_line.prod_lot_id.id or False,
# partner_id=existing_line.partner_id and existing_line.partner_id.id or False,
# company_id=existing_line.company_id and existing_line.company_id.id or False, context=None)
# if updated_vals and updated_vals.get('value') and updated_vals['value'].get('theoretical_qty'):
# existing_line._model._store_set_values(self._cr, self._uid, [x.id for x in existing_line], ['theoretical_qty'], self._context)
# else:
# line_vals = sil_obj.with_context(ctx).onchange_createline(product_id=product.id,
# location_id=stock_inv.location_id.id)
# line_vals.update({'inventory_id':stock_inv.id,'location_id':default_location_id,
# 'product_qty':qty, 'product_id': product.id})
# inv_line = sil_obj.create(line_vals)
# if log:
# self.log = log
# else:
# self.log = 'No errors found in importing the products'
return {
'name': _('File Processed Successfully'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'update.real.stock',
'view_id': self.env.ref('physical_inventory.update_real_stock_form').id,
'type': 'ir.actions.act_window',
'res_id': self.id,
'target': 'new'
}
| 52.613636
| 163
| 0.520734
|
559593f00d1b4d4a522778582e34ccfc1c85ac83
| 745
|
py
|
Python
|
visuals/2022-python/circles.py
|
houseofleft/Processing
|
13e65c9ef5918c153ed83dcb44c4b53277f8bdd6
|
[
"MIT"
] | 8
|
2020-03-14T20:32:00.000Z
|
2020-07-22T17:51:31.000Z
|
visuals/2022-python/circles.py
|
houseofleft/generative_processing_scripts
|
13e65c9ef5918c153ed83dcb44c4b53277f8bdd6
|
[
"MIT"
] | null | null | null |
visuals/2022-python/circles.py
|
houseofleft/generative_processing_scripts
|
13e65c9ef5918c153ed83dcb44c4b53277f8bdd6
|
[
"MIT"
] | 2
|
2020-03-14T16:33:11.000Z
|
2020-03-17T18:07:53.000Z
|
import shades
import random
canvas = shades.Canvas(1500, 1500)
noise = shades.NoiseField(scale=0.02)
ink = shades.BlockColor()
colors = [
(249, 65, 68),
(243, 114, 44),
(248, 150, 30),
(249, 132, 74),
(249, 199, 79),
(144, 190, 109),
(67, 170, 139),
(77, 144, 142),
(87, 117, 144),
(39, 125, 161),
]
for y in range(0, canvas.height, 5):
segments = 15
for i in range(segments):
x = (canvas.width/segments) * i
if i % 2:
size = canvas.width/segments/2 * noise.noise((x, y))
else:
size = canvas.width/segments/2 * (1 - noise.noise((x, y)))
ink.color = random.choice(colors)
ink.circle_outline(canvas, (x, y), size, 1)
canvas.show()
| 22.575758
| 70
| 0.544966
|
29a9ff2af58a72c0a6358d2019a9a62d3ca481c1
| 287
|
py
|
Python
|
task_scheduler/src/send_mail_task/views.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | null | null | null |
task_scheduler/src/send_mail_task/views.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 40
|
2020-06-05T22:10:58.000Z
|
2022-03-11T23:56:09.000Z
|
task_scheduler/src/send_mail_task/views.py
|
muhammad-mamdouh/Django_Projects
|
1f31e12aefb36b33474256db40a2c551882f445e
|
[
"MIT"
] | 1
|
2021-03-31T10:30:03.000Z
|
2021-03-31T10:30:03.000Z
|
from django.http import HttpResponse
from .tasks import sleepy, deliver_mail_task
def index(request):
sleepy.delay(5)
return HttpResponse('<h1>Done!</h1>')
def deliver_mail(request):
deliver_mail_task.delay()
return HttpResponse('<h1>EMAIL HAS BEEN DELIVERED!</h1>')
| 22.076923
| 61
| 0.731707
|
d9efb4185bdea7b71a00f547041b6ec0f38f282c
| 2,612
|
py
|
Python
|
jwt_frappe/auth.py
|
anvilerp/jwt_frappe
|
b034ed23070fdc7ee9dc4a17403d48be75bb8c9e
|
[
"MIT"
] | 4
|
2020-11-20T18:53:49.000Z
|
2021-07-29T05:41:13.000Z
|
jwt_frappe/auth.py
|
anvilerp/jwt_frappe
|
b034ed23070fdc7ee9dc4a17403d48be75bb8c9e
|
[
"MIT"
] | null | null | null |
jwt_frappe/auth.py
|
anvilerp/jwt_frappe
|
b034ed23070fdc7ee9dc4a17403d48be75bb8c9e
|
[
"MIT"
] | 2
|
2021-07-28T20:49:26.000Z
|
2021-08-14T22:47:04.000Z
|
import frappe
import jwt
from frappe.auth import HTTPRequest, LoginManager, get_lang_code, check_session_stopped, CookieManager
# frappe's CookieManager still uses the old-style class definition
class CookieManagerJWT(CookieManager, object):
def flush_cookies(self, response):
# use this opportunity to set the response headers
response.headers["X-Client-Site"] = frappe.local.site
if frappe.flags.jwt_clear_cookies:
# Case when right after login
# We set the flag on session_create
self.cookies = frappe._dict()
if frappe.flags.jwt:
# Case when the incoming request has jwt token
# We leave cookies untouched
# There can be other browser tabs
return
return super(CookieManagerJWT, self).flush_cookies(response)
class AnvilHTTPRequest(HTTPRequest):
def __init__(self):
# Get Environment variables
self.domain = frappe.request.host
if self.domain and self.domain.startswith('www.'):
self.domain = self.domain[4:]
if frappe.get_request_header('X-Forwarded-For'):
frappe.local.request_ip = (frappe.get_request_header(
'X-Forwarded-For').split(",")[0]).strip()
elif frappe.get_request_header('REMOTE_ADDR'):
frappe.local.request_ip = frappe.get_request_header('REMOTE_ADDR')
else:
frappe.local.request_ip = '127.0.0.1'
# language
self.set_lang()
        # Connect to the DB before the JWT check so that token errors can be
        # stored; otherwise we get an Internal Server Error.
self.connect()
# JWT
jwt_token = None
# Check for Auth Header, if present, replace the request cookie value
if frappe.get_request_header("Authorization"):
token_header = frappe.get_request_header(
"Authorization").split(" ")
if token_header[0].lower() not in ("basic", "bearer") and ":" not in token_header[-1]:
jwt_token = token_header[-1]
elif frappe.request.path.startswith('/private/files/') and frappe.request.args.get("token"):
jwt_token = frappe.request.args.get("token")
if jwt_token:
headers = frappe._dict(frappe.request.headers)
headers["Authorization"] = f"Bearer {jwt_token}"
frappe.request.headers = headers
# load cookies
frappe.local.cookie_manager = CookieManagerJWT()
# login
frappe.local.login_manager = LoginManager()
if frappe.form_dict._lang:
lang = get_lang_code(frappe.form_dict._lang)
if lang:
frappe.local.lang = lang
self.validate_csrf_token()
# write out latest cookies
frappe.local.cookie_manager.init_cookies()
# check status
check_session_stopped()
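        # How the Authorization handling above behaves (the token strings below
        # are placeholders, not real tokens):
        #
        #   "Authorization: Bearer eyJ0eXAi..."  -> token_header[0] is "bearer",
        #       so jwt_token stays None and frappe's normal auth path is used.
        #   "Authorization: eyJ0eXAi..."         -> neither basic nor bearer and
        #       no ":" in it, so it is treated as a JWT and re-injected into the
        #       headers as "Bearer <token>" before LoginManager runs.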
| 31.853659
| 102
| 0.696401
|
c67c179258c3ed3c3cd80fe1ccbbc4a013ab17b7
| 31,043
|
py
|
Python
|
Autocoders/Python/src/fprime_ac/generators/visitors/ComponentVisitorBase.py
|
sthagen/nasa-fprime
|
7762d633d1c0728e68ef9217fb12a7c3070b61ac
|
[
"Apache-2.0"
] | 1
|
2022-03-15T16:17:15.000Z
|
2022-03-15T16:17:15.000Z
|
Autocoders/Python/src/fprime_ac/generators/visitors/ComponentVisitorBase.py
|
sthagen/nasa-fprime
|
7762d633d1c0728e68ef9217fb12a7c3070b61ac
|
[
"Apache-2.0"
] | null | null | null |
Autocoders/Python/src/fprime_ac/generators/visitors/ComponentVisitorBase.py
|
sthagen/nasa-fprime
|
7762d633d1c0728e68ef9217fb12a7c3070b61ac
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# NAME: ComponentVisitorBase.py
#
# DESCRIPTION: A base class for test code generation.
#
# AUTHOR: bocchino
# EMAIL: bocchino@jpl.nasa.gov
# DATE CREATED : August 24, 2015
#
# Based on code by Leonard Reder
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import datetime
import logging
import sys
from getpass import getuser
from fprime_ac.generators import formatters
from fprime_ac.generators.visitors import AbstractVisitor
from fprime_ac.models import ModelParser
#
# Python extension modules and custom interfaces
#
from fprime_ac.utils import ConfigManager
#
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
#
LAST = 0
NOT_LAST = 1
class ComponentVisitorBase(AbstractVisitor.AbstractVisitor):
"""
An abstract base class for unit test code generation.
"""
__instance = None
__config = None
__fp = None
__form = None
__form_comment = None
__model_parser = None
__visitor = None
def __init__(self):
super().__init__()
def _writeTmpl(self, c, visit_str):
"""
Wrapper to write tmpl to files desc.
"""
DEBUG.debug("ComponentVisitorBase:%s" % visit_str)
DEBUG.debug("===================================")
DEBUG.debug(c)
self.__fp.writelines(c.__str__())
DEBUG.debug("===================================")
def argsString(self, args):
"""
Make a list of args into a string
"""
return ", ".join(args)
def buildFileName(self, obj):
"""
Build the file name
"""
if self.config("component", "XMLDefaultFileName") == "True":
filename = (
obj.get_namespace()
+ obj.get_name()
+ self.config("component", self.__visitor)
)
DEBUG.info(
"Generating code filename: %s, using XML namespace and name attributes..."
% filename
)
else:
xml_file = obj.get_xml_filename()
x = xml_file.split(".")
s = self.config("component", "ComponentXML").split(".")
l = len(s[0])
if (x[0][-l:] == s[0]) & (x[1] == s[1]):
filename = x[0].split(s[0])[0] + self.config(
"component", self.__visitor
)
DEBUG.info("Generating code filename: %s..." % filename)
else:
msg = (
"XML file naming format not allowed (must be XXXComponentAi.xml), Filename: %s"
% xml_file
)
PRINT.info(msg)
raise ValueError(msg)
return filename
def commandArgsStr(self):
"""
Make a list of command args into a string
"""
def f(lst):
def g(xxx_todo_changeme):
(name, a, b, c) = xxx_todo_changeme
return name
return self.argsString(list(map(g, lst)))
return f
def config(self, a, b):
return self.__config.get(a, b)
def doxygenPostComment(self, comment):
"""
Emit a doxygen post comment
"""
if comment is None or comment == "":
return ""
else:
return "/*!< " + comment + "*/"
def doxygenPreComment(self, comment):
"""
Emit a doxygen pre comment
"""
if comment is None or comment == "":
return ""
else:
return "/*! " + comment + "*/"
def emitComment(self, comment):
"""
Emit a comment
"""
if comment is None or comment == "":
return ""
else:
return "/* " + comment + "*/"
def emitIndent(self, indent):
str = ""
for i in range(0, indent):
str += " "
return str
def emitNonPortParamsCpp(self, indent, params):
"""
Emit a list of non-port function parameters in a .cpp file
"""
return self.emitParams(self.paramStrsCpp, indent, params)
def emitNonPortParamsHpp(self, indent, params):
"""
Emit a list of non-port function parameters in a .hpp file
"""
return self.emitParams(self.paramStrsHpp, indent, params)
def emitParam(self, kind, indent, paramStrs, param):
"""
Emit a parameter
"""
paramStr, commentStr = paramStrs(param)
indentStr = self.emitIndent(indent)
if kind == LAST:
return indentStr + paramStr + commentStr
else:
return indentStr + paramStr + "," + commentStr + "\n"
def emitParams(self, paramStrs, indent, params):
"""
Emit a list of parameters (port or non-port)
"""
length = len(params)
if length == 0:
return ""
else:
str = ""
for i in range(0, length - 1):
str += self.emitParam(NOT_LAST, indent, paramStrs, params[i])
str += self.emitParam(LAST, indent, paramStrs, params[length - 1])
return str
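    # For example, assuming
    #   params = [("queueDepth", "const NATIVE_INT_TYPE", "The queue depth"),
    #             ("instance", "const NATIVE_INT_TYPE", "The instance number")]
    # emitNonPortParamsHpp(8, params) renders roughly as
    #
    #         const NATIVE_INT_TYPE queueDepth, /*!< The queue depth*/
    #         const NATIVE_INT_TYPE instance /*!< The instance number*/
    #
    # i.e. every parameter but the last gets a trailing comma, and the trailing
    # comment comes from doxygenPostComment.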
def emitPortParamsCpp(self, indent, params):
"""
Emit a list of port function parameters in a .cpp file
"""
return self.emitParams(self.portParamStrsCpp, indent, params)
def emitPortParamsHpp(self, indent, params):
"""
Emit a list of port function parameters in a .hpp file
"""
return self.emitParams(self.portParamStrsHpp, indent, params)
def eventArgsStr(self):
"""
Make a list of event args into a string
"""
def f(args):
def g(lst):
name = lst[0]
return name
return self.argsString(list(map(g, args)))
return f
def finishSourceFilesVisit(self, obj):
self.__fp.close()
def fp(self):
return self.__fp
def includes1Visit(self, obj):
pass
def includes2Visit(self, obj):
pass
def init(self, obj, c):
self.initPreamble(obj, c)
self.initCommands(obj, c)
self.initEvents(obj, c)
self.initInternalInterfaces(obj, c)
self.initIncludes(obj, c)
# self.initParameters(obj, c)
self.initPorts(obj, c)
self.initTelemetry(obj, c)
self.initParameters(obj, c)
if c.kind == "passive":
c.params_init_hpp = [c.param_instance_default_zero]
c.params_init_cpp = [c.param_instance]
elif c.needs_msg_size:
c.params_init_hpp = [
c.param_queueDepth,
c.param_msgSize,
c.param_instance_default_zero,
]
c.params_init_cpp = [c.param_queueDepth, c.param_msgSize, c.param_instance]
else:
c.params_init_hpp = [c.param_queueDepth, c.param_instance_default_zero]
c.params_init_cpp = [c.param_queueDepth, c.param_instance]
def initBase(self, visitor):
self.__config = ConfigManager.ConfigManager.getInstance()
self.__form = formatters.Formatters.getInstance()
self.__form_comment = formatters.CommentFormatters()
self.__model_parser = ModelParser.ModelParser.getInstance()
self.__visitor = visitor
DEBUG.info("ComponentVisitor: Instanced.")
self.bodytext = ""
self.prototypetext = ""
def initCommandParams(self, obj, c):
"""
Command function parameters for code generation
"""
c.param_cmdSeq = ("cmdSeq", "const U32", "The command sequence number")
c.param_opCode = ("opCode", "const FwOpcodeType", "The opcode")
c.param_response = (
"response",
"const Fw::CmdResponse",
"The command response",
)
def initCommands(self, obj, c):
c.has_commands = len(obj.get_commands()) > 0
c.commands = self.__model_parser.getCommandsList(obj)
c.commands_sync = self.__model_parser.getCommandsListSync(obj)
c.command_enums = self.__model_parser.getEnumList(obj)
c.command_param_strs = self.__model_parser.getCommandArgsPrototypeStringDict(
obj
)
c.command_args_str_dict = c.command_param_strs
c.command_args = self.__model_parser.getCommandArgsDict(obj)
c.command_params = self.__model_parser.getCommandArgsDict(obj, True)
c.command_args_str = self.commandArgsStr()
self.initCommandParams(obj, c)
def initCompIncludePath(self, obj, c):
"""
Configurable override for includes
"""
relative_path = self.relativePath()
if self.__config.get("includes", "comp_include_path") == "None":
if relative_path is not None:
c.comp_include_path = relative_path
else:
c.comp_include_path = obj.get_namespace()
else:
c.comp_include_path = self.__config.get("includes", "comp_include_path")
c.include_path = c.comp_include_path
def initEventParams(self, obj, c):
"""
Event function parameters for code generation
Some of these are also used for telemetry
"""
c.param_event_id = ("id", "const FwEventIdType", "The event ID")
c.param_timeTag = ("timeTag", "Fw::Time&", "The time")
c.param_const_timeTag = ("timeTag", "const Fw::Time&", "The time")
c.param_log_severity = ("severity", "const Fw::LogSeverity", "The severity")
c.param_text_log_severity = (
"severity",
"const Fw::TextLogSeverity&",
"The severity",
)
c.param_args = ("args", "Fw::LogBuffer&", "The serialized arguments")
c.param_text = ("text", "const Fw::TextLogString&", "The event string")
def initEvents(self, obj, c):
c.has_events = len(obj.get_events()) > 0
c.events = self.__model_parser.getEventsList(obj)
c.event_enums = self.__model_parser.getEventEnumList(obj)
c.event_args = self.__model_parser.getEventArgsDict(obj)
c.event_params = c.event_args
c.event_args_str = self.eventArgsStr()
c.event_param_strs = self.__model_parser.getEventArgsPrototypeStringDict(obj)
self.initEventParams(obj, c)
def initIncludeName(self, obj, c):
"""
Generate the header file name
"""
if self.__config.get("component", "XMLDefaultFileName") == "False":
c.include_name = obj.get_xml_filename().split("ComponentAi.xml")[0]
else:
c.include_name = c.name
def initIncludes(self, obj, c):
self.initTypeIncludes(obj, c)
self.initPortIncludes(obj, c)
self.initSerialIncludes(obj, c)
self.initIncludeName(obj, c)
self.initCompIncludePath(obj, c)
def initInternalInterfaces(self, obj, c):
c.has_internal_interfaces = len(obj.get_internal_interfaces()) > 0
c.internal_interface_enums = self.__model_parser.getInternalInterfaceEnumList(
obj
)
c.internal_interfaces = self.__model_parser.getInternalInterfacesList(obj)
c.internal_interface_param_strs = (
self.__model_parser.getInternalInterfaceArgsPrototypeStringDict(obj)
)
c.internal_interface_args_str_dict = c.internal_interface_param_strs
c.internal_interface_args = self.__model_parser.getInternalInterfaceArgsDict(
obj
)
c.internal_interface_args_str = self.internalInterfaceArgsStr()
c.internal_interface_params = self.__model_parser.getInternalInterfaceArgsDict(
obj, True
)
def initMsgTypes(self, obj, c):
"""
Construct msg types list
"""
def f(xxx_todo_changeme2):
(instance, type, direction, sync, priority, role) = xxx_todo_changeme2
if self.isInput(direction) and self.isAsync(sync):
return instance.upper() + "_" + type.upper()
else:
return None
port_types = self.mapPartial(f, c.ports_sync)
def g(xxx_todo_changeme3):
(mnemonic, opcodes, sync, priority, full, comment) = xxx_todo_changeme3
if self.isAsync(sync):
if len(opcodes) == 1:
return f"CMD_{mnemonic.upper()}"
else:
mlist = []
inst = 0
for opcode in opcodes:
mlist.append("CMD_" + mnemonic.upper() + "_%d" % inst)
inst += 1
return mlist
else:
return None
cmd_types = self.mapPartial(g, c.commands)
def h(xxx_todo_changeme4):
(name, priority, full) = xxx_todo_changeme4
return f"INT_IF_{name.upper()}"
self.__model_parser.getInternalInterfacesList(obj)
interface_types = self.mapPartial(h, c.internal_interfaces)
c.msg_types = port_types + cmd_types + interface_types
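        # For example, an assumed model with an async input port
        # ("schedIn", "Sched"), an async command mnemonic "RUN" with a single
        # opcode, and an internal interface "sendEvent" would yield
        #   c.msg_types == ["SCHEDIN_SCHED", "CMD_RUN", "INT_IF_SENDEVENT"]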
def initParameterParams(self, obj, c):
"""
Parameter function parameters for code generation
"""
c.param_valid = ("valid", "Fw::ParamValid", "The parameter valid flag")
def initParameters(self, obj, c):
c.has_parameters = len(obj.get_parameters()) > 0
c.parameter_enums = self.__model_parser.getParamEnumList(obj)
c.parameters = self.__model_parser.getParametersList(obj)
self.initParameterParams(obj, c)
def initPortFlags(self, obj, c):
"""
Init port flags
"""
# Do we have serializable ports?
c.has_serializable_ports = self.__model_parser.hasSerializablePort(obj)
# Do we have guarded ports?
c.has_guarded_ports = obj.get_has_guarded_ports()
# Do we need a message size?
c.needs_msg_size = False
for (name, type, direction, sync, priority, role) in c.ports_sync:
if self.isSerial(type) and self.isAsync(sync):
c.needs_msg_size = True
# Flags for different port categories
c.has_ports = len(c.ports) > 0
c.has_input_ports = len(c.input_ports) > 0
c.has_typed_input_ports = len(c.typed_input_ports) > 0
c.has_serial_input_ports = len(c.serial_input_ports) > 0
c.has_output_ports = len(c.output_ports) > 0
c.has_typed_output_ports = len(c.typed_output_ports) > 0
c.has_serial_output_ports = len(c.serial_output_ports) > 0
roles = [
role for name, ptype, sync, priority, role, max_number in c.output_ports
]
c.has_time_get = "TimeGet" in roles
def initPortIncludes(self, obj, c):
c.port_includes = []
for include in self.__model_parser.uniqueList(obj.get_xml_port_files()):
c.port_includes.append(include.replace("PortAi.xml", "PortAc.hpp"))
def initPortInputTypes(self, obj, c):
"""
Construct port input type list
"""
def f(xxx_todo_changeme5):
(instance, type, direction, sync, priority, role) = xxx_todo_changeme5
if self.isSerial(type):
# No serial ports
return None
elif not self.isAsync(sync):
# Only async input ports
return None
else:
return type
l = self.mapPartial(f, c.ports_sync)
c.port_input_types = self.__model_parser.uniqueList(l)
def initPortLists(self, obj, c):
"""
Construct port lists
"""
# Construct (instance, type, direction, role) list
c.ports = self.__model_parser.getPortsList(obj)
# Construct (instance, type, direction, sync, priority, role, max num) list
c.ports_all = self.__model_parser.getPortsListAll(obj)
# Construct (instance, type, direction, sync, priority, role) list
c.ports_sync = self.__model_parser.getPortsListSync(obj)
# Construct (instance, type, direction, max num, role) list
c.ports_max_num = self.__model_parser.getPortsListMaxNum(obj)
# Construct port input type list
self.initPortInputTypes(obj, c)
# Construct port type list
l = [p.get_type() for p in obj.get_ports()]
c.port_types = self.__model_parser.uniqueList(l)
# Construct msg type list
self.initMsgTypes(obj, c)
# Init input ports
self.initInputPorts(c)
# Init output ports
self.initOutputPorts(c)
# Init handlers
self.initHandlers(c)
# Init pre-message hooks
self.initPreMessageHooks(c)
# Init invocation functions
self.initInvocationFunctions(c)
# Init message calls
self.initMessageCalls(c)
def initInputPorts(self, c):
"""
Init input ports
"""
# All input ports
c.input_ports = [
(instance, type, sync, priority, full, role, max_num)
for (
instance,
type,
direction,
sync,
priority,
full,
role,
max_num,
) in c.ports_all
if direction == "Input"
]
# User input ports
c.user_input_ports = [
(instance, type, sync, priority, full, role, max_num)
for (instance, type, sync, priority, full, role, max_num) in c.input_ports
if role is None
]
# Serial input ports
c.serial_input_ports = [
(instance, sync, priority, full, max_num)
for (instance, type, sync, priority, full, role, max_num) in c.input_ports
if type == "Serial"
]
# Typed input ports
c.typed_input_ports = [
(instance, type, sync, priority, full, role, max_num)
for (instance, type, sync, priority, full, role, max_num) in c.input_ports
if type != "Serial"
]
# Typed user input ports
c.typed_user_input_ports = [
(instance, type, sync, priority, full, role, max_num)
for (
instance,
type,
sync,
priority,
full,
role,
max_num,
) in c.typed_input_ports
if role is None
]
def initOutputPorts(self, c):
"""
Init output ports
"""
# All output ports
c.output_ports = [
(instance, type, sync, priority, role, max_num)
for (
instance,
type,
direction,
sync,
priority,
full,
role,
max_num,
) in c.ports_all
if direction == "Output"
]
# User output ports
c.user_output_ports = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, role, max_num) in c.output_ports
if role is None
]
# Typed output ports
c.typed_output_ports = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, role, max_num) in c.output_ports
if type != "Serial"
]
# Typed user output ports
c.typed_user_output_ports = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, role, max_num) in c.typed_output_ports
if role is None
]
# Serial output ports
c.serial_output_ports = [
(instance, sync, priority, max_num)
for (instance, type, sync, priority, role, max_num) in c.output_ports
if type == "Serial"
]
def initHandlers(self, c):
"""
Handlers to implement for input ports
"""
# Typed handlers
c.handlers_typed = [
(instance, type, sync, priority, full, role, max_num)
for (
instance,
type,
sync,
priority,
full,
role,
max_num,
) in c.typed_input_ports
if role != "Cmd" and sync != "model"
]
# Serial handlers
c.handlers_serial = c.serial_input_ports
def initPreMessageHooks(self, c):
"""
Init Pre-message hooks
"""
# Pre-message hooks for async input ports
c.pre_message_hooks = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, full, role, max_num) in c.input_ports
if role != "Cmd" and sync == "async"
]
# Pre-message hooks for typed async input ports
c.pre_message_hooks_typed = [
(instance, type, sync, priority, full, role, max_num)
for (
instance,
type,
sync,
priority,
full,
role,
max_num,
) in c.typed_input_ports
if role != "Cmd" and sync == "async"
]
# Pre-message hooks for serial async input ports
c.pre_message_hooks_serial = c.serial_input_ports
def initInvocationFunctions(self, c):
# Invocation functions for output ports
c.invocation_functions = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, role, max_num) in c.output_ports
if role is None
]
# Invocation functions for typed output ports
c.typed_invocation_functions = [
(instance, type, sync, priority, role, max_num)
for (instance, type, sync, priority, role, max_num) in c.typed_output_ports
if role is None
]
# Invocation functions for serial output ports
c.serial_invocation_functions = c.serial_output_ports
def initMessageCalls(self, c):
"""
Calls for messages
"""
# Message calls on typed input ports
c.message_calls_typed = [
(instance, type, sync, priority, full, role, max_num)
for (
instance,
type,
sync,
priority,
full,
role,
max_num,
) in c.typed_input_ports
if role != "Cmd"
]
def initPortMaps(self, obj, c):
"""
Construct port maps
"""
# Construct map: port type -> namespace
c.port_namespaces = self.__model_parser.getPortNamespaceTypeDict(obj)
# Construct map: port name -> arg list
c.port_args = self.__model_parser.getPortArgsDict(obj)
c.port_params = c.port_args
# Construct map: port name -> args string
c.port_arg_strs = self.__model_parser.getPortArgsCallStringDict(obj)
# Construct map: port name -> params string
c.port_param_strs = self.__model_parser.getPortArgsPrototypeStringDict(obj)
# Construct map: port name -> return type string
c.port_return_type_strs = self.__model_parser.getPortReturnDict(obj)
def initPortNames(self, obj, c):
"""
Set special port names
"""
for name, type, direction, role in c.ports:
if role == "Cmd":
c.Cmd_Name = name
if role == "CmdRegistration":
c.CmdReg_Name = name
if role == "CmdResponse":
c.CmdStatus_Name = name
if role == "LogEvent":
c.LogEvent_Name = name
if role == "LogTextEvent":
c.LogTextEvent_Name = name
if role == "ParamGet":
c.ParamGet_Name = name
if role == "ParamSet":
c.ParamSet_Name = name
if role == "Telemetry":
c.Tlm_Name = name
if role == "TimeGet":
c.Time_Name = name
def initPortParams(self, obj, c):
"""
Port function parameters for code generation
"""
c.param_portNum = ("portNum", "const NATIVE_INT_TYPE", "The port number", "")
c.param_Buffer = (
"Buffer",
"Fw::SerializeBufferBase",
"The serialization buffer",
"&",
)
c.param_callComp = (
"callComp",
"Fw::PassiveComponentBase",
"The component instance",
"*const ",
)
c.emit_port_params_hpp = self.emitPortParamsHpp
c.emit_port_params = c.emit_port_params_hpp
c.emit_port_params_cpp = self.emitPortParamsCpp
def initPorts(self, obj, c):
"""
Initialize port info
"""
self.initPortLists(obj, c)
self.initPortMaps(obj, c)
self.initPortFlags(obj, c)
self.initPortNames(obj, c)
self.initPortParams(obj, c)
def initPreamble(self, obj, c):
d = datetime.datetime.now()
c.date = d.strftime("%A, %d %B %Y")
c.kind = obj.get_kind()
c.modeler = obj.get_modeler()
c.name = obj.get_name
c.component_base = c.name() + "ComponentBase"
if obj.get_namespace() is None:
c.namespace_list = None
else:
c.namespace_list = obj.get_namespace().split("::")
c.user = getuser()
c.args_string = self.argsString
c.doxygen_pre_comment = self.doxygenPreComment
c.doxygen_post_comment = self.doxygenPostComment
c.is_primitive_type = self.isPrimitiveType
c.emit_non_port_params_hpp = self.emitNonPortParamsHpp
c.emit_non_port_params = c.emit_non_port_params_hpp
c.emit_non_port_params_cpp = self.emitNonPortParamsCpp
c.param_compName = ("compName", "const char *const", "The component name")
c.param_instance = ("instance", "const NATIVE_INT_TYPE", "The instance number")
c.param_instance_default_zero = (
"instance = 0",
"const NATIVE_INT_TYPE",
"The instance number",
)
c.param_msgSize = ("msgSize", "const NATIVE_INT_TYPE", "The message size")
c.param_queueDepth = ("queueDepth", "const NATIVE_INT_TYPE", "The queue depth")
def initSerialIncludes(self, obj, c):
"""
Include any headers for channel/parameter serializable includes
"""
ser_includes = self.__model_parser.uniqueList(obj.get_xml_serializable_files())
s_includes = [sinc.replace("Ai.xml", "Ac.hpp") for sinc in ser_includes]
c.ser_includes = s_includes
def initTelemetry(self, obj, c):
"""
Init telemetry info
"""
c.has_telemetry = len(obj.get_channels()) > 0
c.has_channels = c.has_telemetry
c.channel_enums = self.__model_parser.getTelemEnumList(obj)
c.channels = self.__model_parser.getChannelsList(obj)
self.initTelemetryParams(obj, c)
def initTelemetryParams(self, obj, c):
"""
Telemetry function parameters for code generation
"""
c.param_tlm_id = ("id", "const FwChanIdType", "The channel ID")
c.param_val = ("val", "Fw::TlmBuffer&", "The channel value")
def initTypeIncludes(self, obj, c):
type_includes = [
e.replace("Ai.xml", "Ac.hpp") for e in obj.get_included_enums()
]
for p in obj.get_ports():
type_includes = type_includes + p.get_includes()
c.types_includes = self.__model_parser.uniqueList(type_includes)
c.c_includes = obj.get_c_header_files()
if False in [x[-3:] == "hpp" or x[-1:] == "h" for x in c.c_includes]:
PRINT.info(
"ERROR: Only .hpp or .h files can be given within <include_header> tag!!!"
)
sys.exit(-1)
def internalInterfaceArgsStr(self):
"""
Make a list of command args into a string
"""
def f(lst):
def g(xxx_todo_changeme1):
(name, a, b, c) = xxx_todo_changeme1
return name
return self.argsString(list(map(g, lst)))
return f
def isPrimitiveType(self, type):
return type in [
"I8",
"U8",
"I16",
"U16",
"I32",
"U32",
"I64",
"U64",
"F32",
"F64",
"bool",
]
def mapPartial(self, f, l):
"""
Map partial function f over list l
"""
result = []
for x in l:
y = f(x)
if y is not None:
# check if list
if isinstance(y, list):
result += y
else:
result += [y]
return result
def namespaceVisit(self, obj):
pass
def openFile(self, filename):
"""
Open the file for writing
"""
DEBUG.info("Open file: %s" % filename)
self.__fp = open(filename, "w")
if self.__fp is None:
            raise Exception("Could not open file %s" % filename)
DEBUG.info("Completed")
def initFilesVisit(self, obj):
filename = self.buildFileName(obj)
self.openFile(filename)
def paramStrsCpp(self, param):
"""
Get the strings for a function parameter in a .cpp file
"""
name, type = param[:2]
param_str = "{} {}".format(type, name)
return param_str, ""
def paramStrsHpp(self, param):
"""
Get the strings for a function parameter in a .hpp file
"""
name, type, comment = param[:3]
param_str = "{} {}".format(type, name)
comment_str = " " + self.doxygenPostComment(comment)
return param_str, comment_str
def portParamStrsCpp(self, param):
"""
Get the strings for a port function parameter in a .cpp file
"""
name, type, comment, modifier = param[:4]
param_str = "{} {}{}".format(type, modifier, name)
return param_str, ""
def portParamStrsHpp(self, param):
"""
Get the strings for a port function parameter in a .hpp file
"""
name, type, comment, modifier = param[:4]
param_str = "{} {}{}".format(type, modifier, name)
comment_str = " " + self.doxygenPostComment(comment)
return param_str, comment_str
def privateVisit(self, obj):
pass
def protectedVisit(self, obj):
pass
def publicVisit(self, obj):
pass
| 33.669197
| 99
| 0.557807
|
bcc8c1ed778a0e12690192b491efa6d7a64e2b09
| 7,969
|
py
|
Python
|
scripts/hapbuilder.py
|
openharmony-gitee-mirror/build
|
2eee03d63d877a247e6634438239b38aaa7d3ba6
|
[
"Apache-2.0"
] | null | null | null |
scripts/hapbuilder.py
|
openharmony-gitee-mirror/build
|
2eee03d63d877a247e6634438239b38aaa7d3ba6
|
[
"Apache-2.0"
] | 14
|
2021-09-07T08:39:43.000Z
|
2021-09-17T08:50:23.000Z
|
scripts/hapbuilder.py
|
openharmony-gitee-mirror/build
|
2eee03d63d877a247e6634438239b38aaa7d3ba6
|
[
"Apache-2.0"
] | 1
|
2021-09-07T06:19:48.000Z
|
2021-09-07T06:19:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import subprocess
import sys
import shutil
import os
import tempfile
import json
from util import build_utils # noqa: E402
def sign_hap(hapsigner, private_key_path, sign_algo, certificate_profile,
keystore_path, keystorepasswd, keyalias, certificate_file,
unsigned_hap_path, signed_hap_path):
cmd = ['java', '-jar', hapsigner, 'sign']
cmd.extend(['-mode', 'localjks'])
cmd.extend(['-signAlg', sign_algo])
cmd.extend(['-privatekey', private_key_path])
cmd.extend(['-inputFile', unsigned_hap_path])
cmd.extend(['-outputFile', signed_hap_path])
cmd.extend(['-profile', certificate_profile])
cmd.extend(['-keystore', keystore_path])
cmd.extend(['-keystorepasswd', keystorepasswd])
cmd.extend(['-keyaliaspasswd', keyalias])
cmd.extend(['-certpath', certificate_file])
cmd.extend(['-profileSigned', '1'])
child = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
if child.returncode:
print(stdout.decode(), stderr.decode())
raise Exception("Failed to sign hap")
def add_resources(packaged_resources, package_dir, packing_cmd):
if packaged_resources:
build_utils.extract_all(packaged_resources,
package_dir,
no_clobber=False)
index_file_path = os.path.join(package_dir, 'resources.index')
if os.path.exists(index_file_path):
packing_cmd.extend(['--index-path', index_file_path])
packing_cmd.extend(
['--res-path',
os.path.join(package_dir, 'resources')])
def add_assets(packaged_js_assets, assets, package_dir, packing_cmd):
assets_dir = os.path.join(package_dir, 'assets')
if packaged_js_assets:
build_utils.extract_all(packaged_js_assets,
package_dir,
no_clobber=False)
if assets:
if not os.path.exists(assets_dir):
os.mkdir(assets_dir)
for item in assets:
if os.path.isfile(item):
shutil.copyfile(
item, os.path.join(assets_dir, os.path.basename(item)))
elif os.path.isdir(item):
shutil.copytree(
item, os.path.join(assets_dir, os.path.basename(item)))
if os.path.exists(assets_dir) and len(os.listdir(assets_dir)) != 0:
packing_cmd.extend(['--assets-path', assets_dir])
def get_ark_toolchain_version(options):
cmd = [options.nodejs_path, options.js2abc_js, '--bc-version']
return build_utils.check_output(cmd).strip('\n')
def tweak_hap_profile(options, package_dir):
hap_profile = os.path.join(package_dir, 'config.json')
if not os.path.exists(hap_profile):
raise Exception('Error: config.json of hap file not exists')
config = {}
with open(hap_profile, 'r') as fileobj:
config = json.load(fileobj)
config['module']['distro']['virtualMachine'] = 'ark{}'.format(
get_ark_toolchain_version(options))
build_utils.write_json(config, hap_profile)
def create_hap(options, signed_hap):
with build_utils.temp_dir() as package_dir, tempfile.NamedTemporaryFile(
suffix='.hap') as output:
packing_cmd = ['java', '-jar', options.hap_packing_tool]
packing_cmd.extend(
['--mode', 'hap', '--force', 'true', '--out-path', output.name])
hap_profile_path = os.path.join(package_dir,
os.path.basename(options.hap_profile))
shutil.copy(options.hap_profile, hap_profile_path)
packing_cmd.extend(['--json-path', hap_profile_path])
add_assets(options.packaged_js_assets, options.assets, package_dir,
packing_cmd)
add_resources(options.packaged_resources, package_dir, packing_cmd)
if options.js2abc:
tweak_hap_profile(options, package_dir)
if options.dso:
lib_path = os.path.join(package_dir, "lib")
os.mkdir(lib_path)
for dso in sorted(options.dso):
shutil.copy(dso, lib_path)
packing_cmd.extend(['--lib-path', lib_path])
build_utils.check_output(packing_cmd)
sign_hap(options.hapsigner, options.private_key_path,
options.sign_algo, options.certificate_profile,
options.keystore_path, options.keystorepasswd,
options.keyalias, options.certificate_file, output.name,
signed_hap)
def parse_args(args):
args = build_utils.expand_file_args(args)
parser = optparse.OptionParser()
build_utils.add_depfile_option(parser)
parser.add_option('--hap-path', help='path to output hap')
parser.add_option('--hapsigner', help='path to signer')
parser.add_option('--assets', help='path to assets')
parser.add_option('--dso',
action="append",
help='path to dynamic shared objects')
parser.add_option('--hap-profile', help='path to hap profile')
parser.add_option('--nodejs-path', help='path to node')
parser.add_option('--js2abc-js', help='path to ts2abc.js')
parser.add_option('--js2abc',
action='store_true',
default=False,
help='whether to transform js to ark bytecode')
parser.add_option('--hap-packing-tool', help='path to hap packing tool')
parser.add_option('--private-key-path', help='path to private key')
parser.add_option('--sign-algo', help='signature algorithm')
parser.add_option('--certificate-profile',
help='path to certificate profile')
parser.add_option('--keyalias', help='keyalias')
parser.add_option('--keystore-path', help='path to keystore')
parser.add_option('--keystorepasswd', help='password of keystore')
parser.add_option('--certificate-file', help='path to certificate file')
parser.add_option('--packaged-resources',
help='path to packaged resources')
parser.add_option('--packaged-js-assets',
help='path to packaged js assets')
options, _ = parser.parse_args(args)
if options.assets:
options.assets = build_utils.parse_gn_list(options.assets)
return options
def main(args):
options = parse_args(args)
inputs = [
options.hap_profile, options.packaged_js_assets,
options.packaged_resources, options.certificate_file,
options.keystore_path, options.certificate_profile
]
depfiles = []
for dire in options.assets:
depfiles += (build_utils.get_all_files(dire))
if options.dso:
depfiles.extend(options.dso)
build_utils.call_and_write_depfile_if_stale(
lambda: create_hap(options, options.hap_path),
options,
depfile_deps=depfiles,
input_paths=inputs + depfiles,
input_strings=[
options.keystorepasswd, options.keyalias, options.sign_algo,
options.private_key_path
],
output_paths=([options.hap_path]),
force=False,
add_pydeps=False)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
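# Example invocation; every path, alias and password below is a placeholder:
#
#   python hapbuilder.py \
#       --hap-path out/demo.hap \
#       --hap-profile config.json \
#       --hap-packing-tool app_packing_tool.jar \
#       --hapsigner hapsigner.jar \
#       --private-key-path "HOS Application Provision" \
#       --sign-algo SHA256withECDSA \
#       --certificate-profile profile.p7b \
#       --keyalias demo-key-password \
#       --keystore-path OpenHarmony.jks \
#       --keystorepasswd 123456 \
#       --certificate-file OpenHarmony.cer \
#       --packaged-resources resources.zip \
#       --packaged-js-assets js_assets.zip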
| 39.646766
| 78
| 0.642364
|
f3ed016b0041b597c6087389e18f02754883a817
| 192
|
py
|
Python
|
app/admin.py
|
nyagajr/neighborhood
|
98955d9c0df46b15d39e861a6eb22655990d7769
|
[
"Unlicense"
] | null | null | null |
app/admin.py
|
nyagajr/neighborhood
|
98955d9c0df46b15d39e861a6eb22655990d7769
|
[
"Unlicense"
] | null | null | null |
app/admin.py
|
nyagajr/neighborhood
|
98955d9c0df46b15d39e861a6eb22655990d7769
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Profile, Hood, Business
# Register your models here.
admin.site.register(Profile)
admin.site.register(Hood)
admin.site.register(Business)
| 24
| 43
| 0.807292
|
041dea46e60d90c95da8516067595ebea21277ff
| 2,852
|
py
|
Python
|
opacus/grad_sample/utils.py
|
parkbeomsik/opacus-reweight
|
5c58867a0a405382b107d2d0c1c498a3c98e85b4
|
[
"Apache-2.0"
] | null | null | null |
opacus/grad_sample/utils.py
|
parkbeomsik/opacus-reweight
|
5c58867a0a405382b107d2d0c1c498a3c98e85b4
|
[
"Apache-2.0"
] | null | null | null |
opacus/grad_sample/utils.py
|
parkbeomsik/opacus-reweight
|
5c58867a0a405382b107d2d0c1c498a3c98e85b4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from opacus.utils.cuda_timer import cuda_timer
from typing import Sequence, Union
import torch
import torch.nn as nn
from .grad_sample_module import GradSampleModule
def register_grad_sampler(target_class_or_classes: Union[type, Sequence[type]]):
"""
Registers the decorated function as the ``grad_sampler`` of ``target_class_or_classes``, which is
the function that will be invoked every time you want to compute a per-sample gradient
of ``target_class_or_classes``. The signature of every grad_sampler is always the same:
>>> @register_grad_sampler(nn.MyCustomClass)
>>> def compute_grad_sample(module, activations, backprops):
>>> pass
It may help you to take a look at the existing grad_samplers inside Opacus, under ``opacus.grad_sample.``
"""
def decorator(f):
target_classes = (
target_class_or_classes
if isinstance(target_class_or_classes, Sequence)
else [target_class_or_classes]
)
for target_class in target_classes:
GradSampleModule.GRAD_SAMPLERS[target_class] = f
return f
return decorator
def create_or_extend_grad_sample(
param: torch.Tensor, grad_sample: torch.Tensor, batch_dim: int
) -> None:
"""
Creates a ``grad_sample`` attribute in the given parameter, or appends to it
if the ``grad_sample`` attribute already exists.
Args:
param: Parameter to which ``grad_sample`` will be added
grad_sample: Per-sample gradients tensor. Must be of the same
shape as ``param`` with extra batch dimension
batch_dim: Position of the batch dimension in the shape of
``grad_sample``
"""
if hasattr(param, "grad_sample"):
param.grad_sample = torch.cat((param.grad_sample, grad_sample), batch_dim)
else:
param.grad_sample = grad_sample
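# A minimal shape check for the concatenation above (the parameter and shapes
# are arbitrary examples):
#
#   w = torch.nn.Parameter(torch.zeros(2, 3))
#   create_or_extend_grad_sample(w, torch.zeros(4, 2, 3), batch_dim=0)
#   create_or_extend_grad_sample(w, torch.zeros(5, 2, 3), batch_dim=0)
#   assert w.grad_sample.shape == torch.Size([9, 2, 3])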
def create_or_accumulate_grad_sample(
param: torch.Tensor, grad_sample: torch.Tensor, layer: nn.Module
) -> None:
"""
Creates a ``grad_sample`` attribute in the given parameter, or adds to it
if the ``grad_sample`` attribute already exists.
    Args:
        param: Parameter to which ``grad_sample`` will be added
        grad_sample: Per-sample gradients tensor. Must be of the same
            shape as ``param`` with extra batch dimension
        layer: Module that owns ``param``; its ``max_batch_len`` attribute is
            used to pre-allocate the per-sample gradient buffer
    """
if hasattr(param, "grad_sample"):
param.grad_sample[: grad_sample.shape[0]] += grad_sample
else:
max_batch_len = layer.max_batch_len
param.grad_sample = torch.zeros(
torch.Size([max_batch_len]) + grad_sample.shape[1:],
device=grad_sample.device,
dtype=grad_sample.dtype,
)
param.grad_sample[: grad_sample.shape[0]] = grad_sample
| 33.952381
| 109
| 0.683731
|
bade8151a2401f61492ff799c4a08828923cd11e
| 4,748
|
py
|
Python
|
python/perspective/bench/stresstest/server/server.py
|
gidsg/perspective
|
337ee8ef696b41bbfc83f24451af862166d3b24e
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/bench/stresstest/server/server.py
|
gidsg/perspective
|
337ee8ef696b41bbfc83f24451af862166d3b24e
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/bench/stresstest/server/server.py
|
gidsg/perspective
|
337ee8ef696b41bbfc83f24451af862166d3b24e
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import logging
import argparse
import os
import random
import tornado
import perspective
from manager_telemetry import PerspectiveManagerWithTelemetry
from tornado_handler_telemetry import PerspectiveTornadoHandlerWithTelemetry
PARSER = argparse.ArgumentParser(
description="A perspective-python server configured to provide telemetry for use with stress testing."
)
PARSER.add_argument(
"--table_size",
dest="table_size",
default=10000,
type=int,
help="The row size of the initial table. Defaults to 10000 rows.",
)
PARSER.add_argument(
"--update_size",
dest="update_size",
type=int,
default=50,
help="The row size of each update. Defaults to 50 rows.",
)
PARSER.add_argument(
"--update_rate",
dest="update_rate",
type=float,
default=500,
help="The frequency of each update in milliseconds. Defaults to 500 milliseconds.",
)
PARSER.add_argument(
"--port",
dest="port",
    type=int,
default=8888,
help="A port to host the Tornado server on. Defaults to 8888.",
)
HERE = os.path.abspath(os.path.dirname(__file__))
TABLE = None
VIEW = None
MANAGER = PerspectiveManagerWithTelemetry()
with open(
os.path.join(
HERE,
"..",
"..",
"..",
"..",
"..",
"node_modules",
"superstore-arrow",
"superstore.arrow",
),
"rb",
) as arrow:
TABLE = perspective.Table(arrow.read(), index="Row ID")
VIEW = TABLE.view()
def get_data(update_size):
"""Return `update_size` random rows from the dataset, with their Row IDs
tweaked to be half appends and half partial updates."""
size = TABLE.size()
start = random.randint(0, size - update_size - 1)
end = start + update_size
data = VIEW.to_dict(start_row=start, end_row=end)
# Generate some random row IDs
data["Row ID"] = [
random.randint(size, size + update_size) if i % 2 else data["Row ID"][i]
for i in range(len(data["Row ID"]))
]
# And other randomized values
data["Sales"] = [
random.randint(10, 1000) * random.random() for i in range(len(data["Sales"]))
]
data["Profit"] = [
random.randint(10, 100) * random.random() for i in range(len(data["Profit"]))
]
return data
def make_app(table_size, update_size, update_rate):
"""Create a Tornado application for the webserver."""
MANAGER.host_table("table", TABLE)
MANAGER.host_view("view", VIEW)
if table_size is not None and TABLE.size() < table_size:
current_size = TABLE.size()
while current_size < table_size:
logging.warning(
"Current table size %d, requested table size %d - inflating",
TABLE.size(),
table_size,
)
diff = table_size - TABLE.size()
data = []
# less than 2x table size
if diff < TABLE.size():
data = VIEW.to_dict(end_row=diff)
else:
data = VIEW.to_dict()
data["Row ID"] = [i for i in range(TABLE.size() + 1, table_size)]
TABLE.update(data)
current_size = TABLE.size()
logging.info("Table size: %d", TABLE.size())
# Update the table with `update_size` rows every `update_rate` milliseconds
def updater():
TABLE.update(get_data(update_size))
callback = tornado.ioloop.PeriodicCallback(
callback=updater, callback_time=update_rate
)
callback.start()
return tornado.web.Application(
[
(
r"/",
PerspectiveTornadoHandlerWithTelemetry,
{"manager": MANAGER, "check_origin": True},
)
]
)
def start(port, table_size, update_size, update_rate):
"""Start the webserver at the given port."""
app = make_app(table_size, update_size, update_rate)
app.listen(port)
logging.critical("Listening on http://localhost:{}".format(port))
loop = tornado.ioloop.IOLoop.current()
loop.start()
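# --- Hedged connectivity sketch (editor addition, not part of the original script) ---
# Opens a raw websocket to the handler mounted at "/" above and closes it again;
# assumes the server is already running locally on the default port 8888.
import tornado.websocket


async def check_connection(url="ws://localhost:8888/"):
    conn = await tornado.websocket.websocket_connect(url)
    conn.close()
    return True
# Example (run while the server is up):
#   tornado.ioloop.IOLoop.current().run_sync(check_connection)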
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = PARSER.parse_args()
logging.info(
"Running server on port %d - Hosting Table of size %d, updating with %d rows every %2f milliseconds",
args.port,
args.table_size,
args.update_size,
args.update_rate,
)
start(args.port, args.table_size, args.update_size, args.update_rate)
| 27.445087
| 109
| 0.614785
|
e540b4d652ce323a30c353df7781c9ba61e8af23
| 987
|
py
|
Python
|
src/glod/api/transaction_node.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | null | null | null |
src/glod/api/transaction_node.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | 1
|
2021-03-10T16:48:34.000Z
|
2021-03-10T16:48:34.000Z
|
src/glod/api/transaction_node.py
|
gordon-elliott/glod
|
a381e21455d05d9c005942a3dee4ac67e10f366a
|
[
"MIT"
] | null | null | null |
__copyright__ = 'Copyright(c) Gordon Elliott 2018'
"""
"""
from a_tuin.api import (
node_class,
node_connection_field,
get_update_mutation,
get_create_mutation,
get_local_fields
)
from glod.api.transaction_leaf import TransactionLeaf
from glod.db.transaction import Transaction, TransactionQuery
transaction_fields = get_local_fields(Transaction)
TransactionNode = node_class(Transaction.__name__, TransactionLeaf, transaction_fields)
transactions_connection_field = node_connection_field(
Transaction,
TransactionQuery,
TransactionNode,
description='List of all transactions'
)
transactions_options_field = node_connection_field(
Transaction,
TransactionQuery,
TransactionLeaf,
description='List of all transactions for Select fields'
)
CreateTransactionLeaf = get_create_mutation(Transaction, transaction_fields, TransactionLeaf)
UpdateTransactionLeaf = get_update_mutation(Transaction, transaction_fields, TransactionLeaf)
| 26.675676
| 93
| 0.806484
|
f2f4e10b76d3f2a841b89e24f26a4202e4aefb87
| 21
|
py
|
Python
|
Unidad 2/packages/extra/good/beta.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
Unidad 2/packages/extra/good/beta.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
Unidad 2/packages/extra/good/beta.py
|
angelxehg/utzac-ppy
|
fb88bcc661518bb35c08a102a67c20d0659f71db
|
[
"MIT"
] | null | null | null |
def funB():
pass
| 7
| 11
| 0.52381
|
fe3a25868d20c9588a6474c66f084dcaba0adaf4
| 1,044
|
py
|
Python
|
test/test_resources.py
|
luca-penasa/circle-craters
|
62881f7fa7f032c8377dee130598ec7a93ccdae3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-01T13:59:29.000Z
|
2021-02-01T13:59:29.000Z
|
test/test_resources.py
|
europlanet-gmap/circle-craters
|
62881f7fa7f032c8377dee130598ec7a93ccdae3
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_resources.py
|
europlanet-gmap/circle-craters
|
62881f7fa7f032c8377dee130598ec7a93ccdae3
|
[
"BSD-3-Clause"
] | 1
|
2020-10-21T13:50:34.000Z
|
2020-10-21T13:50:34.000Z
|
# coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'braden.sarah@gmail.com'
__date__ = '2014-12-31'
__copyright__ = 'Copyright 2014, Sarah E Braden'
import unittest
from qgis.PyQt.QtGui import QIcon
class CircleCratersResourcesTest(unittest.TestCase):
    """Test resources work."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/CircleCraters/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
if __name__ == "__main__":
suite = unittest.makeSuite(CircleCratersResourcesTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| 23.2
| 78
| 0.668582
|
87b48d242faa2f1c7c0757c3f07caa97b9a5fa33
| 852
|
py
|
Python
|
benchmarks/parse_unsat.py
|
guluchen/z3
|
f96ae3a0f0d7b9fa7cc5cbd9ad138eda2208ca4d
|
[
"MIT"
] | 3
|
2018-11-01T06:58:00.000Z
|
2019-04-25T13:57:59.000Z
|
benchmarks/parse_unsat.py
|
PhucVH888/z3
|
f7402f8b6134e366a2060571c5176c0e6ea34ddc
|
[
"MIT"
] | null | null | null |
benchmarks/parse_unsat.py
|
PhucVH888/z3
|
f7402f8b6134e366a2060571c5176c0e6ea34ddc
|
[
"MIT"
] | 4
|
2019-07-19T07:17:12.000Z
|
2021-06-21T17:27:14.000Z
|
#!/usr/bin/env python3
import sys
import os
_base_dir = os.path.dirname(os.path.realpath(__file__))
def main(path):
files = sorted([os.path.join(root, file)
for root, dirs, files in os.walk(path) for file in files])
cmd = "cvc4 --tlimit=15000 --dump-unsat-cores-full --lang smt --strings-exp --quiet "
for f in files:
res = os.popen(cmd + f).readlines()
if "unsat" in res[0]:
save = False
for r in res:
if "int.to.re" in r or "str.to.int" in r:
save = True
print(f)
break
if not save:
print(f + " neg")
os.system("rm " + f)
else:
print(f + " " + res[0].replace("\n", ""))
if __name__ == '__main__':
main(sys.argv[1])
| 28.4
| 89
| 0.4777
|
84cb78ab31fa4abc9250e6ac055f33d074da0f0e
| 2,164
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
swampcoin/AeriumX
|
76c4340a9e16ec326b20c4d7a07be88d84ee97e3
|
[
"MIT"
] | 5
|
2018-06-10T21:32:12.000Z
|
2019-01-13T14:47:12.000Z
|
share/qt/extract_strings_qt.py
|
swampcoin/AeriumX
|
76c4340a9e16ec326b20c4d7a07be88d84ee97e3
|
[
"MIT"
] | 1
|
2019-07-07T08:25:21.000Z
|
2019-07-07T08:25:21.000Z
|
share/qt/extract_strings_qt.py
|
swampcoin/AeriumX
|
76c4340a9e16ec326b20c4d7a07be88d84ee97e3
|
[
"MIT"
] | 13
|
2018-06-16T18:01:29.000Z
|
2020-08-02T16:36:33.000Z
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/aeriumxstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *aeriumx_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("aeriumx-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
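# --- Hedged usage sketch (editor addition, not part of the original script) ---
# Demonstrates parse_po() on a tiny invented catalogue; only the function
# defined above is assumed.
SAMPLE_PO = '''msgid "Hello"
msgstr "Bonjour"
msgid ""
"Multi-line "
"entry"
msgstr "Entree"
'''

def _parse_po_demo():
    # Each entry is a (msgid_lines, msgstr_lines) pair of raw quoted fragments.
    for msgid, msgstr in parse_po(SAMPLE_PO):
        print(''.join(msgid), '->', ''.join(msgstr))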
| 25.761905
| 105
| 0.619686
|
57e5f53c71ff2d1281ad3f48215d660ced32d547
| 2,078
|
py
|
Python
|
tests/integration/mci/test_happy_path.py
|
MebinAbraham/eq-survey-runner
|
f8bb059bca19371043d79abeefd5d1e6c0e82e79
|
[
"MIT"
] | null | null | null |
tests/integration/mci/test_happy_path.py
|
MebinAbraham/eq-survey-runner
|
f8bb059bca19371043d79abeefd5d1e6c0e82e79
|
[
"MIT"
] | null | null | null |
tests/integration/mci/test_happy_path.py
|
MebinAbraham/eq-survey-runner
|
f8bb059bca19371043d79abeefd5d1e6c0e82e79
|
[
"MIT"
] | null | null | null |
from tests.integration.integration_test_case import IntegrationTestCase
class HappyPathHelperMixin():
def _happy_path(self, form_type_id, eq_id):
self.launchSurvey(eq_id, form_type_id)
# We are on the introduction page
self.assertInBody('>Start survey<')
self.assertInBody('Monthly Business Survey - Retail Sales Index')
# We proceed to the questionnaire
self.post(action='start_questionnaire')
# We are in the Questionnaire
self.assertInBody('>Monthly Business Survey - Retail Sales Index</')
self.assertInBody('What are the dates of the sales period you are reporting for?')
self.assertInBody('>Save and continue<')
        # check we have some guidance
self.assertInBody('alcoholic drink')
# We fill in our answers
form_data = {
# Start Date
'period-from-day': '01',
'period-from-month': '4',
'period-from-year': '2016',
# End Date
'period-to-day': '30',
'period-to-month': '4',
'period-to-year': '2016',
# Total Turnover
'total-retail-turnover': '100000',
}
# We submit the form
self.post(form_data)
# There are no validation errors
self.assertInUrl('summary')
# We are on the review answers page
self.assertInBody('>Monthly Business Survey - Retail Sales Index</')
self.assertInBody('>Check your answers and submit<')
self.assertInBody('You can check your answers below')
self.assertInBody('>Submit answers<')
# Submit answers
self.post(action=None)
# We are on the thank you page
self.assertRegexPage('(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
class TestHappyPath(IntegrationTestCase, HappyPathHelperMixin):
def test_happy_path_203(self):
self._happy_path('0203', 'test')
def test_happy_path_205(self):
self._happy_path('0205', 'test')
| 34.065574
| 127
| 0.625602
|
e448d02d4af0a4f4d45bc79ec4e176efde0694a4
| 398
|
py
|
Python
|
chat/admin.py
|
Pygmy-Final/social-club-backend
|
ceb2d4ee879900597cf65685f7119c1f03e92637
|
[
"MIT"
] | null | null | null |
chat/admin.py
|
Pygmy-Final/social-club-backend
|
ceb2d4ee879900597cf65685f7119c1f03e92637
|
[
"MIT"
] | null | null | null |
chat/admin.py
|
Pygmy-Final/social-club-backend
|
ceb2d4ee879900597cf65685f7119c1f03e92637
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Message
class MessageAdmin(admin.ModelAdmin):
list_display = ['message', 'seen','sender', 'receiver', 'date_created']
model = Message
def save_model(self, request, obj, form, change):
if obj.receiver != request.user:
obj.sender = request.user
obj.save()
admin.site.register(Message, MessageAdmin)
| 30.615385
| 75
| 0.643216
|
66734461d376307888a65124e6a6ae2d98544fbe
| 10,134
|
py
|
Python
|
python/kfserving/kfserving/api/creds_utils.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 6
|
2022-02-15T21:54:19.000Z
|
2022-02-16T21:18:54.000Z
|
python/kfserving/kfserving/api/creds_utils.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 7
|
2021-08-31T23:55:06.000Z
|
2022-03-02T11:34:58.000Z
|
python/kfserving/kfserving/api/creds_utils.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 2
|
2021-12-16T10:32:07.000Z
|
2022-02-28T17:08:52.000Z
|
# Copyright 2019 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import configparser
from os.path import expanduser
from kubernetes import client
from ..constants import constants
logger = logging.getLogger(__name__)
def set_gcs_credentials(namespace, credentials_file, service_account):
"""
    Set GCS Credentials (secret and service account) with credentials file.
Args:
namespace(str): The kubernetes namespace.
credentials_file(str): The path for the gcs credentials file.
service_account(str): The name of service account. If the service_account
is specified, will attach created secret with the service account,
otherwise will create new one and attach with created secret.
"""
with open(expanduser(credentials_file)) as f:
gcs_creds_content = f.read()
    # Try to get the GCS creds file name from the configmap; fall back to the default if it is not set.
gcs_creds_file_name = get_creds_name_from_config_map(
'gcsCredentialFileName')
if not gcs_creds_file_name:
gcs_creds_file_name = constants.GCS_CREDS_FILE_DEFAULT_NAME
string_data = {gcs_creds_file_name: gcs_creds_content}
secret_name = create_secret(
namespace=namespace, string_data=string_data)
set_service_account(namespace=namespace,
service_account=service_account,
secret_name=secret_name)
def set_s3_credentials(namespace, credentials_file, service_account,
s3_profile='default', # pylint: disable=too-many-locals,too-many-arguments
s3_endpoint=None, s3_region=None, s3_use_https=None,
s3_verify_ssl=None): # pylint: disable=unused-argument
"""
    Set S3 Credentials (secret and service account).
Args:
namespace(str): The kubernetes namespace.
credentials_file(str): The path for the S3 credentials file.
s3_profile(str): The profile for S3, default value is 'default'.
service_account(str): The name of service account(Optional). If the service_account
is specified, will attach created secret with the service account,
otherwise will create new one and attach with created secret.
s3_endpoint(str): S3 settings variable S3_ENDPOINT.
s3_region(str): S3 settings variable AWS_REGION.
s3_use_https(str): S3 settings variable S3_USE_HTTPS.
s3_verify_ssl(str): S3 settings variable S3_VERIFY_SSL.
"""
config = configparser.ConfigParser()
config.read([expanduser(credentials_file)])
s3_access_key_id = config.get(s3_profile, 'aws_access_key_id')
s3_secret_access_key = config.get(
s3_profile, 'aws_secret_access_key')
    # Try to get the S3 creds names from the configmap; fall back to the defaults if they are not set.
s3_access_key_id_name = get_creds_name_from_config_map(
's3AccessKeyIDName')
if not s3_access_key_id_name:
s3_access_key_id_name = constants.S3_ACCESS_KEY_ID_DEFAULT_NAME
s3_secret_access_key_name = get_creds_name_from_config_map(
's3SecretAccessKeyName')
if not s3_secret_access_key_name:
s3_secret_access_key_name = constants.S3_SECRET_ACCESS_KEY_DEFAULT_NAME
data = {
s3_access_key_id_name: s3_access_key_id,
s3_secret_access_key_name: s3_secret_access_key,
}
s3_cred_sets = {
's3_endpoint': constants.KFSERVING_GROUP + "/s3-endpoint",
's3_region': constants.KFSERVING_GROUP + "/s3-region",
's3_use_https': constants.KFSERVING_GROUP + "/s3-usehttps",
's3_verify_ssl': constants.KFSERVING_GROUP + "/s3-verifyssl",
}
s3_annotations = {}
for key, value in s3_cred_sets.items():
arg = vars()[key]
if arg is not None:
s3_annotations.update({value: arg})
secret_name = create_secret(
namespace=namespace, annotations=s3_annotations, data=data)
set_service_account(namespace=namespace,
service_account=service_account,
secret_name=secret_name)
def set_azure_credentials(namespace, credentials_file, service_account):
"""
    Set Azure Credentials (secret and service account) with credentials file.
Args:
namespace(str): The kubernetes namespace.
credentials_file(str): The path for the Azure credentials file.
service_account(str): The name of service account. If the service_account
is specified, will attach created secret with the service account,
otherwise will create new one and attach with created secret.
"""
with open(expanduser(credentials_file)) as azure_creds_file:
azure_creds = json.load(azure_creds_file)
data = {
'AZ_CLIENT_ID': azure_creds['clientId'],
'AZ_CLIENT_SECRET': azure_creds['clientSecret'],
'AZ_SUBSCRIPTION_ID': azure_creds['subscriptionId'],
'AZ_TENANT_ID': azure_creds['tenantId'],
}
secret_name = create_secret(
namespace=namespace, data=data)
set_service_account(namespace=namespace,
service_account=service_account,
secret_name=secret_name)
def create_secret(namespace, annotations=None, data=None, string_data=None):
'Create namespaced secret, and return the secret name.'
try:
created_secret = client.CoreV1Api().create_namespaced_secret(
namespace,
client.V1Secret(
api_version='v1',
kind='Secret',
metadata=client.V1ObjectMeta(
generate_name=constants.DEFAULT_SECRET_NAME,
annotations=annotations),
data=data,
string_data=string_data))
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CoreV1Api->create_namespaced_secret: %s\n" % e)
secret_name = created_secret.metadata.name
logger.info('Created Secret: %s in namespace %s', secret_name, namespace)
return secret_name
def set_service_account(namespace, service_account, secret_name):
"""
Set service account, create if service_account does not exist, otherwise patch it.
"""
if check_sa_exists(namespace=namespace, service_account=service_account):
patch_service_account(secret_name=secret_name,
namespace=namespace,
sa_name=service_account)
else:
create_service_account(secret_name=secret_name,
namespace=namespace,
sa_name=service_account)
def check_sa_exists(namespace, service_account):
"""
    Check if the specified service account exists.
"""
sa_list = client.CoreV1Api().list_namespaced_service_account(namespace=namespace)
sa_name_list = [sa.metadata.name for sa in sa_list.items]
if service_account in sa_name_list:
return True
return False
def create_service_account(secret_name, namespace, sa_name):
"""
Create namespaced service account, and return the service account name
"""
try:
client.CoreV1Api().create_namespaced_service_account(
namespace,
client.V1ServiceAccount(
metadata=client.V1ObjectMeta(
name=sa_name
),
secrets=[client.V1ObjectReference(
kind='Secret',
name=secret_name)]))
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CoreV1Api->create_namespaced_service_account: %s\n" % e)
logger.info('Created Service account: %s in namespace %s', sa_name, namespace)
def patch_service_account(secret_name, namespace, sa_name):
"""
Patch namespaced service account to attach with created secret.
"""
try:
client.CoreV1Api().patch_namespaced_service_account(
sa_name,
namespace,
client.V1ServiceAccount(
secrets=[client.V1ObjectReference(
kind='Secret',
name=secret_name)]))
except client.rest.ApiException as e:
raise RuntimeError(
"Exception when calling CoreV1Api->patch_namespaced_service_account: %s\n" % e)
    logger.info('Patched Service account: %s in namespace %s', sa_name, namespace)
def get_creds_name_from_config_map(creds):
"""
Get the credentials name from inferenceservice config map.
"""
try:
isvc_config_map = client.CoreV1Api().read_namespaced_config_map(
constants.INFERENCESERVICE_CONFIG_MAP_NAME,
constants.INFERENCESERVICE_SYSTEM_NAMESPACE)
except client.rest.ApiException:
logging.warning('Cannot get configmap %s in namespace %s.',
constants.INFERENCESERVICE_CONFIG_MAP_NAME,
constants.INFERENCESERVICE_SYSTEM_NAMESPACE)
return None
isvc_creds_str = isvc_config_map.data['credentials']
isvc_creds_json = json.loads(isvc_creds_str)
if creds == 'gcsCredentialFileName':
return isvc_creds_json['gcs']['gcsCredentialFileName']
elif creds == 's3AccessKeyIDName':
return isvc_creds_json['s3']['s3AccessKeyIDName']
elif creds == 's3SecretAccessKeyName':
return isvc_creds_json['s3']['s3SecretAccessKeyName']
else:
raise RuntimeError("Unknown credentials.")
| 38.679389
| 98
| 0.67091
|
69767362695f02aa204d4a40ac63f4051d009e16
| 1,876
|
py
|
Python
|
solutions/543. Diameter of Binary Tree.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/543. Diameter of Binary Tree.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/543. Diameter of Binary Tree.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
"""
Runtime: 472 ms, faster than 6.80% of Python3 online submissions for Diameter of Binary Tree.
Memory Usage: 16 MB, less than 93.37% of Python3 online submissions for Diameter of Binary Tree.
"""
from typing import List
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def diameterOfBinaryTree(self, root: Optional[TreeNode]) -> int:
if (root is None) or (root.left is None and root.right is None):
return 0
elif root.left is None:
return max(self.diameterOfBinaryTree(root.right), self.depthOfBinaryTree(root.right)+1)
elif root.right is None:
return max(self.diameterOfBinaryTree(root.left), self.depthOfBinaryTree(root.left) + 1)
else:
l_dia = self.diameterOfBinaryTree(root.left)
r_dia = self.diameterOfBinaryTree(root.right)
l_dep = self.depthOfBinaryTree(root.left)
r_dep = self.depthOfBinaryTree(root.right)
choices = [l_dia, r_dia, l_dep + r_dep + 2]
return max(choices)
def depthOfBinaryTree(self, root: Optional[TreeNode]) -> int:
if (root is None) or (root.left is None and root.right is None):
return 0
elif root.left is None:
return self.depthOfBinaryTree(root.right) + 1
elif root.right is None:
return self.depthOfBinaryTree(root.left) + 1
else:
max_d = max(self.depthOfBinaryTree(root.right), self.depthOfBinaryTree(root.left))
return max_d + 1
def main():
sol = Solution()
t = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
print('Output:', sol.diameterOfBinaryTree(t))
print('Expected:', 3)
if __name__ == "__main__":
main()
| 38.285714
| 99
| 0.637527
|
a81eee5a35567b4f49467a43fb07a1f130595f4f
| 10,963
|
py
|
Python
|
runstats/core.py
|
cimeister/runstats
|
81bb3f7eb8dc1adc197dc3761c28dcc46fc76976
|
[
"Apache-2.0"
] | 1
|
2020-12-07T11:23:01.000Z
|
2020-12-07T11:23:01.000Z
|
runstats/core.py
|
cimeister/runstats
|
81bb3f7eb8dc1adc197dc3761c28dcc46fc76976
|
[
"Apache-2.0"
] | null | null | null |
runstats/core.py
|
cimeister/runstats
|
81bb3f7eb8dc1adc197dc3761c28dcc46fc76976
|
[
"Apache-2.0"
] | null | null | null |
"""Python RunStats
Compute Statistics and Regression in a single pass.
"""
from __future__ import division
class Statistics(object):
"""Compute statistics in a single pass.
# pylint: disable=too-many-instance-attributes
Computes the minimum, maximum, mean, variance, standard deviation,
skewness, and kurtosis.
Statistics objects may also be added together and copied.
Based entirely on the C++ code by John D Cook at
http://www.johndcook.com/skewness_kurtosis.html
"""
def __init__(self, iterable=()):
"""Initialize Statistics object.
Iterates optional parameter `iterable` and pushes each value into the
statistics summary.
"""
self.clear()
for value in iterable:
self.push(value)
def clear(self):
"""Clear Statistics object."""
self._count = self._eta = self._rho = self._rho2 \
= self._max_offset = self._tau = self._phi = 0.0
self._min = self._max = float('nan')
self._last = None
def __eq__(self, that):
return self.get_state() == that.get_state()
def __ne__(self, that):
return self.get_state() != that.get_state()
def get_state(self):
"""Get internal state."""
return (
self._count,
self._eta,
self._rho,
self._rho2,
self._tau,
self._phi,
self._min,
self._max,
self._max_offset
)
def set_state(self, state):
"""Set internal state."""
(
self._count,
self._eta,
self._rho,
self._rho2,
self._tau,
self._phi,
self._min,
self._max,
self._max_offset
) = state
@classmethod
def fromstate(cls, state):
"""Return Statistics object from state."""
stats = cls()
stats.set_state(state)
return stats
def __reduce__(self):
return make_statistics, (self.get_state(),)
def copy(self, _=None):
"""Copy Statistics object."""
return self.fromstate(self.get_state())
__copy__ = copy
__deepcopy__ = copy
def __len__(self):
"""Number of values that have been pushed."""
return int(self._count)
def push(self, value, cur_max=None):
"""Add `value` to the Statistics summary."""
value = float(value)
if self._count == 0.0:
self._min = value
self._max = value
else:
self._min = min(self._min, value)
self._max = max(self._max, value)
delta = value - self._eta
delta_n = delta / (self._count + 1)
delta_n2 = delta_n * delta_n
term = delta * delta_n * self._count
self._count += 1
self._eta += delta_n
self._phi += (
term * delta_n2 * (self._count ** 2 - 3 * self._count + 3)
+ 6 * delta_n2 * self._rho
- 4 * delta_n * self._tau
)
self._tau += (
term * delta_n * (self._count - 2)
- 3 * delta_n * self._rho
)
self._rho += term
        # Additions to the original algorithm: track squared successive differences
        # and the squared offset from a supplied running maximum.
if self._last is not None:
self._rho2 += (value - self._last)**2
self._last = value
if cur_max is not None:
self._max_offset += (value - cur_max)**2
def minimum(self):
"""Minimum of values."""
return self._min
def maximum(self):
"""Maximum of values."""
return self._max
def mean(self):
"""Mean of values."""
return self._eta
def max_offset(self):
"""Mean of values."""
return self._max_offset / self._count
def local_variance(self, ddof=1.0):
"""Variance of values (with `ddof` degrees of freedom)."""
return self._rho2 / (self._count - ddof)
def variance(self, ddof=1.0):
"""Variance of values (with `ddof` degrees of freedom)."""
return self._rho / (self._count - ddof)
def stddev(self, ddof=1.0):
"""Standard deviation of values (with `ddof` degrees of freedom)."""
return self.variance(ddof) ** 0.5
def skewness(self):
"""Skewness of values."""
return (self._count ** 0.5) * self._tau / pow(self._rho, 1.5)
def kurtosis(self):
"""Kurtosis of values."""
return self._count * self._phi / (self._rho * self._rho) - 3.0
def __add__(self, that):
"""Add two Statistics objects together."""
sigma = self.copy()
sigma += that
return sigma
def __iadd__(self, that):
"""Add another Statistics object to this one."""
sum_count = self._count + that._count
if sum_count == 0:
return self
delta = that._eta - self._eta
delta2 = delta ** 2
delta3 = delta ** 3
delta4 = delta ** 4
sum_eta = (
(self._count * self._eta + that._count * that._eta)
/ sum_count
)
sum_rho = (
self._rho + that._rho
+ delta2 * self._count * that._count / sum_count
)
sum_rho2 = (
self._rho2 + that._rho2
)
sum_tau = (
self._tau + that._tau
+ delta3 * self._count * that._count
* (self._count - that._count) / (sum_count ** 2)
+ 3.0 * delta
* (self._count * that._rho - that._count * self._rho) / sum_count
)
sum_phi = (
self._phi + that._phi
+ delta4 * self._count * that._count
* (self._count ** 2 - self._count * that._count + that._count ** 2)
/ (sum_count ** 3)
+ 6.0 * delta2 * (
self._count * self._count * that._rho
+ that._count * that._count * self._rho
)
/ (sum_count ** 2)
+ 4.0 * delta
* (self._count * that._tau - that._count * self._tau) / sum_count
)
if self._count == 0.0:
self._min = that._min
self._max = that._max
elif that._count != 0.0:
self._min = min(self._min, that._min)
self._max = max(self._max, that._max)
self._count = sum_count
self._eta = sum_eta
self._rho = sum_rho
self._rho2 = sum_rho2
self._tau = sum_tau
self._phi = sum_phi
return self
def __mul__(self, that):
"""Multiply by a scalar to change Statistics weighting."""
sigma = self.copy()
sigma *= that
return sigma
__rmul__ = __mul__
def __imul__(self, that):
"""Multiply by a scalar to change Statistics weighting in-place."""
that = float(that)
self._count *= that
self._rho *= that
self._rho2 *= that
self._tau *= that
self._phi *= that
return self
def make_statistics(state):
"""Make Statistics object from state."""
return Statistics.fromstate(state)
class Regression(object):
"""
Compute simple linear regression in a single pass.
Computes the slope, intercept, and correlation.
Regression objects may also be added together and copied.
Based entirely on the C++ code by John D Cook at
http://www.johndcook.com/running_regression.html
"""
def __init__(self, iterable=()):
"""Initialize Regression object.
Iterates optional parameter `iterable` and pushes each pair into the
regression summary.
"""
self._xstats = Statistics()
self._ystats = Statistics()
self.clear()
for xcoord, ycoord in iterable:
self.push(xcoord, ycoord)
def __eq__(self, that):
return self.get_state() == that.get_state()
def __ne__(self, that):
return self.get_state() != that.get_state()
def clear(self):
"""Clear Regression object."""
self._xstats.clear()
self._ystats.clear()
self._count = self._sxy = 0.0
def get_state(self):
"""Get internal state."""
return (
self._count, self._sxy, self._xstats.get_state(),
self._ystats.get_state()
)
def set_state(self, state):
"""Set internal state."""
count, sxy, xstats, ystats = state
self._count = count
self._sxy = sxy
self._xstats.set_state(xstats)
self._ystats.set_state(ystats)
@classmethod
def fromstate(cls, state):
"""Return Regression object from state."""
regr = cls()
regr.set_state(state)
return regr
def __reduce__(self):
return make_regression, (self.get_state(),)
def copy(self, _=None):
"""Copy Regression object."""
return self.fromstate(self.get_state())
__copy__ = copy
__deepcopy__ = copy
def __len__(self):
"""Number of values that have been pushed."""
return int(self._count)
def push(self, xcoord, ycoord):
"""Add a pair `(x, y)` to the Regression summary."""
self._sxy += (
(self._xstats.mean() - xcoord)
* (self._ystats.mean() - ycoord)
* self._count
/ (self._count + 1)
)
self._xstats.push(xcoord)
self._ystats.push(ycoord)
self._count += 1
def slope(self, ddof=1.0):
"""Slope of values (with `ddof` degrees of freedom)."""
sxx = self._xstats.variance(ddof) * (self._count - ddof)
return self._sxy / sxx
def intercept(self, ddof=1.0):
"""Intercept of values (with `ddof` degrees of freedom)."""
return self._ystats.mean() - self.slope(ddof) * self._xstats.mean()
def correlation(self, ddof=1.0):
"""Correlation of values (with `ddof` degrees of freedom)."""
term = self._xstats.stddev(ddof) * self._ystats.stddev(ddof)
return self._sxy / ((self._count - ddof) * term)
def __add__(self, that):
"""Add two Regression objects together."""
sigma = self.copy()
sigma += that
return sigma
def __iadd__(self, that):
"""Add another Regression object to this one."""
sum_count = self._count + that._count
if sum_count == 0:
return self
sum_xstats = self._xstats + that._xstats
sum_ystats = self._ystats + that._ystats
deltax = that._xstats.mean() - self._xstats.mean()
deltay = that._ystats.mean() - self._ystats.mean()
sum_sxy = (
self._sxy + that._sxy
+ self._count * that._count * deltax * deltay / sum_count
)
self._count = sum_count
self._xstats = sum_xstats
self._ystats = sum_ystats
self._sxy = sum_sxy
return self
def make_regression(state):
"""Make Regression object from state."""
return Regression.fromstate(state)
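# --- Hedged usage sketch (editor addition, not part of the original module) ---
# Exercises the two classes above on small invented samples.
if __name__ == '__main__':
    stats = Statistics([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    print('mean     ', stats.mean())
    print('stddev   ', stats.stddev(ddof=0.0))  # population standard deviation
    print('min/max  ', stats.minimum(), stats.maximum())
    regr = Regression([(1.0, 2.1), (2.0, 3.9), (3.0, 6.2), (4.0, 7.8)])
    print('slope    ', regr.slope())
    print('intercept', regr.intercept())
    print('r        ', regr.correlation())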
| 27.966837
| 79
| 0.551035
|
7f5534c8573bae5036dbd860f93da0aea314de22
| 21,151
|
py
|
Python
|
com/vmware/nsx_policy/infra/deployment_zones_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx_policy/infra/deployment_zones_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/nsx_policy/infra/deployment_zones_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.deployment_zones.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class EnforcementPoints(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.deployment_zones.enforcement_points'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _EnforcementPointsStub)
def delete(self,
deployment_zone_id,
enforcementpoint_id,
):
"""
Delete EnforcementPoint
:type deployment_zone_id: :class:`str`
:param deployment_zone_id: Deployment zone id (required)
:type enforcementpoint_id: :class:`str`
:param enforcementpoint_id: enforcementpoint-id (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'deployment_zone_id': deployment_zone_id,
'enforcementpoint_id': enforcementpoint_id,
})
def get(self,
deployment_zone_id,
enforcementpoint_id,
):
"""
Read an Enforcement Point
:type deployment_zone_id: :class:`str`
:param deployment_zone_id: Deployment zone id (required)
:type enforcementpoint_id: :class:`str`
:param enforcementpoint_id: EnforcementPoint id (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.EnforcementPoint`
:return: com.vmware.nsx_policy.model.EnforcementPoint
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'deployment_zone_id': deployment_zone_id,
'enforcementpoint_id': enforcementpoint_id,
})
def list(self,
deployment_zone_id,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Paginated list of all enforcementpoints for infra.
:type deployment_zone_id: :class:`str`
:param deployment_zone_id: (required)
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx_policy.model_client.EnforcementPointListResult`
:return: com.vmware.nsx_policy.model.EnforcementPointListResult
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'deployment_zone_id': deployment_zone_id,
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def patch(self,
deployment_zone_id,
enforcementpoint_id,
enforcement_point,
):
"""
If the passed Enforcement Point does not already exist, create a new
Enforcement Point. If it already exists, patch it.
:type deployment_zone_id: :class:`str`
:param deployment_zone_id: Deployment zone id (required)
:type enforcementpoint_id: :class:`str`
:param enforcementpoint_id: EnforcementPoint id (required)
:type enforcement_point: :class:`com.vmware.nsx_policy.model_client.EnforcementPoint`
:param enforcement_point: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('patch',
{
'deployment_zone_id': deployment_zone_id,
'enforcementpoint_id': enforcementpoint_id,
'enforcement_point': enforcement_point,
})
def update(self,
deployment_zone_id,
enforcementpoint_id,
enforcement_point,
):
"""
If the passed Enforcement Point does not already exist, create a new
Enforcement Point. If it already exists, replace it.
:type deployment_zone_id: :class:`str`
:param deployment_zone_id: Deployment zone id (required)
:type enforcementpoint_id: :class:`str`
:param enforcementpoint_id: EnforcementPoint id (required)
:type enforcement_point: :class:`com.vmware.nsx_policy.model_client.EnforcementPoint`
:param enforcement_point: (required)
:rtype: :class:`com.vmware.nsx_policy.model_client.EnforcementPoint`
:return: com.vmware.nsx_policy.model.EnforcementPoint
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'deployment_zone_id': deployment_zone_id,
'enforcementpoint_id': enforcementpoint_id,
'enforcement_point': enforcement_point,
})
class _EnforcementPointsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'deployment_zone_id': type.StringType(),
'enforcementpoint_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/policy/api/v1/infra/deployment-zones/{deployment-zone-id}/enforcement-points/{enforcementpoint-id}',
path_variables={
'deployment_zone_id': 'deployment-zone-id',
'enforcementpoint_id': 'enforcementpoint-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'deployment_zone_id': type.StringType(),
'enforcementpoint_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
HasFieldsOfValidator()
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/deployment-zones/{deployment-zone-id}/enforcement-points/{enforcementpoint-id}',
path_variables={
'deployment_zone_id': 'deployment-zone-id',
'enforcementpoint_id': 'enforcementpoint-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'deployment_zone_id': type.StringType(),
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
HasFieldsOfValidator()
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/policy/api/v1/infra/deployment-zones/{deployment-zone-id}/enforcement-points',
path_variables={
'deployment_zone_id': 'deployment-zone-id',
},
query_parameters={
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
# properties for patch operation
patch_input_type = type.StructType('operation-input', {
'deployment_zone_id': type.StringType(),
'enforcementpoint_id': type.StringType(),
'enforcement_point': type.ReferenceType('com.vmware.nsx_policy.model_client', 'EnforcementPoint'),
})
patch_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
patch_input_value_validator_list = [
HasFieldsOfValidator()
]
patch_output_validator_list = [
]
patch_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/policy/api/v1/infra/deployment-zones/{deployment-zone-id}/enforcement-points/{enforcementpoint-id}',
request_body_parameter='enforcement_point',
path_variables={
'deployment_zone_id': 'deployment-zone-id',
'enforcementpoint_id': 'enforcementpoint-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'deployment_zone_id': type.StringType(),
'enforcementpoint_id': type.StringType(),
'enforcement_point': type.ReferenceType('com.vmware.nsx_policy.model_client', 'EnforcementPoint'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
HasFieldsOfValidator()
]
update_output_validator_list = [
HasFieldsOfValidator()
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/policy/api/v1/infra/deployment-zones/{deployment-zone-id}/enforcement-points/{enforcementpoint-id}',
request_body_parameter='enforcement_point',
path_variables={
'deployment_zone_id': 'deployment-zone-id',
'enforcementpoint_id': 'enforcementpoint-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'EnforcementPoint'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'EnforcementPointListResult'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'patch': {
'input_type': patch_input_type,
'output_type': type.VoidType(),
'errors': patch_error_dict,
'input_value_validator_list': patch_input_value_validator_list,
'output_validator_list': patch_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'EnforcementPoint'),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'list': list_rest_metadata,
'patch': patch_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx_policy.infra.deployment_zones.enforcement_points',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'EnforcementPoints': EnforcementPoints,
}
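# --- Hedged usage sketch (editor addition, not part of the generated stub) ---
# Shows how the EnforcementPoints service above is typically driven. It assumes
# a StubConfiguration ``stub_config`` pointing at a reachable NSX policy manager;
# the deployment zone id is invented, and the ``results`` attribute is assumed
# from the EnforcementPointListResult model.
def _enforcement_points_demo(stub_config):
    ep_service = EnforcementPoints(stub_config)
    listing = ep_service.list('default-deployment-zone')
    for enforcement_point in listing.results:
        print(enforcement_point)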
| 44.622363
| 127
| 0.604463
|
3c8fe959d77775122bfbb76d80b4ae1c2ccf6e8c
| 4,928
|
py
|
Python
|
tests/test_valid_tzdatetimefield.py
|
heavenshell/django-timezone-utils
|
e6a3da069dd3490135c57dc8ca94877e03026ac5
|
[
"MIT"
] | 28
|
2015-02-06T01:41:15.000Z
|
2020-12-27T19:23:18.000Z
|
tests/test_valid_tzdatetimefield.py
|
heavenshell/django-timezone-utils
|
e6a3da069dd3490135c57dc8ca94877e03026ac5
|
[
"MIT"
] | 15
|
2015-02-06T01:41:15.000Z
|
2022-02-16T02:04:17.000Z
|
tests/test_valid_tzdatetimefield.py
|
heavenshell/django-timezone-utils
|
e6a3da069dd3490135c57dc8ca94877e03026ac5
|
[
"MIT"
] | 10
|
2015-02-06T01:57:08.000Z
|
2022-02-14T10:46:38.000Z
|
# ==============================================================================
# IMPORTS
# ==============================================================================
# Python
from datetime import datetime
import pytz
# Django
from django.conf import settings
from django.test import TestCase
from django.utils.timezone import make_aware
# App
from tests.models import TZWithGoodStringDefault
from .models import (ModelWithDateTimeOnly, CallableTimeStampedModel,
StaticTimeStampedModel, ModelWithForeignKeyToTimeZone,
NullModelWithDateTimeOnly, ModelWithLocalTimeZone,
ModelWithLocalTZCharField, TZTimeFramedModel)
# ==============================================================================
# TESTS: Valid DateTimeWithTimeZoneField
# ==============================================================================
class DateTimeWithTimeZoneFieldTestCase(TestCase):
def setUp(self):
ModelWithDateTimeOnly.objects.create()
CallableTimeStampedModel.objects.create()
StaticTimeStampedModel.objects.create()
NullModelWithDateTimeOnly.objects.create()
ModelWithLocalTimeZone.objects.create()
location = TZWithGoodStringDefault.objects.create()
ModelWithForeignKeyToTimeZone.objects.create(other_model=location)
TZTimeFramedModel.objects.create(
start=datetime(2014, 1, 1),
end=make_aware(datetime(2014, 12, 31), pytz.timezone('US/Eastern')),
other_model=location
)
def test_that_model_timestamp_is_unaltered(self):
"""Make sure that we aren't modifying the timezone if one is not
provided.
"""
model_instance = ModelWithDateTimeOnly.objects.get()
self.assertEquals(
model_instance.timestamp,
settings.TEST_DATETIME
)
def test_callable_timestamp_time_override(self):
model_instance = CallableTimeStampedModel.objects.get()
self.assertEquals(
model_instance.start.astimezone(
pytz.timezone(settings.TIME_ZONE)
).time(),
datetime.min.time(),
'Start time does not match datetime.min.time().'
)
self.assertEquals(
model_instance.end.astimezone(
pytz.timezone(settings.TIME_ZONE)
).time(),
datetime.max.time(),
'End time does not match datetime.max.time().'
)
def test_datetime_time_timestamp_override(self):
model_instance = StaticTimeStampedModel.objects.get()
tz = pytz.timezone(settings.TIME_ZONE)
start_time = tz.normalize(
make_aware(datetime(2014, 1, 1, 0, 0, 0, 0), tz)
)
end_time = tz.normalize(
make_aware(datetime(2014, 1, 1, 23, 59, 59, 999999), tz)
)
self.assertEquals(
model_instance.start,
start_time,
'Start time != datetime.min.time(): ({0} != {1})'.format(
repr(model_instance.start),
repr(start_time)
)
)
self.assertEquals(
model_instance.end,
end_time,
'End time != datetime.max.time(): ({0} != {1})'.format(
repr(model_instance.end),
repr(end_time)
)
)
def test_populate_from_foreignkey_timezone(self):
model_instance = ModelWithForeignKeyToTimeZone.objects.get()
self.assertEqual(
model_instance.timestamp,
settings.TEST_DATETIME,
)
def test_populate_from_local_timezone(self):
model_instance = ModelWithLocalTimeZone.objects.get()
self.assertEqual(
model_instance.timestamp,
settings.TEST_DATETIME,
)
def test_populate_from_local_timezone_charfield(self):
model_instance = ModelWithLocalTZCharField.objects.create()
self.assertEqual(
model_instance.timestamp,
settings.TEST_DATETIME
)
def test_to_python_conversion(self):
model_instance = ModelWithForeignKeyToTimeZone.objects.get()
self.assertEqual(
model_instance.timestamp,
settings.TEST_DATETIME
)
self.assertEqual(
model_instance.timestamp.tzinfo,
pytz.timezone('US/Eastern').normalize(
model_instance.timestamp
).tzinfo
)
self.assertEqual(
str(model_instance.timestamp),
'2013-12-31 19:00:00-05:00'
)
def test_full_overrides(self):
model_instance = TZTimeFramedModel.objects.get()
self.assertEqual(
str(model_instance.start),
'2014-01-01 00:00:00-05:00'
)
self.assertEqual(
str(model_instance.end),
'2014-12-31 23:59:59.999999-05:00'
)
| 33.753425
| 80
| 0.576705
|
1199940b95d378f5b87c8ff37e6af5beb2ef0b3b
| 4,465
|
py
|
Python
|
examples/surname_classification_with_cnn/surname_dataset.py
|
Jochen-M/pytorch_nlp
|
75ffbe60d1a9c383981396f346c6dcbabbb9e5d7
|
[
"Apache-2.0"
] | 1
|
2019-11-21T13:07:41.000Z
|
2019-11-21T13:07:41.000Z
|
examples/surname_classification_with_cnn/surname_dataset.py
|
Jochen-M/pytorch_nlp
|
75ffbe60d1a9c383981396f346c6dcbabbb9e5d7
|
[
"Apache-2.0"
] | null | null | null |
examples/surname_classification_with_cnn/surname_dataset.py
|
Jochen-M/pytorch_nlp
|
75ffbe60d1a9c383981396f346c6dcbabbb9e5d7
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf8
import json
import torch
import pandas as pd
from torch.utils.data import Dataset
from examples.surname_classification_with_cnn.surname_vectorizer import SurnameVectorizer
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
:param surname_df (pandas.DataFrame): the dataset
:param vectorizer (SurnameVectorizer): vectorizer instantiated from dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split == "train"]
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split == "val"]
self.val_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split == "test"]
self.test_size = len(self.test_df)
self._lookup_dict = {"train": (self.train_df, self.train_size),
"val": (self.val_df, self.val_size),
"test": (self.test_df, self.test_size)}
self.set_split("train")
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
""" Load dataset and make a new vectorizer from scratch
:param surname_csv (str): location of the dataset
:return an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
return cls(surname_df, SurnameVectorizer.from_dataframe(surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
""" Load dataset and the corresponding vectorizer.
Used in the case in the vectorizer has been cached for re-use
:param surname_csv (str): location of the dataset
:param vectorizer_filepath (str): location of the saved vectorizer
:return an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer(vectorizer_filepath):
""" A static method for loading the vectorizer from file
:param vectorizer_filepath (str): the location of the serialized vectorizer
:return an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
""" Saves the vectorizer to disk using json
:param vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
"""
:return the vectorizer
"""
return self._vectorizer
def set_split(self, split="train"):
""" Selects the splits in the dataset using a column in the dataframe
:param split (str): one of "train", "val", or "test"
:return
"""
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
""" The primary entry point method for PyTorch datasets
:param index (int): the index to the data point
:return a dictionary holding the data point's features (x_data) and label (y_target)
"""
row = self._target_df.iloc[index]
surname_matrix = self._vectorizer.vectorize(row.surname)
nationality_index = self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {"x_surname": surname_matrix,
"y_nationality": nationality_index}
def get_num_batches(self, batch_size):
""" Given a batch size, return the number of batches in the dataset
:param batch_size (int)
:return number of batches in the dataset
"""
return len(self) // batch_size
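if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module: the CSV path and
    # batch size are illustrative assumptions; the file must provide the
    # surname, nationality and split columns this class reads.
    dataset = SurnameDataset.load_dataset_and_make_vectorizer("surnames_with_splits.csv")
    dataset.set_split("train")
    print("train rows:", len(dataset), "- batches of 64:", dataset.get_num_batches(batch_size=64))
    sample = dataset[0]
    print("sample keys:", sorted(sample.keys()))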
| 36.300813
| 92
| 0.663382
|
37c7c6653da52bab1d4dd0c31c7b66f987f0f2ae
| 890
|
py
|
Python
|
wdfn-server/waterdata/services/sifta.py
|
skaymen/waterdataui
|
3f419360771a007f8b441d631c782cd67a4e78b4
|
[
"CC0-1.0"
] | null | null | null |
wdfn-server/waterdata/services/sifta.py
|
skaymen/waterdataui
|
3f419360771a007f8b441d631c782cd67a4e78b4
|
[
"CC0-1.0"
] | null | null | null |
wdfn-server/waterdata/services/sifta.py
|
skaymen/waterdataui
|
3f419360771a007f8b441d631c782cd67a4e78b4
|
[
"CC0-1.0"
] | null | null | null |
"""
Helpers to retrieve SIFTA cooperator data.
"""
from waterdata import app
from waterdata.utils import execute_get_request
def get_cooperators(site_no, district_cd):
"""
    Gets the cooperator data (JSON) from the SIFTA service; the lookup is currently behind a feature toggle and limited to district codes 20 and 51
:param site_no: USGS site number
:param district_cd: the district code of the monitoring location
"""
# Handle feature flag for cooperator data
if not app.config['COOPERATOR_LOOKUP_ENABLED'] or (
app.config['COOPERATOR_LOOKUP_ENABLED'] is not True and
district_cd not in app.config['COOPERATOR_LOOKUP_ENABLED']):
return []
url = app.config['COOPERATOR_SERVICE_PATTERN'].format(site_no=site_no)
response = execute_get_request(url)
if response.status_code != 200:
return []
return response.json().get('Customers', [])
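# A hedged usage sketch (not part of the original module); the site number and
# district code are illustrative, and the 'Name' key is an assumption about the
# SIFTA payload -- the function itself only guarantees the list under 'Customers':
#
#     cooperators = get_cooperators('05428500', '55')
#     names = [customer.get('Name') for customer in cooperators]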
| 30.689655
| 114
| 0.708989
|
43e229010da2ae4bc7eb6e09e7f0b3b191ea1a4f
| 21,552
|
py
|
Python
|
python-package/basedosdados/cli/cli.py
|
ProfLeao/mais
|
364baa36a21d1189be594ecb9915f6fcc73c8990
|
[
"MIT"
] | 1
|
2021-02-25T04:04:23.000Z
|
2021-02-25T04:04:23.000Z
|
python-package/basedosdados/cli/cli.py
|
ProfLeao/mais
|
364baa36a21d1189be594ecb9915f6fcc73c8990
|
[
"MIT"
] | null | null | null |
python-package/basedosdados/cli/cli.py
|
ProfLeao/mais
|
364baa36a21d1189be594ecb9915f6fcc73c8990
|
[
"MIT"
] | null | null | null |
import os
import time
import basedosdados as bd
import click
from basedosdados.upload.base import Base
from basedosdados.upload.dataset import Dataset
from basedosdados.upload.storage import Storage
from basedosdados.upload.table import Table
@click.group()
@click.option("--templates", default=None, help="Templates path")
@click.option("--bucket_name", default=None, help="Project bucket name")
@click.option("--metadata_path", default=None, help="Folder to store metadata")
@click.pass_context
def cli(ctx, templates, bucket_name, metadata_path):
ctx.obj = dict(
templates=templates,
bucket_name=bucket_name,
metadata_path=metadata_path,
)
@click.group(name="dataset")
@click.pass_context
def cli_dataset(ctx):
pass
@cli_dataset.command(name="init", help="Initialize metadata files of dataset")
@click.argument("dataset_id")
@click.option(
"--replace",
is_flag=True,
help="Whether to replace current metadata files",
)
@click.pass_context
def init_dataset(ctx, dataset_id, replace):
d = Dataset(dataset_id=dataset_id, **ctx.obj).init(replace=replace)
click.echo(
click.style(
f"Dataset `{dataset_id}` folder and metadata were created at {d.metadata_path}",
fg="green",
)
)
def mode_text(mode, verb, obj_id):
if mode == "all":
text = f"Datasets `{obj_id}` and `{obj_id}_staging` were {verb} in BigQuery"
elif mode == "staging":
        text = f"Dataset `{obj_id}_staging` was {verb} in BigQuery"
elif mode == "prod":
text = f"Dataset `{obj_id}` was {verb} in BigQuery"
return text
@cli_dataset.command(name="create", help="Create dataset on BigQuery")
@click.argument("dataset_id")
@click.option(
"--mode", "-m", default="all", help="What datasets to create [all|staging|prod]"
)
@click.option(
"--if_exists",
default="raise",
    help="[raise|update|replace|pass] if dataset already exists",
)
@click.pass_context
def create_dataset(ctx, dataset_id, mode, if_exists):
Dataset(dataset_id=dataset_id, **ctx.obj).create(mode=mode, if_exists=if_exists)
click.echo(
click.style(
mode_text(mode, "created", dataset_id),
fg="green",
)
)
@cli_dataset.command(name="update", help="Update dataset on BigQuery")
@click.argument("dataset_id")
@click.option(
    "--mode", "-m", default="all", help="What datasets to update [all|staging|prod]"
)
@click.pass_context
def update_dataset(ctx, dataset_id, mode):
Dataset(dataset_id=dataset_id, **ctx.obj).update(mode=mode)
click.echo(
click.style(
mode_text(mode, "updated", dataset_id),
fg="green",
)
)
@cli_dataset.command(name="publicize", help="Make a dataset public")
@click.argument("dataset_id")
@click.pass_context
def publicize_dataset(ctx, dataset_id):
Dataset(dataset_id=dataset_id, **ctx.obj).publicize()
click.echo(
click.style(
f"Dataset `{dataset_id}` became public!",
fg="green",
)
)
@cli_dataset.command(name="delete", help="Delete dataset")
@click.argument("dataset_id")
@click.option(
    "--mode", "-m", default="all", help="What datasets to delete [all|staging|prod]"
)
@click.pass_context
def delete_dataset(ctx, dataset_id, mode):
if click.confirm(f"Are you sure you want to delete `{dataset_id}`?"):
Dataset(dataset_id=dataset_id, **ctx.obj).delete(mode=mode)
click.echo(
click.style(
mode_text(mode, "deleted", dataset_id),
fg="green",
)
)
@click.group(name="table")
def cli_table():
pass
@cli_table.command(name="init", help="Create metadata files")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--data_sample_path",
default=None,
help="Sample data used to pre-fill metadata",
type=click.Path(exists=True),
)
@click.option(
"--if_folder_exists",
default="raise",
help="[raise|replace|pass] actions if table folder exists",
)
@click.option(
"--if_table_config_exists",
default="raise",
help="[raise|replace|pass] actions if table config files already exist",
)
@click.option(
"--columns_config_url",
default=None,
help="google sheets URL. Must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>. The sheet must contain the column name: 'coluna' and column description: 'descricao'.",
)
@click.pass_context
def init_table(
ctx,
dataset_id,
table_id,
data_sample_path,
if_folder_exists,
if_table_config_exists,
columns_config_url,
):
t = Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).init(
data_sample_path=data_sample_path,
if_folder_exists=if_folder_exists,
if_table_config_exists=if_table_config_exists,
columns_config_url=columns_config_url,
)
click.echo(
click.style(
f"Table `{table_id}` folder and metadata were created at {t.metadata_path}{dataset_id}",
fg="green",
)
)
@cli_table.command(name="create", help="Create stagging table in BigQuery")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--path",
"-p",
type=click.Path(exists=True),
default=None,
help="Path of data folder or file.",
)
@click.option(
    "--job_config_params", default=None, help="File with advanced load config params"
)
@click.option(
"--if_table_exists",
default="raise",
help="[raise|replace|pass] actions if table exists",
)
@click.option(
"--force_dataset",
default=True,
    help="Whether to automatically create the dataset folders and the dataset in BigQuery",
)
@click.option(
"--if_storage_data_exists",
default="raise",
help="[raise|replace|pass] actions if table data already exists at Storage",
)
@click.option(
"--if_table_config_exists",
default="raise",
help="[raise|replace|pass] actions if table config files already exist",
)
@click.option(
"--columns_config_url",
default=None,
help="google sheets URL. Must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>. The sheet must contain the column name: 'coluna' and column description: 'descricao'.",
)
@click.pass_context
def create_table(
ctx,
dataset_id,
table_id,
path,
job_config_params,
if_table_exists,
force_dataset,
if_storage_data_exists,
if_table_config_exists,
columns_config_url,
):
Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).create(
path=path,
job_config_params=job_config_params,
if_table_exists=if_table_exists,
force_dataset=force_dataset,
if_storage_data_exists=if_storage_data_exists,
if_table_config_exists=if_table_config_exists,
columns_config_url=columns_config_url,
)
click.echo(
click.style(
f"Table `{dataset_id}_staging.{table_id}` was created in BigQuery",
fg="green",
)
)
@cli_table.command(name="update", help="Update tables in BigQuery")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--mode",
default="all",
help="Choose a table from a dataset to update [all|staging|prod]",
)
@click.pass_context
def update_table(ctx, dataset_id, table_id, mode):
Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).update(
mode=mode,
)
click.echo(
click.style(
f"All tables `{dataset_id}*.{table_id}` were updated in BigQuery",
fg="green",
)
)
@cli_table.command(
name="update_columns", help="Update columns descriptions in tables_config.yaml "
)
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--columns_config_url",
default=None,
help="google sheets URL. Must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>. The sheet must contain the column name: 'coluna' and column description: 'descricao'.",
)
@click.pass_context
def update_columns(ctx, dataset_id, table_id, columns_config_url):
Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).update_columns(
columns_config_url=columns_config_url,
)
click.echo(
click.style(
f"All columns descriptions `{dataset_id}*.{table_id}` were updated in table_config.yaml",
fg="green",
)
)
@cli_table.command(name="publish", help="Publish staging table to prod")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--if_exists",
default="raise",
help="[raise|replace] actions if table exists",
)
@click.pass_context
def publish_table(ctx, dataset_id, table_id, if_exists):
Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).publish(
if_exists=if_exists,
)
click.echo(
click.style(
f"Table `{dataset_id}.{table_id}` was published in BigQuery",
fg="green",
)
)
@cli_table.command(name="delete", help="Delete BigQuery table")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option("--mode", help="Which table to delete [all|prod|staging]", required=True)
@click.pass_context
def delete_table(ctx, dataset_id, table_id, mode):
Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).delete(
mode=mode,
)
@cli_table.command(name="append", help="Append new data to existing table")
@click.argument("dataset_id")
@click.argument("table_id")
@click.argument("filepath", type=click.Path(exists=True))
@click.option("--partitions", help="Data partition as `value=key/value2=key2`")
@click.option(
"--if_exists",
default="raise",
    help="[raise|replace|pass] if file already exists",
)
@click.pass_context
def upload_table(ctx, dataset_id, table_id, filepath, partitions, if_exists):
blob_name = Table(table_id=table_id, dataset_id=dataset_id, **ctx.obj).append(
filepath=filepath, partitions=partitions, if_exists=if_exists
)
click.echo(
click.style(
f"Data was added to `{dataset_id}.{table_id}`",
fg="green",
)
)
@click.group(name="storage")
def cli_storage():
pass
@cli_storage.command(name="init", help="Create bucket and initial folders")
@click.option("--bucket_name", default="basedosdados", help="Bucket name")
@click.option(
"--replace",
is_flag=True,
help="Whether to replace current bucket files",
)
@click.option(
"--very-sure/--not-sure",
default=False,
help="Are you sure that you want to replace current bucket files?",
)
@click.pass_context
def init_storage(ctx, bucket_name, replace, very_sure):
# TODO: Create config file to store bucket_name, etc...
ctx.obj.pop("bucket_name")
Storage(bucket_name=bucket_name, **ctx.obj).init(
replace=replace, very_sure=very_sure
)
click.echo(
click.style(
f"Bucket `{bucket_name}` was created",
fg="green",
)
)
@cli_storage.command(name="upload", help="Upload file to bucket")
@click.argument("dataset_id")
@click.argument("table_id")
@click.argument("filepath", type=click.Path(exists=True))
@click.option(
"--mode", "-m", required=True, help="[raw|staging] where to save the file"
)
@click.option("--partitions", help="Data partition as `value=key/value2=key2`")
@click.option(
"--if_exists",
default="raise",
    help="[raise|replace|pass] if file already exists",
)
@click.pass_context
def upload_storage(ctx, dataset_id, table_id, filepath, mode, partitions, if_exists):
ctx.obj.pop("bucket_name")
blob_name = Storage(dataset_id, table_id, **ctx.obj).upload(
filepath=filepath, mode=mode, partitions=partitions, if_exists=if_exists
)
click.echo(
click.style(
f"Data was added to `{blob_name}`",
fg="green",
)
)
@cli_storage.command(name="download", help="Download file from bucket")
@click.argument("dataset_id")
@click.argument("table_id")
@click.argument("savepath", type=click.Path(exists=True))
@click.option(
"--filename",
"-f",
default="*",
    help="filename to download a single file. If '*', downloads all files from the bucket folder",
)
@click.option(
"--mode", "-m", default="raw", help="[raw|staging] where to download data from"
)
@click.option("--partitions", help="Data partition as `value=key/value2=key2`")
@click.option(
"--if_not_exists",
default="raise",
    help="[raise|pass] if file not found at bucket folder",
)
@click.pass_context
def download_storage(
ctx, dataset_id, table_id, filename, savepath, partitions, mode, if_not_exists
):
Storage(dataset_id, table_id, **ctx.obj).download(
filename, savepath, partitions, mode, if_not_exists
)
click.echo(
click.style(
f"Data was downloaded to `{savepath}`",
fg="green",
)
)
@cli_storage.command(name="delete_table", help="Delete table from bucket")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--mode",
"-m",
required=True,
default="staging",
help="[raw|staging] where to delete the file from",
)
@click.option(
"--bucket_name",
default=None,
    help="Bucket from which to delete data; you can change it to delete from a bucket other than yours",
)
@click.option("--not_found_ok", default=False, help="what to do if table not found")
@click.pass_context
def storage_delete_table(ctx, dataset_id, table_id, mode, not_found_ok, bucket_name):
Storage(dataset_id, table_id, **ctx.obj).delete_table(
mode=mode, not_found_ok=not_found_ok, bucket_name=bucket_name
)
click.echo(
click.style(
f"Data was deleted from bucket `{bucket_name}`",
fg="green",
)
)
@cli_storage.command(name="copy_table", help="Copy table to your bucket")
@click.argument("dataset_id")
@click.argument("table_id")
@click.option("--source_bucket_name", required=True, default="basedosdados")
@click.option(
"--dst_bucket_name",
default=None,
    help="Bucket where data will be copied to; defaults to your bucket",
)
@click.option(
"--mode",
"-m",
default="staging",
help="[raw|staging] which bucket folder to get the table",
)
@click.pass_context
def storage_copy_table(
ctx, dataset_id, table_id, source_bucket_name, dst_bucket_name, mode
):
Storage(dataset_id, table_id, **ctx.obj).copy_table(
source_bucket_name=source_bucket_name,
destination_bucket_name=dst_bucket_name,
mode=mode,
)
@click.group(name="list")
def cli_list():
pass
@cli_list.command(name="datasets", help="List datasets available at given project_id")
@click.option(
"--project_id",
default="basedosdados",
help="The project which will be queried. You should have list/read permissions",
)
@click.option(
"--filter_by",
default=None,
help="Filter your search, must be a string",
)
@click.option(
"--with_description",
default=False,
    help="[bool] Fetch short description for each dataset",
)
@click.pass_context
def cli_list_datasets(ctx, project_id, filter_by, with_description):
bd.list_datasets(
query_project_id=project_id,
filter_by=filter_by,
with_description=with_description,
)
@cli_list.command(name="dataset_tables", help="List tables available at given dataset")
@click.argument("dataset_id")
@click.option(
"--project_id",
default="basedosdados",
help="The project which will be queried. You should have list/read permissions",
)
@click.option(
"--filter_by",
default=None,
help="Filter your search, must be a string",
)
@click.option(
"--with_description",
default=False,
    help="[bool] Fetch short description for each table",
)
@click.pass_context
def cli_list_dataset_tables(ctx, dataset_id, project_id, filter_by, with_description):
bd.list_dataset_tables(
dataset_id=dataset_id,
query_project_id=project_id,
filter_by=filter_by,
with_description=with_description,
)
@click.group(name="get")
def cli_get():
pass
@cli_get.command(
name="dataset_description", help="Get the full description for given dataset"
)
@click.argument("dataset_id")
@click.option(
"--project_id",
default="basedosdados",
help="The project which will be queried. You should have list/read permissions",
)
@click.pass_context
def cli_get_dataset_description(ctx, dataset_id, project_id):
bd.get_dataset_description(
dataset_id=dataset_id,
query_project_id=project_id,
)
@cli_get.command(
name="table_description", help="Get the full description for given table"
)
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--project_id",
default="basedosdados",
help="The project which will be queried. You should have list/read permissions",
)
@click.pass_context
def cli_get_table_description(ctx, dataset_id, table_id, project_id):
bd.get_table_description(
dataset_id=dataset_id,
table_id=table_id,
query_project_id=project_id,
)
@cli_get.command(
name="table_columns",
help="Get fields names,types and description for columns at given table",
)
@click.argument("dataset_id")
@click.argument("table_id")
@click.option(
"--project_id",
default="basedosdados",
help="The project which will be queried. You should have list/read permissions",
)
@click.pass_context
def cli_get_table_columns(
ctx,
dataset_id,
table_id,
project_id,
):
bd.get_table_columns(
dataset_id=dataset_id,
table_id=table_id,
query_project_id=project_id,
)
@click.group(name="config")
def cli_config():
pass
@cli_config.command(name="init", help="Initialize configuration")
@click.option(
"--overwrite",
default=False,
    help="Whether to overwrite current config",
)
@click.pass_context
def init(ctx, overwrite):
Base(overwrite_cli_config=overwrite, **ctx.obj)
@cli_config.command(name="refresh_template", help="Overwrite current templates")
@click.pass_context
def init_refresh_templates(ctx):
Base(**ctx.obj)._refresh_templates()
@click.command(
name="download",
help="Download data. "
"You can add extra arguments accepted by `pandas.to_csv`.\n\n"
"Examples: --delimiter='|', --index=False",
context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True,
),
)
@click.argument("savepath", type=click.Path(exists=False))
@click.option(
"--dataset_id",
default=None,
help="Dataset_id, enter with table_id to download table",
)
@click.option(
"--table_id",
default=None,
help="Table_id, enter with dataset_id to download table ",
)
@click.option(
"--query",
default=None,
help="A SQL Standard query to download data from BigQuery",
)
@click.option(
"--query_project_id",
default=None,
    help="Which project the table lives in. You can change this if you want to query different projects.",
)
@click.option(
"--billing_project_id",
default=None,
help="Project that will be billed. Find your Project ID here https://console.cloud.google.com/projectselector2/home/dashboard",
)
@click.option(
"--limit",
default=None,
help="Number of rows returned",
)
@click.pass_context
def cli_download(
ctx,
dataset_id,
table_id,
savepath,
query,
query_project_id,
billing_project_id,
limit,
):
pandas_kwargs = dict()
for item in ctx.args:
pandas_kwargs.update([item.replace("--", "").split("=")])
    bd.download(
savepath=savepath,
dataset_id=dataset_id,
table_id=table_id,
query=query,
query_project_id=query_project_id,
billing_project_id=billing_project_id,
limit=limit,
**pandas_kwargs,
)
click.echo(
click.style(
f"Table was downloaded to `{savepath}`",
fg="green",
)
)
cli.add_command(cli_dataset)
cli.add_command(cli_table)
cli.add_command(cli_storage)
cli.add_command(cli_config)
cli.add_command(cli_download)
cli.add_command(cli_list)
cli.add_command(cli_get)
def run_bash(command):
    stream = os.popen(command)
    return stream.read()
def set_config_file():
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"):
        project_id = input(
            "\nWe need to finish setting up your basic environment!\n"
"What is your project id? You should easily find it here: "
"https://console.developers.google.com/cloud-resource-manager?pli=1\n"
"Make sure to copy the ID!\n"
"project_id: "
)
os.popen("gcloud iam service-accounts create basedosdados-cli")
time.sleep(3)
os.popen(
f"""gcloud projects add-iam-policy-binding {project_id} --member "serviceAccount:basedosdados-cli@{project_id}.iam.gserviceaccount.com" --role "roles/owner"
"""
)
time.sleep(3)
os.popen(
f"""gcloud iam service-accounts keys create ~/.basedosdados/iam.json --iam-account basedosdados-cli@{project_id}.iam.gserviceaccount.com"""
)
time.sleep(3)
print(
"\nRun this command and rerun the application:\n"
"export GOOGLE_APPLICATION_CREDENTIALS=~/.basedosdados/iam.json"
)
exit()
# set_config_file()
if __name__ == "__main__":
cli()
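# A hedged usage sketch (not part of the original module): example shell
# invocations, assuming the package exposes this group as a `basedosdados`
# console entry point; dataset/table ids and paths are illustrative.
#
#     basedosdados dataset init my_dataset --replace
#     basedosdados dataset create my_dataset --mode all
#     basedosdados table create my_dataset my_table --path data/my_table.csv
#     basedosdados download out.csv --dataset_id my_dataset --table_id my_table \
#         --billing_project_id my-billing-project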
| 26.872818
| 211
| 0.673859
|
932d3fa3f91b8994038486156652d6e54a31080e
| 482
|
py
|
Python
|
tests/test_rsp.py
|
giltom/megastone
|
0ef02c724d53acf7a06430f58bc5f777676d78a9
|
[
"MIT"
] | 2
|
2021-04-03T01:55:09.000Z
|
2021-04-12T15:12:10.000Z
|
tests/test_rsp.py
|
giltom/megastone
|
0ef02c724d53acf7a06430f58bc5f777676d78a9
|
[
"MIT"
] | null | null | null |
tests/test_rsp.py
|
giltom/megastone
|
0ef02c724d53acf7a06430f58bc5f777676d78a9
|
[
"MIT"
] | null | null | null |
import pytest
from megastone.rsp import connection
def test_escape():
data = b'}a$#+b*j'
escaped = connection._escape_data(data)
assert len(escaped) == len(data) + 4
assert escaped.count(connection.ESCAPE_BYTE) == 4
assert connection._unescape_data(escaped) == data
def test_unescape():
data = b'}a}bddd}}a'
unescaped = connection._unescape_data(data)
assert len(unescaped) == len(data) - 3
assert unescaped.count(connection.ESCAPE_BYTE) == 0
| 28.352941
| 55
| 0.697095
|
f101e0665ca7e7e98702a9725f431c0326ab86be
| 1,210
|
py
|
Python
|
src/others/logging.py
|
husnain-ali21/ctx-rewriter-for-summ
|
cd172bd934d804aec2c15127fc16276bd60213f4
|
[
"MIT"
] | 22
|
2021-03-10T14:43:13.000Z
|
2022-03-18T02:20:12.000Z
|
src/others/logging.py
|
husnain-ali21/ctx-rewriter-for-summ
|
cd172bd934d804aec2c15127fc16276bd60213f4
|
[
"MIT"
] | 2
|
2021-03-02T09:19:48.000Z
|
2021-03-14T13:22:09.000Z
|
src/others/logging.py
|
husnain-ali21/ctx-rewriter-for-summ
|
cd172bd934d804aec2c15127fc16276bd60213f4
|
[
"MIT"
] | 3
|
2021-09-27T09:39:46.000Z
|
2022-02-05T03:42:06.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
logger = logging.getLogger()
def init_logger(log_file=None, log_file_level=logging.NOTSET):
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
if log_file and log_file != '':
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(log_file_level)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
def set_logger(logger, log_file=None, log_file_level=logging.NOTSET):
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
if log_file and log_file != '':
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(log_file_level)
file_handler.setFormatter(log_format)
logger.addHandler(file_handler)
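if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module; the log file name
    # is an illustrative assumption.
    init_logger("example.log", log_file_level=logging.INFO)
    logger.info("console and file handlers are now configured")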
| 32.702703
| 77
| 0.726446
|
941085ac8e0a1044e47bf960dea29a2727972a75
| 1,605
|
py
|
Python
|
salt/returners/cassandra_return.py
|
mattrobenolt/salt
|
e01a4c2d26adc705d8056970777a6313ad10291b
|
[
"Apache-2.0"
] | 4
|
2015-10-06T22:20:27.000Z
|
2017-09-04T08:03:44.000Z
|
salt/returners/cassandra_return.py
|
mika/salt
|
8430482c7177356964c894d161830c94d09f1cab
|
[
"Apache-2.0"
] | null | null | null |
salt/returners/cassandra_return.py
|
mika/salt
|
8430482c7177356964c894d161830c94d09f1cab
|
[
"Apache-2.0"
] | null | null | null |
'''
Return data to a Cassandra ColumnFamily
Here's an example Keyspace / ColumnFamily setup that works with this
returner::
create keyspace salt;
use salt;
create column family returns
with key_validation_class='UTF8Type'
and comparator='UTF8Type'
and default_validation_class='UTF8Type';
Required python modules: pycassa
'''
import logging
try:
import pycassa
has_pycassa = True
except ImportError:
has_pycassa = False
log = logging.getLogger(__name__)
__opts__ = {'cassandra.servers': ['localhost:9160'],
'cassandra.keyspace': 'salt',
'cassandra.column_family': 'returns',
'cassandra.consistency_level': 'ONE'}
def __virtual__():
if not has_pycassa:
return False
return 'cassandra'
def returner(ret):
'''
Return data to a Cassandra ColumnFamily
'''
consistency_level = getattr(pycassa.ConsistencyLevel,
__opts__['cassandra.consistency_level'])
pool = pycassa.ConnectionPool(__opts__['cassandra.keyspace'],
__opts__['cassandra.servers'])
cf = pycassa.ColumnFamily(pool, __opts__['cassandra.column_family'],
write_consistency_level=consistency_level)
columns = {'fun': ret['fun'],
'id': ret['id']}
if isinstance(ret['return'], dict):
for key, value in ret['return'].items():
columns['return.%s' % (key,)] = str(value)
else:
columns['return'] = str(ret['return'])
log.debug(columns)
cf.insert(ret['jid'], columns)
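# A hedged sketch (not part of the original module) of the job-return structure
# this returner consumes; the values are illustrative. A dict return is
# flattened into `return.<key>` columns, and rows are keyed by the job id:
#
#     returner({'jid': '20140101010101123456',
#               'id': 'minion-1',
#               'fun': 'test.ping',
#               'return': True})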
| 25.887097
| 72
| 0.624922
|
562f4e6956c303d0471482bb5d8fe8841ce911a7
| 3,379
|
py
|
Python
|
queue.py
|
deepti-chauhan/Data_Structures_in_python
|
7a3c88fb636b0b69ed880e2da08802e473da657a
|
[
"MIT"
] | null | null | null |
queue.py
|
deepti-chauhan/Data_Structures_in_python
|
7a3c88fb636b0b69ed880e2da08802e473da657a
|
[
"MIT"
] | null | null | null |
queue.py
|
deepti-chauhan/Data_Structures_in_python
|
7a3c88fb636b0b69ed880e2da08802e473da657a
|
[
"MIT"
] | null | null | null |
''' Like a stack, a queue is a linear data structure, but it stores items in First In First Out (FIFO) manner.
With a queue the least recently added item is removed first.
A good example of queue is any queue of consumers for a resource where the consumer that came first is served first.'''
class Queue(object):
def __init__(self, size):
self.queue = []
self.size = size
def __str__(self):
myString = ' '.join(str(i) for i in self.queue)
return myString
    def enqueue(self, item):
        '''This function adds an item to the rear end of the queue '''
        if(self.isFull() != True):
            self.queue.insert(0, item)
        else:
            print('Queue is Full!')
            return -1   # signal failure so main() can detect a full queue
    def dequeue(self):
        ''' This function removes an item from the front end of the queue '''
        if(self.isEmpty() != True):
            return self.queue.pop()
        else:
            print('Queue is Empty!')
            return -1   # signal failure so main() can detect an empty queue
def isEmpty(self):
''' This function checks if the queue is empty '''
return self.queue == []
def isFull(self):
''' This function checks if the queue is full '''
return len(self.queue) == self.size
def peek(self):
''' This function helps to see the first element at the front end of the queue '''
if(self.isEmpty() != True):
return self.queue[-1]
else:
print('Queue is Empty!')
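# A quick non-interactive sketch of the FIFO behaviour described above (the
# size and values are illustrative):
#
#     q = Queue(3)
#     q.enqueue('a'); q.enqueue('b'); q.enqueue('c')
#     q.dequeue()   # returns 'a' -- the first item enqueued leaves first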
def main():
myQueue = Queue(int(input("Enter size of queue : ")))
while(True):
print(
'------------OPERATIONS-----------\n'
'\t1. enqueue\n'
'\t2. dequeue\n'
'\t3. Front of queue\n'
'\t4. check for empty\n'
'\t5. check for full\n'
'\t6. display Queue\n'
'---------------------------------\n'
)
#for performing certain operations make a choice
ch = int(input('Enter your choice(0 to exit) : '))
print('\n','-'*35)
#breaking condition
if ch == 0:
break
#push operation
elif ch == 1:
e = (input('Enter the element : '))
msg = myQueue.enqueue(e)
if msg == -1:
print('Queue is full item cannot be enqueued!!')
else:
print('item enqueued successfully!!')
#pop operation
elif ch == 2:
msg = myQueue.dequeue()
if msg == -1:
print('Queue is empty item cannot be dequeued!!')
else:
print(' item dequeued successfully!! \n\n\t item dequeued : ',msg)
#peek operation
elif ch == 3:
print('PEEK SUCCESSFUL! \n\n\t : ',myQueue.peek())
#isEmpty operation
elif ch == 4:
print('QUEUE EMPTY ? : ',myQueue.isEmpty())
#isFull operation
elif ch == 5:
print('QUEUE FULL ? : ',myQueue.isFull())
#display operation
elif ch == 6:
print(myQueue)
#default operation
else:
print('INVALID CHOICE!!!')
print('-'*30,'\n')
#---------------------calling main function----------------------#
if __name__ == '__main__':
main()
| 31
| 124
| 0.483575
|
af198764fda7f5c13e34d6a91c029d9a4a8a1427
| 12,030
|
py
|
Python
|
test/testers/winforms/numericupdown-regression.py
|
ABEMBARKA/monoUI
|
5fda266ad2db8f89580a40b525973d86cd8de939
|
[
"MIT"
] | 1
|
2019-08-13T15:22:12.000Z
|
2019-08-13T15:22:12.000Z
|
test/testers/winforms/numericupdown-regression.py
|
ABEMBARKA/monoUI
|
5fda266ad2db8f89580a40b525973d86cd8de939
|
[
"MIT"
] | null | null | null |
test/testers/winforms/numericupdown-regression.py
|
ABEMBARKA/monoUI
|
5fda266ad2db8f89580a40b525973d86cd8de939
|
[
"MIT"
] | 1
|
2019-08-13T15:22:17.000Z
|
2019-08-13T15:22:17.000Z
|
#!/usr/bin/env python
# vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 09/08/2008
# Description: main test script of numericupdown
# ../samples/winforms/numericupdown.py is the test sample script
# numericupdown/* is the wrapper of numericupdown test sample
##############################################################################
# The docstring below is used in the generated log file
"""
Test accessibility of numericupdown widget
"""
# imports
from numericupdown import *
from helpers import *
from states import *
from actions import *
from sys import argv
app_path = None
try:
app_path = argv[1]
except IndexError:
pass #expected
# open the numericupdown sample application
try:
app = launchNumericUpDown(app_path)
except IOError, msg:
print "ERROR: %s" % msg
exit(2)
# make sure we got the app back
if app is None:
exit(4)
# just an alias to make things shorter
nudFrame = app.numericUpDownFrame
##############################
# check numericupdown's states
##############################
statesCheck(nudFrame.editable_numericupdown, "NumericUpDown", add_states=["focused"])
statesCheck(nudFrame.uneditable_numericupdown, "NumericUpDown", invalid_states=["editable"])
# move the focused to uneditable_numericupdown then check the states again
nudFrame.uneditable_numericupdown.mouseClick()
statesCheck(nudFrame.editable_numericupdown, "NumericUpDown")
statesCheck(nudFrame.uneditable_numericupdown, "NumericUpDown", invalid_states=["editable"], add_states=["focused"])
##############################
# input numbers from UI
##############################
# editable NumericUpDown
nudFrame.editable_numericupdown.mouseClick()
nudFrame.editable_numericupdown.typeText("20")
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 1020)
nudFrame.assertText(nudFrame.editable_numericupdown, "1020")
# uneditable NumericUpDown
nudFrame.uneditable_numericupdown.mouseClick()
nudFrame.uneditable_numericupdown.typeText("20")
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 10)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "10")
#############################
# input numbers from AtkText
#############################
# editable NumericUpDown
nudFrame.enterTextValue(nudFrame.editable_numericupdown, "10")
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 10)
nudFrame.assertText(nudFrame.editable_numericupdown, "10")
# uneditable NumericUpDown
nudFrame.enterTextValue(nudFrame.uneditable_numericupdown, "100")
nudFrame.keyCombo("Enter", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 10)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "10")
############################
# change value from AtkValue
############################
# editable NumericUpDown
nudFrame.assignValue(nudFrame.editable_numericupdown, 0)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 0)
nudFrame.assertText(nudFrame.editable_numericupdown, "0")
nudFrame.assignValue(nudFrame.editable_numericupdown, 100)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 100)
nudFrame.assertText(nudFrame.editable_numericupdown, "100")
# uneditable NumericUpDown
nudFrame.assignValue(nudFrame.uneditable_numericupdown, 50)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 50)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "50")
############################
# set value to max
############################
# set numericupdown's value to maximumValue
# enter text value as a float, which is what we get back from
# queryValue().currentValue
nudFrame.enterTextValue(nudFrame.editable_numericupdown,
str(nudFrame.editableMaximumValue))
nudFrame.mouseClick()
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMaximumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMaximumValue)))
# try to set the uneditable numericupdown control's text to maximumValue, but
# ensure that it doesn't change (since it is readonly)
nudFrame.enterTextValue(nudFrame.uneditable_numericupdown,
str(int(nudFrame.uneditableMaximumValue)))
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 50)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "50")
############################
# set value to max + 1
############################
# set numericupdown's value to maximumValue + 1
nudFrame.enterTextValue(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMaximumValue+1)))
sleep(config.MEDIUM_DELAY)
nudFrame.keyCombo("Enter", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMaximumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMaximumValue)))
############################
# set value to min
############################
# set numericupdown's value to minimumValue
nudFrame.enterTextValue(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
nudFrame.enterTextValue(nudFrame.uneditable_numericupdown,
str(int(nudFrame.uneditableMinimumValue)))
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 50)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "50")
############################
# set value to min - 1
############################
#set numericupdown's value to minimumValue-1
nudFrame.enterTextValue(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue - 1)))
sleep(config.MEDIUM_DELAY)
nudFrame.keyCombo("Enter", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
############################
# press Up/Down on editable_numericupdown
############################
# test press Up/Down action to check Text and Value by keyCombo to
# editable_numericupdown which increment value is 20
nudFrame.keyCombo("Up", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue + 20)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue + 20)))
# press "Down" on editable_numericupdown
nudFrame.keyCombo("Down", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
# press "Down" again on editable_numericupdown and make sure the accessible
# text and value do not change (since the control is at its minimum value)
nudFrame.keyCombo("Down", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
############################
# press Up/Down on uneditable_numericupdown
############################
# test press Up/Down action to check Text and Value of
# uneditable_numericupdown which increment value is 1
nudFrame.uneditable_numericupdown.mouseClick()
nudFrame.keyCombo("Up", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 51)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "51")
# press "Down" on uneditable_numericupdown
nudFrame.keyCombo("Down", grabFocus=False)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 50)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "50")
############################
# Use assignValue to change NumericUpDown control values
############################
# try to set each of the controls to something crazy, after which they should
# both remained unchanged
nudFrame.assignValue(nudFrame.editable_numericupdown, 100000)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
nudFrame.assignValue(nudFrame.uneditable_numericupdown, -100000)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 50)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "50")
# set them to min-1 and max+1. Again, they should remain unchanged
nudFrame.assignValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown,
nudFrame.editableMinimumValue)
nudFrame.assertText(nudFrame.editable_numericupdown,
str(int(nudFrame.editableMinimumValue)))
# set both NumericUpDown controls' values to 0
nudFrame.assignValue(nudFrame.editable_numericupdown, 0)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 0)
nudFrame.assertText(nudFrame.editable_numericupdown, "0")
nudFrame.assignValue(nudFrame.uneditable_numericupdown, 0)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 0)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "0")
# set both NumericUpDown controls' values to 1
nudFrame.assignValue(nudFrame.editable_numericupdown, 1)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 1)
nudFrame.assertText(nudFrame.editable_numericupdown, "1")
nudFrame.assignValue(nudFrame.uneditable_numericupdown, 1)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 1)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "1")
# set both NumericUpDown controls' values to -1
nudFrame.assignValue(nudFrame.editable_numericupdown, -1)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, -1)
nudFrame.assertText(nudFrame.editable_numericupdown, "-1")
nudFrame.assignValue(nudFrame.uneditable_numericupdown, -1)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, -1)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "-1")
# set both NumericUpDown controls' values to 10
nudFrame.assignValue(nudFrame.editable_numericupdown, 10)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, 10)
nudFrame.assertText(nudFrame.editable_numericupdown, "10")
nudFrame.assignValue(nudFrame.uneditable_numericupdown, 10)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, 10)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "10")
# set both NumericUpDown controls' values to -10
nudFrame.assignValue(nudFrame.editable_numericupdown, -10)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.editable_numericupdown, -10)
nudFrame.assertText(nudFrame.editable_numericupdown, "-10")
nudFrame.assignValue(nudFrame.uneditable_numericupdown, -10)
sleep(config.SHORT_DELAY)
nudFrame.assertValue(nudFrame.uneditable_numericupdown, -10)
nudFrame.assertText(nudFrame.uneditable_numericupdown, "-10")
############################
# End
############################
# close application frame window
nudFrame.quit()
print "INFO: Log written to: %s" % config.OUTPUT_DIR
| 39.313725
| 116
| 0.738238
|
bdddf6204a6c0847c23c129705bb231b06e4670a
| 310
|
py
|
Python
|
packages/pycopy/v2.11.0.1/esp32/stubs/ntptime.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18
|
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/pycopy/v2.11.0.1/esp32/stubs/ntptime.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9
|
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/pycopy/v2.11.0.1/esp32/stubs/ntptime.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6
|
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'ntptime' on esp32 1.11.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v2.11.0.1 on 2019-07-26', machine='ESP32 module with ESP32')
# Stubber: 1.2.0
NTP_DELTA = 3155673600
host = 'pool.ntp.org'
def settime():
pass
socket = None
struct = None
def time():
pass
| 19.375
| 130
| 0.648387
|
69b78bce2963194b3d3e2b2a286371b2db336773
| 810
|
py
|
Python
|
manage.py
|
yashasvibajpai/UrlShortener
|
28f9de5309cd3aec7bd5c0f061a5baa87dafa4f2
|
[
"MIT"
] | 64
|
2017-06-14T14:35:16.000Z
|
2021-04-20T07:52:53.000Z
|
manage.py
|
yashasvibajpai/UrlShortener
|
28f9de5309cd3aec7bd5c0f061a5baa87dafa4f2
|
[
"MIT"
] | 26
|
2017-06-04T17:57:05.000Z
|
2021-09-22T17:38:09.000Z
|
manage.py
|
yashasvibajpai/UrlShortener
|
28f9de5309cd3aec7bd5c0f061a5baa87dafa4f2
|
[
"MIT"
] | 118
|
2017-06-05T22:08:02.000Z
|
2020-10-04T07:52:14.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "UrlShortener.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.217391
| 77
| 0.644444
|
9e0bc0e3381df61cb87b4c26e5241fef2a648c4d
| 7,975
|
py
|
Python
|
decryptoquote/cypherlettermap.py
|
ROldford/decryptoquote
|
daa30d5594fac4472e01ed3dd2f83f4520ff0e6d
|
[
"MIT"
] | null | null | null |
decryptoquote/cypherlettermap.py
|
ROldford/decryptoquote
|
daa30d5594fac4472e01ed3dd2f83f4520ff0e6d
|
[
"MIT"
] | 6
|
2020-02-26T19:23:49.000Z
|
2021-08-04T03:38:35.000Z
|
decryptoquote/cypherlettermap.py
|
ROldford/decryptoquote
|
daa30d5594fac4472e01ed3dd2f83f4520ff0e6d
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional, List, Tuple
from decryptoquote.constants import LETTERS, PUNCTUATION
class CypherLetterMap:
"""
This class maps coded letters to matching decoded letters, or to `None` if
no matching value has been determined.
"""
def __init__(self):
self._clmap: Dict[str, Optional[str]] = {}
self._past_coded_words: List[Tuple[str, str]] = [] # coded, decoded
for letter in LETTERS:
self._clmap[letter] = None
def __repr__(self):
return (f'{self.__class__.__name__}('
f'{self._clmap!r})')
def __str__(self):
keystring: str = self.keystring()
return f"Decoder:\n{LETTERS}\n{keystring}"
def __eq__(self, other):
try:
if self is other:
return True
else:
for key in self._clmap.keys():
if self.get_letter_for_cypher(key) \
!= other.get_letter_for_cypher(key):
return False
return True
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_letter_for_cypher(self, coded_letter: str) -> Optional[str]:
"""
Get the matching decoded letter for the given coded letter
:param coded_letter: given coded letter
:return: matching decoded letter, or `None` if no match
"""
return self._clmap[coded_letter.upper()]
def decode(self, coded_text: str) -> str:
"""
Decrypts coded text based on current cypher letter map. If coded
letters do not have decoded matches, the underscore ("_") will be used
in its place.
:param coded_text: coded text to decrypt
:return: decrypted version of coded text, which may have underscores if
letters are not decoded
"""
decoded_text = []
for letter in coded_text.upper():
if letter in self._clmap.keys():
if self._clmap[letter] is None:
decoded_text.append('_')
else:
decoded_text.append(self._clmap[letter])
else:
decoded_text.append(letter)
return "".join(decoded_text)
def add_word_to_mapping(self,
coded_word: str,
decoded_word: str):
"""
Updates the cypher letter map using the given coded word and its
decoded form.
:param coded_word: word from coded puzzle to add
:param decoded_word: matching decoded word
"""
coded_word, decoded_word = self._validate_words(coded_word,
decoded_word)
self._past_coded_words.append((coded_word, decoded_word))
self._add_word_to_mapping_no_save(coded_word, decoded_word)
def remove_last_word_from_mapping(self):
"""
Updates the cypher letter dictionary by undoing the last word addition.
"""
# current strategy: remove last word, clear map, rebuild
# could be costly?
# possible improvement: also store number of "codings"
# ie. number of times we see that coded/decoded letter pair
# reduce that number each time we remove
# only remove letter when codings count <= 0
self._past_coded_words = self._past_coded_words[:-1] # remove last
for letter in self._clmap.keys():
self._clmap[letter] = None # clear cl_map
for word_pair in self._past_coded_words:
coded_word, decoded_word = word_pair
self._add_word_to_mapping_no_save(coded_word, decoded_word)
def does_word_coding_work(
self,
coded_word: str,
possible_decoded_word: str
) -> bool:
"""
Checks if this "word coding", or pair of coded word and possible
matching decoded word, can be safely added to the mapping without
causing coding inconsistencies. Coding inconsistencies include:
* Mapping a coded letter that already has a matching decoded letter
to a new letter
* Adding a decoded letter that already exists in the mapping
:param coded_word: coded word to check
:param possible_decoded_word: possible decoded word to check
:return: True if coded word and possible decoded word could be safely
added to the mapping
"""
try:
word_pair = self._validate_words(coded_word, possible_decoded_word)
except ValueError:
return False
coded_word, possible_decoded_word = word_pair
for letter_pair in zip(coded_word, possible_decoded_word):
coded_letter, decoded_letter = letter_pair
if coded_letter in PUNCTUATION or coded_letter == "'":
if decoded_letter == coded_letter:
continue
else:
return False
if self._clmap[coded_letter] is None:
if decoded_letter in self._clmap.values():
return False
if decoded_letter in PUNCTUATION:
return False
else:
if self._clmap[coded_letter] != decoded_letter:
return False
return True
def clear(self):
self._past_coded_words: List[Tuple[str, str]] = [] # coded, decoded
for letter in LETTERS:
self._clmap[letter] = None
def keystring(self):
key: List[str] = []
for letter in LETTERS:
if self._clmap[letter] is None:
key.append("_")
else:
key.append(self._clmap[letter])
return ''.join(key)
def _add_word_to_mapping_no_save(self,
coded_word: str,
decoded_word: str):
coded_word, decoded_word = self._validate_words(coded_word,
decoded_word)
word_matches = zip(coded_word, decoded_word)
for letter_match in word_matches:
coded_letter, decoded_letter = letter_match
if coded_letter not in PUNCTUATION \
and decoded_letter not in PUNCTUATION:
# pair exists in map: no action
if self._clmap[coded_letter] == decoded_letter:
continue
# old key, new value
if self._clmap[coded_letter] is not None:
raise ValueError(
f"Coded letter {coded_letter} already has a match")
# same value for 2 keys
if decoded_letter in self._clmap.values():
raise ValueError(
f"Decoded letter {decoded_letter} is already mapped to "
f"another coded letter")
self._clmap[coded_letter] = decoded_letter
else:
if coded_letter != decoded_letter:
raise ValueError(
f"Coded word {coded_word} and decoded word {decoded_word} "
f"have different punctuation locations")
def _validate_words(self,
coded_word: str,
decoded_word: str):
"""
Ensures that coded and decoded words are uppercase and of equal length.
:param coded_word: coded word to check
:param decoded_word: decoded word to check
:return: coded and decoded words in upper case
:raises ValueError: if words have different lengths
"""
if len(coded_word) != len(decoded_word):
raise ValueError("Coded and decoded words must "
"have the same length")
return coded_word.upper(), decoded_word.upper()
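if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module. It assumes
    # LETTERS is the uppercase A-Z alphabet; the coded/decoded words are
    # illustrative.
    clmap = CypherLetterMap()
    clmap.add_word_to_mapping("ZKL", "THE")
    print(clmap.decode("ZKL"))                           # -> THE
    print(clmap.decode("ZKLB"))                          # -> THE_ (B is still unmapped)
    print(clmap.does_word_coding_work("ZKLB", "THEN"))   # -> True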
| 39.285714
| 83
| 0.573417
|
17c0a1c2c539f118d7131dab17ea861cd4934694
| 1,221
|
py
|
Python
|
podcast-backend/src/app/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
podcast-backend/src/app/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
podcast-backend/src/app/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
import os
import datetime
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
import config
# Configure Flask app
app = Flask(__name__, static_url_path='/templates')
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Database
db = SQLAlchemy(app)
# Import + Register Blueprints
from app.pcasts import pcasts as pcasts
app.register_blueprint(pcasts)
# HTTP error handling
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
from app.pcasts_logger import PcastsJsonFormatter
# Initialize log handler
if not app.config['TESTING']:
date_tag = datetime.datetime.now().strftime('%Y-%b-%d')
logs_path = '{}/logs'.format(app.root_path)
if not os.path.exists(logs_path):
os.makedirs(logs_path)
log_handler = RotatingFileHandler(
'{}/info-{}.log'.format(logs_path, date_tag),
maxBytes=10*1024*1024, backupCount=5
)
formatter = PcastsJsonFormatter('(timestamp) (level) (message)')
log_handler.setFormatter(formatter)
log_handler.setLevel(logging.INFO)
app.logger.addHandler(log_handler)
| 29.780488
| 66
| 0.773137
|
78e955ad94734a28b5874d2109859be64aca8802
| 1,965
|
py
|
Python
|
tests/models/test_clf_evaluation.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 24
|
2021-05-13T13:34:27.000Z
|
2022-03-02T13:52:29.000Z
|
tests/models/test_clf_evaluation.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 2
|
2021-07-09T08:21:31.000Z
|
2022-02-11T15:30:55.000Z
|
tests/models/test_clf_evaluation.py
|
WadhwaniAI/cac-test-release
|
066c554a354f0043f8ac83c9d6421a8397b17ba5
|
[
"Apache-2.0"
] | 6
|
2021-06-10T07:47:48.000Z
|
2022-01-27T07:36:09.000Z
|
"""Tests cac.models.classification.ClassificationModel evaluation"""
import os
from os.path import dirname, join, exists
from copy import deepcopy
import torch
import wandb
import unittest
from tqdm import tqdm
import numpy as np
from torch import optim
from torch.nn import Conv2d, BatchNorm2d, LeakyReLU
from cac.config import Config
from cac.utils.logger import set_logger, color
from cac.data.dataloader import get_dataloader
from cac.models.classification import ClassificationModel
from cac.models.utils import get_saved_checkpoint_path
class ClassificationModelEvaluationTestCase(unittest.TestCase):
"""Class to check the evaluation of ClassificationModel"""
@classmethod
def setUpClass(cls):
version = 'default.yml'
cls.cfg = Config(version)
cls.cfg.data['dataset']['params']['train']['fraction'] = 0.01
cls.cfg.data['dataset']['params']['val']['fraction'] = 0.03
cls.cfg.num_workers = 1 if torch.cuda.is_available() else 10
def test_1_model_fitting(self):
"""Test model.fit()"""
set_logger(join(self.cfg.log_dir, 'train.log'))
tester_cfg = deepcopy(self.cfg)
tester_cfg.model['epochs'] = 1
classifier = ClassificationModel(tester_cfg)
classifier.fit(debug=True, use_wandb=False)
def test_2_evaluate(self):
"""Test model.evaluate()"""
set_logger(join(self.cfg.log_dir, 'train.log'))
tester_cfg = deepcopy(self.cfg)
tester_cfg.model['load']['version'] = 'default'
tester_cfg.model['load']['load_best'] = True
kwargs = {'threshold': 0.5}
model = ClassificationModel(tester_cfg)
dataloader, _ = get_dataloader(
tester_cfg.data, 'val',
tester_cfg.model['batch_size'],
num_workers=4,
shuffle=False,
drop_last=False)
model.evaluate(dataloader, 'val', False, **kwargs)
if __name__ == "__main__":
unittest.main()
| 33.87931
| 69
| 0.678372
|
28e224389be540007d9d07ecf53ed229b77c3e85
| 1,928
|
py
|
Python
|
frontend/amundsen_application/proxy/issue_tracker_clients/__init__.py
|
defendercrypt/amundsen
|
83c728b646020f60cf2270c12e766fe4af8c9948
|
[
"Apache-2.0"
] | 2,072
|
2020-08-11T20:16:48.000Z
|
2022-03-31T07:04:05.000Z
|
frontend/amundsen_application/proxy/issue_tracker_clients/__init__.py
|
defendercrypt/amundsen
|
83c728b646020f60cf2270c12e766fe4af8c9948
|
[
"Apache-2.0"
] | 795
|
2020-08-11T15:24:39.000Z
|
2022-03-31T18:56:13.000Z
|
frontend/amundsen_application/proxy/issue_tracker_clients/__init__.py
|
defendercrypt/amundsen
|
83c728b646020f60cf2270c12e766fe4af8c9948
|
[
"Apache-2.0"
] | 671
|
2020-08-11T20:39:56.000Z
|
2022-03-31T08:39:07.000Z
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from flask import current_app as app
from threading import Lock
from werkzeug.utils import import_string
from amundsen_application.base.base_issue_tracker_client import BaseIssueTrackerClient
_issue_tracker_client = None
_issue_tracker_client_lock = Lock()
def get_issue_tracker_client() -> BaseIssueTrackerClient:
"""
    Provides a singleton issue tracker client based on the config
    :return: instance of a subclass of BaseIssueTrackerClient
"""
global _issue_tracker_client
if _issue_tracker_client:
return _issue_tracker_client
with _issue_tracker_client_lock:
if _issue_tracker_client:
return _issue_tracker_client
else:
# Gather all the configuration to create an IssueTrackerClient
if app.config['ISSUE_TRACKER_CLIENT_ENABLED']:
url = app.config['ISSUE_TRACKER_URL']
user = app.config['ISSUE_TRACKER_USER']
password = app.config['ISSUE_TRACKER_PASSWORD']
project_id = app.config['ISSUE_TRACKER_PROJECT_ID']
max_results = app.config['ISSUE_TRACKER_MAX_RESULTS']
issue_labels = app.config['ISSUE_LABELS']
if app.config['ISSUE_TRACKER_CLIENT']:
client = import_string(app.config['ISSUE_TRACKER_CLIENT'])
_issue_tracker_client = client(issue_labels=issue_labels,
issue_tracker_url=url,
issue_tracker_user=user,
issue_tracker_password=password,
issue_tracker_project_id=project_id,
issue_tracker_max_results=max_results)
return _issue_tracker_client
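# A hedged usage sketch (not part of the original module); `flask_app` stands
# for an application created elsewhere with the ISSUE_TRACKER_* settings set:
#
#     with flask_app.app_context():
#         client = get_issue_tracker_client()
#
# Subsequent calls reuse the same instance via the module-level singleton
# guarded by the lock above.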
| 41.021277
| 89
| 0.624481
|
1a84bb98f04894507afbc980aecf632e023b2343
| 373
|
py
|
Python
|
setup.py
|
mcflugen/roms-lite
|
9c226314d6c81227d311e3348193b66acaf548fe
|
[
"MIT"
] | null | null | null |
setup.py
|
mcflugen/roms-lite
|
9c226314d6c81227d311e3348193b66acaf548fe
|
[
"MIT"
] | null | null | null |
setup.py
|
mcflugen/roms-lite
|
9c226314d6c81227d311e3348193b66acaf548fe
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(name='roms_lite',
version='0.1.0',
author='Eric Hutton',
author_email='eric.hutton@colorado.edu',
description='Python BMI for ROMS-Lite',
long_description=open('README.md').read(),
packages=find_packages(),
)
| 24.866667
| 48
| 0.697051
|
f1e30e8d2e10a88ac72831ee99db3b728ac645ca
| 2,072
|
py
|
Python
|
Collect/GLDASnew/__init__.py
|
TimHessels/watertools
|
77bb412a72f068d255d614f4f8a8f2cfb7d78a26
|
[
"Apache-2.0"
] | 3
|
2021-01-26T11:21:31.000Z
|
2021-12-31T21:28:18.000Z
|
Collect/GLDAS/__init__.py
|
TimHessels/watertools
|
77bb412a72f068d255d614f4f8a8f2cfb7d78a26
|
[
"Apache-2.0"
] | null | null | null |
Collect/GLDAS/__init__.py
|
TimHessels/watertools
|
77bb412a72f068d255d614f4f8a8f2cfb7d78a26
|
[
"Apache-2.0"
] | 4
|
2019-01-02T06:45:55.000Z
|
2021-06-30T11:51:38.000Z
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Module: Collect/GLDAS
Description:
This script automatically downloads GLDAS 2.0 or 2.1 data with a 0.25 degree
resolution for different extents, based on their OPeNDAP server. The time
intervals available are: three-hourly ('3hour'), daily ('day'), and monthly
('month'). A list of the variables can be printed with the command:
GLDAS.VarInfo('daily').descriptions.keys()
Further information about a variable can be printed with the following commands:
GLDAS.VarInfo('daily').descriptions['evap']
GLDAS.VarInfo('daily').units['evap']
GLDAS.VarInfo('daily').names['evap']
GLDAS.VarInfo('daily').factors['evap']
Examples:
from watertools.Collect import GLDAS
GLDAS.three_hourly(Dir='C:/Temp/', Vars=['qair','tair'], Startdate='2004-12-20', Enddate='2005-01-10',
latlim=[38, 41], lonlim=[-76, -73], Periods=[4, 5], gldas_version = '2.1')
GLDAS.daily(Dir='C:/Temp/', Vars=['qair'], Startdate='2004-12-20', Enddate='2005-01-01',
latlim=[38, 41], lonlim=[-76, -73],
SumMean=1, Min=1, Max=1, gldas_version = '2.1')
GLDAS.monthly(Dir='C:/TempGLDAS', Vars=['swnet'], Startdate='2004-12-20', Enddate='2005-03-10',latlim=[38, 41], lonlim=[-76, -73], gldas_version = '2.1')
"""
from .three_hourly import main as three_hourly
from .daily import main as daily
from .monthly import main as monthly
from .DataAccess import VariablesInfo as VarInfo
from .CLSM_DataAccess import VariablesInfo as CLSM_VarInfo
from .CLSM_daily import main as CLSM_daily
from .CLSM_monthly import main as CLSM_monthly
from .CLSM_three_hourly import main as CLSM_three_hourly
from .NOAH_DataAccess import VariablesInfo as NOAH_VarInfo
from .NOAH_daily import main as NOAH_daily
from .NOAH_monthly import main as NOAH_monthly
from .NOAH_three_hourly import main as NOAH_three_hourly
__all__ = ['three_hourly', 'daily', 'monthly', 'VarInfo', 'CLSM_VarInfo', 'CLSM_daily', 'CLSM_monthly', 'CLSM_three_hourly', 'NOAH_VarInfo', 'NOAH_daily', 'NOAH_monthly', 'NOAH_three_hourly']
__version__ = '0.1'
| 45.043478
| 191
| 0.725869
|
b651fdd242c348963262636ab0abc81c3f4bdbd0
| 1,085
|
py
|
Python
|
libs/astro_data/__init__.py
|
crazygmr101/Almanac
|
2309325f21ef06f96c700740162875b2f085678a
|
[
"MIT"
] | null | null | null |
libs/astro_data/__init__.py
|
crazygmr101/Almanac
|
2309325f21ef06f96c700740162875b2f085678a
|
[
"MIT"
] | null | null | null |
libs/astro_data/__init__.py
|
crazygmr101/Almanac
|
2309325f21ef06f96c700740162875b2f085678a
|
[
"MIT"
] | null | null | null |
"""
Copyright 2021 crazygmr101
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from .api import *
| 60.277778
| 119
| 0.795392
|
e9c8ed11528ac1a7a03a9e2e5c4609a301717f9f
| 7,876
|
py
|
Python
|
test/kernel/test_kernel_cordiv_kernel_in_stream.py
|
jingjieli95/UnarySim
|
775b38fa2d6b05a69fd73acb4766e50200a5cc37
|
[
"MIT"
] | 17
|
2020-04-26T19:38:03.000Z
|
2022-02-23T02:05:08.000Z
|
test/kernel/test_kernel_cordiv_kernel_in_stream.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | 3
|
2021-11-03T18:20:29.000Z
|
2022-02-11T16:30:16.000Z
|
test/kernel/test_kernel_cordiv_kernel_in_stream.py
|
pan185/UnarySim
|
c03386efdbb8151f3c33f34b44d1d6a6fc960434
|
[
"MIT"
] | 9
|
2019-12-03T05:08:55.000Z
|
2022-01-04T20:24:55.000Z
|
# %%
import torch
from UnarySim.kernel.div import FSUDiv
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.metric.metric import ProgError
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import ticker, cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import time
import math
import numpy as np
# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %%
def test(mode="unipolar",
depth_abs=4,
depth_kernel=4,
depth_sync=2,
shiftreg=False,
rng="Sobol",
rng_dim=4,
bitwidth=8,
total_cnt=100,
savepdf=False):
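    """Sweep every (dividend, divisor) pair at the given bitwidth, run the
    stochastic divider for 2**bitwidth cycles under total_cnt different RNG
    seeds, then report error statistics and plot the mean error surface."""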
stype = torch.float
btype = torch.float
rtype = torch.float
print("========================================================")
print(mode)
print("========================================================")
if mode == "unipolar":
# all values in unipolar are non-negative
        # dividend is always no greater than divisor
# divisor is non-zero
low_bound = 0
up_bound = 2**bitwidth
elif mode == "bipolar":
# values in bipolar are arbitrarily positive or negative
        # abs of dividend is always no greater than abs of divisor
# abs of divisor is non-zero
low_bound = -2**(bitwidth-1)
up_bound = 2**(bitwidth-1)
divisor_list = []
dividend_list = []
for divisor_val in range(up_bound, low_bound-1, -1):
divisor_list.append([])
dividend_list.append([])
for dividend_val in range(low_bound, up_bound+1, 1):
divisor_list[up_bound-divisor_val].append(divisor_val)
dividend_list[up_bound-divisor_val].append(dividend_val)
dividend = torch.tensor(dividend_list).type(torch.float).div(up_bound).to(device)
divisor = torch.tensor(divisor_list).type(torch.float).div(up_bound).to(device)
quotient = dividend.div(divisor)
    # find the invalid positions in quotient
quotient_nan = torch.isnan(quotient)
quotient_inf = torch.isinf(quotient)
quotient_mask = quotient_nan + quotient_inf
quotient[quotient_mask] = 0
quotient = quotient.clamp(-1, 1)
result_pe_total = []
for rand_idx in range(1, total_cnt+1):
quotientPE = ProgError(quotient, mode=mode).to(device)
dividendPE = ProgError(dividend, mode=mode).to(device)
dividendSRC = SourceGen(dividend, bitwidth, mode=mode, rtype=rtype)().to(device)
divisorPE = ProgError(divisor, mode=mode).to(device)
divisorSRC = SourceGen(divisor, bitwidth, mode=mode, rtype=rtype)().to(device)
dut_div = FSUDiv(depth_abs=depth_abs,
depth_kernel=depth_kernel,
depth_sync=depth_sync,
shiftreg_abs=shiftreg,
mode=mode,
rng=rng,
rng_dim=rng_dim,
stype=stype,
btype=btype).to(device)
# define the bit stream regen for dividend and divisor
regenRNG = RNG(bitwidth, rand_idx+2, rng, rtype)().to(device)
maxCNT = 2**bitwidth - 1
dividendCNT = torch.zeros_like(dividend) + 2**(bitwidth - 1)
dividendBS_regen = BSGen(dividendCNT, regenRNG, stype).to(device)
divisorCNT = torch.zeros_like(dividend) + 2**(bitwidth - 1)
divisorBS_regen = BSGen(divisorCNT, regenRNG, stype).to(device)
dividendRNG = RNG(bitwidth, rand_idx, rng, rtype)().to(device)
dividendBS = BSGen(dividendSRC, dividendRNG, stype).to(device)
divisorRNG = RNG(bitwidth, rand_idx+1, rng, rtype)().to(device)
divisorBS = BSGen(divisorSRC, divisorRNG, stype).to(device)
with torch.no_grad():
start_time = time.time()
for i in range(2**bitwidth):
dividend_bs = dividendBS(torch.tensor([i]))
dividendPE.Monitor(dividend_bs)
divisor_bs = divisorBS(torch.tensor([i]))
divisorPE.Monitor(divisor_bs)
dividendCNT = (dividendCNT + dividend_bs*2 - 1).clamp(0, maxCNT)
dividendBS_regen.source = dividendCNT.clone().detach()
dividend_bs_regen = dividendBS_regen(torch.tensor([i]))
divisorCNT = ( divisorCNT + divisor_bs*2 - 1).clamp(0, maxCNT)
divisorBS_regen.source = divisorCNT.clone().detach()
divisor_bs_regen = divisorBS_regen(torch.tensor([i]))
quotient_bs = dut_div(dividend_bs_regen, divisor_bs_regen)
quotientPE.Monitor(quotient_bs)
# get the result for different rng
result_pe = quotientPE()[1].cpu().numpy()
result_pe[quotient_mask.cpu().numpy()] = np.nan
result_pe_total.append(result_pe)
# get the result for different rng
result_pe_total = np.array(result_pe_total)
#######################################################################
# check the error of all simulation
#######################################################################
result_pe_total_no_nan = result_pe_total[~np.isnan(result_pe_total)]
print("RMSE:{:1.4}".format(math.sqrt(np.mean(result_pe_total_no_nan**2))))
print("MAE: {:1.4}".format(np.mean(np.abs(result_pe_total_no_nan))))
print("bias:{:1.4}".format(np.mean(result_pe_total_no_nan)))
print("max: {:1.4}".format(np.max(result_pe_total_no_nan)))
print("min: {:1.4}".format(np.min(result_pe_total_no_nan)))
#######################################################################
# check the error according to input value
#######################################################################
avg_total = np.mean(result_pe_total, axis=0)
avg_total[quotient_mask.cpu().numpy()] = 0
fig, ax = plt.subplots()
fig.set_size_inches(5.5, 4)
axis_len = quotientPE()[1].size()[0]
divisor_y_axis = []
dividend_x_axis = []
for axis_index in range(axis_len):
divisor_y_axis.append((up_bound-axis_index/(axis_len-1)*(up_bound-low_bound))/up_bound)
dividend_x_axis.append((axis_index/(axis_len-1)*(up_bound-low_bound)+low_bound)/up_bound)
X, Y = np.meshgrid(dividend_x_axis, divisor_y_axis)
Z = avg_total
levels = [-0.09, -0.06, -0.03, 0.00, 0.03, 0.06, 0.09]
cs = plt.contourf(X, Y, Z, levels, cmap=cm.RdBu, extend="both")
cbar = fig.colorbar(cs)
# plt.tight_layout()
plt.xticks(np.arange(low_bound/up_bound, up_bound/up_bound+0.1, step=0.5))
# ax.xaxis.set_ticklabels([])
plt.yticks(np.arange(low_bound/up_bound, up_bound/up_bound+0.1, step=0.5))
# ax.yaxis.set_ticklabels([])
if savepdf is True:
plt.savefig("div-"+mode+"-bw"+str(bitwidth)+"-cordivkernel-in-stream"+".pdf",
dpi=300,
bbox_inches='tight')
plt.show()
plt.close()
# %%
test(mode="unipolar", depth_abs=3, depth_kernel=2, depth_sync=2, shiftreg=False, rng="Sobol", rng_dim=4, bitwidth=8, total_cnt=100, savepdf=False)
test(mode="bipolar", depth_abs=3, depth_kernel=2, depth_sync=2, shiftreg=False, rng="Sobol", rng_dim=4, bitwidth=8, total_cnt=100, savepdf=False)
# %%
fig, ax = plt.subplots()
fig.set_size_inches(0.1, 1.6)
cmap = cm.RdBu
bounds = [-0.12, -0.09, -0.06, -0.03, 0.00, 0.03, 0.06, 0.09, 0.12]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap,
norm=norm,
boundaries=bounds,
extend='both',
spacing='uniform',
orientation='vertical')
# plt.tight_layout()
# plt.savefig("colorbar.pdf", dpi=300, bbox_inches='tight')
plt.show()
# %%
| 39.979695
| 146
| 0.5871
|
91c7bc5c1f05ac5a89d6ff3e0448e83520e7bf5d
| 3,570
|
py
|
Python
|
google/ads/googleads/v6/services/services/topic_view_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/services/services/topic_view_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/services/services/topic_view_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import topic_view
from google.ads.googleads.v6.services.types import topic_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class TopicViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for TopicViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_topic_view: gapic_v1.method.wrap_method(
self.get_topic_view,
default_timeout=None,
client_info=client_info,
),
}
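    # Concrete transports (for example the gRPC transport generated alongside
    # this base class) are expected to override the property below with a
    # callable that performs the actual RPC; the base class only fixes its
    # signature.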
@property
def get_topic_view(
self,
) -> typing.Callable[
[topic_view_service.GetTopicViewRequest], topic_view.TopicView
]:
raise NotImplementedError
__all__ = ("TopicViewServiceTransport",)
| 35
| 78
| 0.67619
|
60e02f1b6c501c976e585b2565c484a9ffbfb59d
| 7,543
|
py
|
Python
|
battleship/battleship.py
|
TrustyJAID/FlameCogs
|
8a3bb1b54fea760b2f9c019e4fb6782453ea4f29
|
[
"MIT"
] | null | null | null |
battleship/battleship.py
|
TrustyJAID/FlameCogs
|
8a3bb1b54fea760b2f9c019e4fb6782453ea4f29
|
[
"MIT"
] | null | null | null |
battleship/battleship.py
|
TrustyJAID/FlameCogs
|
8a3bb1b54fea760b2f9c019e4fb6782453ea4f29
|
[
"MIT"
] | null | null | null |
import discord
from redbot.core import commands
from redbot.core import Config
from redbot.core import checks
import asyncio
class Battleship(commands.Cog):
"""Play battleship with one other person."""
def __init__(self, bot):
self.bot = bot
self.runningin = []
self.config = Config.get_conf(self, identifier=7345167901)
self.config.register_guild(
extraHit = True
)
@commands.guild_only()
@commands.command()
async def battleship(self, ctx):
"""Start a game of battleship."""
if ctx.channel.id in self.runningin:
return await ctx.send('There is already a game running in this channel.')
self.runningin.append(ctx.channel.id)
channel = ctx.message.channel
name = [ctx.message.author.display_name]
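		# State layout used below: each board cell is 0 = open water, 1 = miss,
		# 2 = hit, 3 = intact ship. bkey[0] renders the opponent's view (ships
		# hidden) and bkey[1] the owner's view. key[player] holds one dict per
		# ship mapping cell index -> 0 (intact) or 1 (hit), key2 records which
		# ships have been destroyed, and namekey maps ship index to its length.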
		board = [[0] * 100 for _ in range(2)]
let = ['A','B','C','D','E','F','G','H','I','J']
letnum = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7, 'i': 8, 'j': 9}
bkey = [{0:'· ',1:'O ',2:'X ',3:'· '},{0:'· ',1:'O ',2:'X ',3:'# '}]
pswap = {1:0,0:1}
key = [[],[]]
		key2 = [{0:0,1:0,2:0,3:0,4:0}, {0:0,1:0,2:0,3:0,4:0}] #track each player's destroyed ships separately
namekey = {0:'5',1:'4',2:'3',3:'3',4:'2'}
pid = [ctx.message.author]
pmsg = []
def bprint(player,bt): #creates a display of the board for printing
b = ' '
for z in range(10): b += let[z]+' '
b += '\n'
for y in range(10): #vertical positions
b += str(y)+' '
for x in range(10): b += bkey[bt][board[player][(y*10)+x]] #horizontal positions
b += '\n'
return '```'+b+'```'
async def place(player,length,value): #create a ship for player of length at position value
hold = {}
try:
x = letnum[value[0]]
except:
				await pid[player].send('Invalid input, the x coordinate must be a letter from A-J.')
return False
try:
y = int(value[1])
except:
				await pid[player].send('Invalid input, the y coordinate must be a number from 0-9.')
return False
try:
d = value[2]
except:
				await pid[player].send('Invalid input, the direction must be d (down) or r (right).')
return False
try:
if d == 'r': #right
if 10 - length < x: #ship would wrap over right edge
await pid[player].send('Invalid input, too far to the right.')
return False
for z in range(length):
if board[player][(y*10)+x+z] != 0: #a spot taken by another ship
await pid[player].send('Invalid input, another ship is in that range.')
return False
for z in range(length):
board[player][(y*10)+x+z] = 3
hold[(y*10)+x+z] = 0
elif d == 'd': #down
for z in range(length):
if board[player][((y+z)*10)+x] != 0: #a spot taken by another ship
await pid[player].send('Invalid input, another ship is in that range.')
return False
for z in range(length):
board[player][((y+z)*10)+x] = 3
hold[((y+z)*10)+x] = 0
else:
					await pid[player].send('Invalid input, the direction must be d (down) or r (right).')
return False
except:
await pid[player].send('Invalid input, too far down.')
return False
key[player].append(hold)
return True
#RUN CODE
check = lambda m: m.author != ctx.message.author and m.author.bot == False and m.channel == ctx.message.channel and m.content.lower() == 'i'
await ctx.send('Second player, say I.')
try:
r = await self.bot.wait_for('message', timeout=60, check=check)
except asyncio.TimeoutError:
self.runningin.remove(ctx.channel.id)
return await ctx.send('You took too long, shutting down.')
name.append(r.author.display_name)
pid.append(r.author)
await ctx.send('A game of battleship will be played between '+name[0]+' and '+name[1]+'.')
for x in range(2): #each player
await ctx.send('Messaging '+name[x]+' for setup now.')
await pid[x].send(str(name[x]+', it is your turn to set up your ships.\nPlace ships by entering the top left coordinate and the direction of (r)ight or (d)own in xyd format.'))
for k in [5,4,3,3,2]: #each ship length
stupid = await pid[x].send(bprint(x,1)+'Place your '+str(k)+' length ship.')
while True:
try:
t = await self.bot.wait_for('message', timeout=120, check=lambda m:m.channel == stupid.channel and m.author.bot == False)
except asyncio.TimeoutError:
self.runningin.remove(ctx.channel.id)
return await ctx.send(name[x]+' took too long, shutting down.')
if await place(x,k,t.content.lower()) == True:
break
m = await pid[x].send(bprint(x,1))
pmsg.append(m)
###############################################################
game = True
p = 1
while game:
p = pswap[p]
await ctx.send(name[p]+'\'s turn!\n'+bprint(pswap[p],0)+name[p]+', take your shot.')
i = 0
while i == 0:
try:
s = await self.bot.wait_for('message', timeout=120, check=lambda m: m.author == pid[p] and m.channel == channel and len(m.content) == 2)
except asyncio.TimeoutError:
self.runningin.remove(ctx.channel.id)
return await ctx.send('You took too long, shutting down.')
try: #makes sure input is valid
x = letnum[s.content[0].lower()]
y = int(s.content[1])
board[pswap[p]][(y*10)+x]
except:
continue
if board[pswap[p]][(y*10)+x] == 0:
board[pswap[p]][(y*10)+x] = 1
await pmsg[pswap[p]].edit(content=bprint(pswap[p],1))
await ctx.send(bprint(pswap[p],0)+'Miss!')
i = 1
elif board[pswap[p]][(y*10)+x] in [1,2]:
await ctx.send('You already shot there!')
elif board[pswap[p]][(y*10)+x] == 3:
board[pswap[p]][(y*10)+x] = 2
await pmsg[pswap[p]].edit(content=bprint(pswap[p],1))
await ctx.send(bprint(pswap[p],0)+'Hit!')
l = -1
for a in range(5):
if ((y*10)+x) in key[pswap[p]][a]:
key[pswap[p]][a][(y*10)+x] = 1
l = 0
for b in key[pswap[p]][a]:
if key[pswap[p]][a][b] == 0: #if any position in the ship is still there, l = 1
l = 1
break
if l == 0: #if ship destroyed
await ctx.send(name[pswap[p]]+'\'s '+namekey[a]+' length ship was destroyed!')
								key2[pswap[p]][a] = 1 #mark ship as destroyed
					for c in key2[pswap[p]]:
						if key2[pswap[p]][c] == 0: #if any of their ships is not destroyed, l = 1
							l = 1
							break
if l == 0: #if all ships destroyed
await ctx.send(name[p]+' wins!')
self.runningin.remove(ctx.channel.id)
game = False
i = 1
if game == True:
if await self.config.guild(ctx.guild).extraHit() == True:
await ctx.send('Take another shot.')
else:
i = 1
@commands.guild_only()
@checks.guildowner()
@commands.command()
async def battleshipset(self, ctx, value: bool=None):
"""
Set if an extra shot should be given after a hit.
Defaults to True.
This value is server specific.
"""
if value == None:
v = await self.config.guild(ctx.guild).extraHit()
if v == True:
await ctx.send('You are currently able to shoot again after a hit.')
else:
await ctx.send('You are currently not able to shoot again after a hit.')
else:
await self.config.guild(ctx.guild).extraHit.set(value)
if value == True:
await ctx.send('You will now be able to shoot again after a hit.')
else:
await ctx.send('You will no longer be able to shoot again after a hit.')
| 38.28934
| 415
| 0.593663
|
a7ce3239986414c113f9cf29d9137991d40366a5
| 301
|
py
|
Python
|
onnx_coreml/__init__.py
|
karfly/onnx-coreml
|
0e423d0b4779af21479c338ed82f3864434e4c21
|
[
"MIT"
] | null | null | null |
onnx_coreml/__init__.py
|
karfly/onnx-coreml
|
0e423d0b4779af21479c338ed82f3864434e4c21
|
[
"MIT"
] | null | null | null |
onnx_coreml/__init__.py
|
karfly/onnx-coreml
|
0e423d0b4779af21479c338ed82f3864434e4c21
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .converter import convert
# onnx-coreml version
__version__ = '1.3'
__all__ = ['convert']
print("You're using onnx-coreml hacked by @karfly!")
| 21.5
| 52
| 0.803987
|
9c8028af6de8b854bce77af04ff149d397be3ff9
| 14,819
|
py
|
Python
|
RobotMovingPanel/interface/interfaceLogic.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
RobotMovingPanel/interface/interfaceLogic.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
RobotMovingPanel/interface/interfaceLogic.py
|
Vlad12344/Pulseapi_Integration
|
2acf93a17dd2911328141886b8724134fff84f00
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
from math import radians
from PyQt5 import QtWidgets, QtCore, QtGui
# from RobotMovingPanel.features import tcp
from pulseapi_integration import NewRobotPulse, jog, PulseApiException
from pulseapi_integration.utils import position_2_xyzrpw
from pulseapi import tool_info, tool_shape, position, pose
from PyQt5.QtWidgets import QApplication, QDialog, QFileDialog
from .interface import Ui_Dialog
from RobotMovingPanel import config
from RobotMovingPanel.logger import file_existing_check, append_list_as_row
from RobotMovingPanel.utils.jsonWorker import openJson
from RobotMovingPanel.interface import model
from RobotMovingPanel.model import tcpmodel
from RobotMovingPanel.model import robotModel
from RobotMovingPanel.model.treewidgetmodel import selectTreeItem
class PositionChenger(QtCore.QThread):
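    """Background thread that polls the robot position every 200 ms and mirrors it into the XYZ/RPW line edits."""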
def __init__(self, mainwindow, parent=None):
super().__init__()
self.mainwindow = mainwindow
def run(self):
while True:
XYZ = self.mainwindow.robot.get_position()['point'].values()
RPW = self.mainwindow.robot.get_position()['rotation'].values()
for lineEdit, value in zip(self.mainwindow.lineEdit_position_mapping, [*XYZ, *RPW]):
lineEdit.setText(str(round(value, 4)))
time.sleep(0.2)
class MovingPanel(QtWidgets.QMainWindow, Ui_Dialog):
def __init__(self):
super().__init__()
self.PROG: str = ''
self.PROG_PATH: str = ''
self.NUMBER_OF_TOOL_CALIBRATION_POINTS = set()
self.CURRENT_TOOL_CALIBRATION_NAME = ''
self.setupUi(self)
self.lineEdit.setVisible(False)
self.pushButton_2.setVisible(False)
self.pushButton_3.setVisible(False)
file_existing_check(config.PATH_2_CSV)
# self.move_buttons_maping = [
# (self.button_xUp, {'direction': 1, 'axis': 'x'}),
# (self.button_xDown, {'direction': -1, 'axis': 'x'}),
# (self.button_yUp, {'direction': 1, 'axis': 'y'}),
# (self.button_yDown, {'direction': -1, 'axis': 'y'}),
# (self.button_zUp, {'direction': 1, 'axis': 'z'}),
# (self.button_zDown, {'direction': -1, 'axis': 'z'}),
# (self.button_rollUp, {'direction': 1, 'axis': 'rx'}),
# (self.button_rollDown, {'direction': -1, 'axis': 'rx'}),
# (self.button_pitchUp, {'direction': 1, 'axis': 'ry'}),
# (self.button_pitchDown, {'direction': -1, 'axis': 'ry'}),
# (self.button_yawUp, {'direction': 1, 'axis': 'rz'}),
# (self.button_yawDown, {'direction': -1, 'axis': 'rz'})
# ]
self.move_buttons_maping = [
(self.button_xUp, self.xUp),
(self.button_xDown, self.xDown),
(self.button_yUp, self.yUp),
(self.button_yDown, self.yDown),
(self.button_zUp, self.zUp),
(self.button_zDown, self.zDown),
(self.button_rollUp, self.rxUp),
(self.button_rollDown, self.rxDown),
(self.button_pitchUp, self.ryUp),
(self.button_pitchDown, self.ryDown),
(self.button_yawUp, self.rzUp),
(self.button_yawDown, self.rzDown)
]
self.lineEdit_position_mapping = [
self.lineEdit_x,
self.lineEdit_y,
self.lineEdit_z,
self.lineEdit_roll,
self.lineEdit_pitch,
self.lineEdit_yaw
]
self.lineEdit_joints_mapping = [
self.lineEdit_J_1,
self.lineEdit_J_2,
self.lineEdit_J_3,
self.lineEdit_J_4,
self.lineEdit_J_5,
self.lineEdit_J_6
]
self.lineEdit_tool_maping = [
self.lineEdit_tollX,
self.lineEdit_tool_Y,
self.lineEdit_tool_Z,
self.lineEdit_tool_RX,
self.lineEdit_tool_RY,
self.lineEdit_tool_RZ
]
# self.positionalChenger_instance = PositionChenger(mainwindow=self)
# self.launchPositionChanger()
self.move_buttons_handler()
self.setToolComboBox()
def closeGraphWindow(self):
self.stackedWidget_3.setCurrentIndex(0)
def openFullScreen(self):
self.stackedWidget_3.setCurrentIndex(1)
def infoFromLineEdit(self, mapping: list) -> list:
return [float(line_edit.text()) for line_edit in mapping]
#-------------------TOOL-------------------
def besidesDefaultTool(func):
def wrapper(self):
if self.tollsComboBox.currentIndex() != 0:
func(self)
pass
return wrapper
@besidesDefaultTool
def openToolSetPoints(self):
self.stackedWidget_2.setCurrentIndex(1)
self.tollsComboBox.setEnabled(False)
def closeTeachTCPWindow(self):
self.stackedWidget_2.setCurrentIndex(0)
self.tollsComboBox.setEnabled(True)
def addToolHandler(self):
count = self.tollsComboBox.count()
coordinates = self.infoFromLineEdit(self.lineEdit_tool_maping)
# tool is a standard name like TCP_0
tool = tcpmodel.addTool(toolNum=count, tcpCoordinates=coordinates)
self.tollsComboBox.addItem(tool)
self.tollsComboBox.setCurrentText(tool)
@besidesDefaultTool
def delToolHandler(self):
tool_name = self.tollsComboBox.currentText()
item_num = self.tollsComboBox.currentIndex()
# if not tool_name == 'default':
self.tollsComboBox.removeItem(item_num)
tcpmodel.delTool(toolName=tool_name)
# else: pass
@besidesDefaultTool
def resetTool(self):
tool_name = self.tollsComboBox.currentText()
tcpmodel.resetTool(tool_name)
for line_edit in self.lineEdit_tool_maping:
line_edit.setText('0.0')
def setToolComboBox(self):
names = tcpmodel.getToolsNames()
for name in names:
self.tollsComboBox.addItem(name)
def setToolHandler(self):
coordinates = self.infoFromLineEdit(self.lineEdit_tool_maping)
tool_name = self.tollsComboBox.currentText()
tcpmodel.updateTCPCoordinates(tool_name, tcpCoordinates=coordinates)
# tcpCoordinates = tcpmodel.getTCPCoordinates(tool_name)
# robotModel.setTool(tcpCoordinates=tcpCoordinates)
def setToolFromItem(self, itemText: str):
coords = tcpmodel.getTCPCoordinates(itemText)
for line, coord in zip(self.lineEdit_tool_maping, coords):
line.setText(str(coord))
def setToolName(self):
if self.lineEdit.isVisible() and self.lineEdit.text() != '':
self.lineEdit.setCursorPosition(0)
new_name = self.lineEdit.text()
current_name = self.tollsComboBox.currentText()
if not tcpmodel.checkSimilarity(new_name):
tcpmodel.updateToolName(new_name, current_name)
item_index = self.tollsComboBox.currentIndex()
self.tollsComboBox.setItemText(item_index, new_name)
self.lineEdit.setVisible(False)
else:
self.lineEdit.setVisible(False)
elif self.lineEdit.isVisible() and self.lineEdit.text() == '':
self.lineEdit.setVisible(False)
elif self.tollsComboBox.currentIndex() == 0:
self.lineEdit.setVisible(False)
else:
self.lineEdit.clear()
self.lineEdit.setVisible(True)
    def openMoveTabWithSetPointsMode(self, button):
        """Open the move tab in point-teaching mode; `button` is the pressed calibration-point button whose text identifies the point."""
self.tabWidget.setCurrentIndex(0)
self.enableOkCancelButtonOnMovingPanel(True)
self.CURRENT_TOOL_CALIBRATION_NAME = button.text()
def setCalibratedPoint(self):
self.enableOkCancelButtonOnMovingPanel(False)
buttonText = self.CURRENT_TOOL_CALIBRATION_NAME
toolName = self.tollsComboBox.currentText()
xyzrpw = robotModel.getPosition()
tcpmodel.updateCalibratedPoints(
toolName,
buttonText,
[*xyzrpw['point'].values(), *xyzrpw['rotation'].values()]
)
self.CURRENT_TOOL_CALIBRATION_NAME = ''
self.closeMoveTab()
def setCalibratedTool(self):
tool_name = self.tollsComboBox.currentText()
tcpmodel.calculateTCP(tool_name)
self.setToolFromItem(tool_name)
self.tollsComboBox.setEnabled(True)
def teachToolPointsChecker(self, buttonID: int) -> None:
self.NUMBER_OF_TOOL_CALIBRATION_POINTS.add(buttonID)
if len(self.NUMBER_OF_TOOL_CALIBRATION_POINTS) == 4:
self.SetToolPointsButton.setEnabled(True)
#--------------------------------------
def closeMoveTab(self):
self.tabWidget.setCurrentIndex(1)
def enableOkCancelButtonOnMovingPanel(self, state: bool):
if state:
self.pushButton_2.setVisible(state)
self.pushButton_3.setVisible(state)
self.pushButton.setVisible(state-1)
else:
self.pushButton_2.setVisible(state)
self.pushButton_3.setVisible(state)
self.pushButton.setVisible(state+1)
def enableStepEnter(self):
state = False
if self.radioButton_Step.isChecked():
state = True
self.doubleSpinBox_step.setEnabled(state)
self.doubleSpinBox_rotation.setEnabled(state)
def launchPositionChanger(self):
self.positionalChenger_instance.start()
def getSpeed(self):
return self.horizontalSlider_speed.value()/100
def getLinearStep(self):
return self.doubleSpinBox_step.value()/1000
def getRotationStep(self):
return self.doubleSpinBox_rotation.value()
def move_buttons_handler(self):
try:
for button, move_func in self.move_buttons_maping:
button.pressed.connect(move_func)
button.released.connect(self.stopJog)
except PulseApiException:
print('loh')
# def check_state(self):
# return self.radioButton_Jogging.isChecked()
# def move_button_handler(self):
# try:
# for button, params in self.move_buttons_maping:
# if axis in ['x', 'y', 'z']:
# button.pressed.connect(
# lambda: robotModel.move(
# state=self.check_state(), axis=axis, speed=self.getSpeed(),
# moveStep=self.getLinearStep()*direction)
# )
# elif axis in ['rx', 'ry', 'rz']:
# button.pressed.connect(
# lambda: robotModel.move(
# state=self.check_state(), axis=axis, speed=self.getSpeed(),
# moveStep=self.getRotationStep()*direction)
# )
# button.released.connect(self.stopJog)
# except PulseApiException:
# print('loh')
def xUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='x', speed=self.getSpeed(), moveStep=self.getLinearStep())
def xDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='x', speed=self.getSpeed(), moveStep=self.getLinearStep(), direction=-1)
def yUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='y', speed=self.getSpeed(), moveStep=self.getLinearStep())
def yDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='y', speed=self.getSpeed(), moveStep=self.getLinearStep(), direction=-1)
def zUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='z', speed=self.getSpeed(), moveStep=self.getLinearStep())
def zDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='z', speed=self.getSpeed(), moveStep=self.getLinearStep(), direction=-1)
def rxUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='rx', speed=self.getSpeed(), moveStep=self.getRotationStep())
def rxDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='rx', speed=self.getSpeed(), moveStep=self.getRotationStep(), direction=-1)
def ryUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='ry', speed=self.getSpeed(), moveStep=self.getRotationStep())
def ryDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='ry', speed=self.getSpeed(), moveStep=-self.getRotationStep(), direction=-1)
def rzUp(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='rz', speed=self.getSpeed(), moveStep=self.getRotationStep())
def rzDown(self):
state = self.radioButton_Jogging.isChecked()
robotModel.move(
state=state, axis='rz', speed=self.getSpeed(), moveStep=-self.getRotationStep(), direction=-1)
def stopJog(self):
if self.radioButton_Jogging.isChecked():
robotModel.stopJog()
else: pass
def getButton_handler(self):
robotModel.appendPositionToCSV(self.robot, config.PATH_2_CSV)
def getPose_handler(self):
print(robotModel.get_pose())
def relaxRobotHandler(self):
robotModel.relax()
def freezeRobotHandler(self):
robotModel.freeze()
def enableRelaxButton(self):
if self.enableRelaxRadioButton.isChecked():
self.relaxButton.setEnabled(True)
else:
self.relaxButton.setEnabled(False)
def showFileDialog(self):
self.PROG_PATH = QFileDialog.getOpenFileName(self, 'Open file', '/home')[0]
self.PROG = model.read(self.PROG_PATH)
self.textEdit.setText(self.PROG)
def saveProgramHandler(self):
model.saveProgramFromTextEditor(self.PROG, self.PROG_PATH)
def runCode(self):
model.runCodeFromTextEditor(self.PROG, self.PROG_PATH)
def showProgramEditor(self, item):
state, itemNum = selectTreeItem(item.text(0))
if state:
self.stackedWidget.setCurrentIndex(itemNum)
else: pass
def goHome(self):
tcp_velocity = self.getSpeed()
# print(tcp_velocity)
robotModel.goHome(speed=tcp_velocity)
| 36.680693
| 106
| 0.623186
|
56036887d4a57e51cac65632b323abebb9772fe7
| 8,626
|
py
|
Python
|
opytimizer/optimizers/swarm/sfo.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 528
|
2018-10-01T20:00:09.000Z
|
2022-03-27T11:15:31.000Z
|
opytimizer/optimizers/swarm/sfo.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 17
|
2019-10-30T00:47:03.000Z
|
2022-03-21T11:39:28.000Z
|
opytimizer/optimizers/swarm/sfo.py
|
anukaal/opytimizer
|
5f1ccc0da80e6a4cabd99578fa24cf4f6466f9b9
|
[
"Apache-2.0"
] | 35
|
2018-10-01T20:03:23.000Z
|
2022-03-20T03:54:15.000Z
|
"""Sailfish Optimizer.
"""
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.exception as ex
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class SFO(Optimizer):
"""A SFO class, inherited from Optimizer.
This is the designed class to define SFO-related
variables and methods.
References:
S. Shadravan, H. Naji and V. Bardsiri.
The Sailfish Optimizer: A novel nature-inspired metaheuristic algorithm
for solving constrained engineering optimization problems.
Engineering Applications of Artificial Intelligence (2019).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
logger.info('Overriding class: Optimizer -> SFO.')
# Overrides its parent class with the receiving params
super(SFO, self).__init__()
# Percentage of initial sailfishes
self.PP = 0.1
# Attack power coefficient
self.A = 4
# Attack power decrease
self.e = 0.001
# Builds the class
self.build(params)
logger.info('Class overrided.')
@property
def PP(self):
"""float: Percentage of initial sailfishes.
"""
return self._PP
@PP.setter
def PP(self, PP):
if not isinstance(PP, (float, int)):
raise ex.TypeError('`PP` should be a float or integer')
if PP < 0 or PP > 1:
raise ex.ValueError('`PP` should be between 0 and 1')
self._PP = PP
@property
def A(self):
"""int: Attack power coefficient.
"""
return self._A
@A.setter
def A(self, A):
if not isinstance(A, int):
raise ex.TypeError('`A` should be an integer')
if A <= 0:
raise ex.ValueError('`A` should be > 0')
self._A = A
@property
def e(self):
"""float: Attack power decrease.
"""
return self._e
@e.setter
def e(self, e):
if not isinstance(e, (float, int)):
raise ex.TypeError('`e` should be a float or integer')
if e < 0:
raise ex.ValueError('`e` should be >= 0')
self._e = e
@property
def sardines(self):
"""list: List of sardines.
"""
return self._sardines
@sardines.setter
def sardines(self, sardines):
if not isinstance(sardines, list):
raise ex.TypeError('`sardines` should be a list')
self._sardines = sardines
def compile(self, space):
"""Compiles additional information that is used by this optimizer.
Args:
space (Space): A Space object containing meta-information.
"""
# List of sardines
self.sardines = [self._generate_random_agent(space.best_agent)
for _ in range(int(space.n_agents / self.PP))]
# Sorts the population of sardines
self.sardines.sort(key=lambda x: x.fit)
def _generate_random_agent(self, agent):
"""Generates a new random-based agent.
Args:
agent (Agent): Agent to be copied.
Returns:
Random-based agent.
"""
# Makes a deep copy of agent
a = copy.deepcopy(agent)
# Fills agent with new random positions
a.fill_with_uniform()
return a
def _calculate_lambda_i(self, n_sailfishes, n_sardines):
"""Calculates the lambda value (eq. 7).
Args:
n_sailfishes (int): Number of sailfishes.
n_sardines (int): Number of sardines.
Returns:
Lambda value from current iteration.
"""
# Calculates the prey density (eq. 8)
PD = 1 - (n_sailfishes / (n_sailfishes + n_sardines))
# Generates a random uniform number
r1 = r.generate_uniform_random_number()
# Calculates lambda
lambda_i = 2 * r1 * PD - PD
return lambda_i
def _update_sailfish(self, agent, best_agent, best_sardine, lambda_i):
"""Updates the sailfish's position (eq. 6).
Args:
agent (Agent): Current agent's.
best_agent (Agent): Best sailfish.
best_sardine (Agent): Best sardine.
lambda_i (float): Lambda value.
Returns:
An updated position.
"""
# Generates a random uniform number
r1 = r.generate_uniform_random_number()
# Calculates the new position
new_position = best_sardine.position - lambda_i * \
(r1 * (best_agent.position - best_sardine.position) / 2 - agent.position)
return new_position
def update(self, space, function, iteration):
"""Wraps Sailfish Optimizer over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
iteration (int): Current iteration.
"""
# Gathers the best sardine
best_sardine = self.sardines[0]
# Calculates the number of sailfishes and sardines
n_sailfishes = len(space.agents)
n_sardines = len(self.sardines)
# Calculates the number of decision variables
n_variables = space.agents[0].n_variables
# Iterates through every agent
for agent in space.agents:
# Calculates the lambda value
lambda_i = self._calculate_lambda_i(n_sailfishes, n_sardines)
# Updates agent's position
agent.position = self._update_sailfish(agent, space.best_agent, best_sardine, lambda_i)
# Clips agent's limits
agent.clip_by_bound()
# Re-evaluates agent's fitness
agent.fit = function(agent.position)
# Calculates the attack power (eq. 10)
AP = np.fabs(self.A * (1 - 2 * iteration * self.e))
# Checks if attack power is smaller than 0.5
if AP < 0.5:
# Calculates the number of sardines possible replacements (eq. 11)
alpha = int(len(self.sardines) * AP)
# Calculates the number of variables possible replacements (eq. 12)
beta = int(n_variables * AP)
# Generates a list of selected sardines
selected_sardines = r.generate_integer_random_number(0, n_sardines, size=alpha)
# Iterates through every selected sardine
for i in selected_sardines:
# Generates a list of selected variables
selected_vars = r.generate_integer_random_number(0, n_variables, size=beta)
# Iterates through every selected variable
for j in selected_vars:
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# Updates the sardine's position (eq. 9)
self.sardines[i].position[j] = r1 * \
(space.best_agent.position[j] - self.sardines[i].position[j] + AP)
# Clips sardine's limits
self.sardines[i].clip_by_bound()
# Re-calculates its fitness
self.sardines[i].fit = function(self.sardines[i].position)
# If attack power is bigger than 0.5
else:
# Iterates through every sardine
for sardine in self.sardines:
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# Updates the sardine's position (eq. 9)
sardine.position = r1 * (space.best_agent.position - sardine.position + AP)
# Clips sardine's limits
sardine.clip_by_bound()
# Re-calculates its fitness
sardine.fit = function(sardine.position)
# Sorts the population of agents (sailfishes) and sardines
space.agents.sort(key=lambda x: x.fit)
self.sardines.sort(key=lambda x: x.fit)
# Iterates through every agent
for agent in space.agents:
# Iterates through every sardine
for sardine in self.sardines:
# If agent is worse than sardine (eq. 13)
if agent.fit > sardine.fit:
# Copies sardine to agent
agent = copy.deepcopy(sardine)
break
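# Minimal usage sketch (illustrative only; module paths and constructor
# signatures follow the opytimizer layout this file imports from, so check the
# installed version before relying on them):
#
#     from opytimizer import Opytimizer
#     from opytimizer.core import Function
#     from opytimizer.spaces import SearchSpace
#
#     space = SearchSpace(n_agents=20, n_variables=2,
#                         lower_bound=[-10, -10], upper_bound=[10, 10])
#     heuristic = SFO({'PP': 0.1, 'A': 4, 'e': 0.001})
#     task = Opytimizer(space, heuristic, Function(lambda x: np.sum(x ** 2)))
#     task.start(n_iterations=100)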
| 28.657807
| 99
| 0.585092
|
a2890b3e014659ba9de1d3be0cdba0b646ed80e1
| 1,562
|
py
|
Python
|
pype/plugins/maya/create/create_rendersetup.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/create/create_rendersetup.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/create/create_rendersetup.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
import avalon.maya
import pype.maya.lib as lib
from maya import cmds
class CreateRenderSetup(avalon.maya.Creator):
"""Create rendersetup template json data"""
name = "rendersetup"
label = "Render Setup Preset"
family = "rendersetup"
icon = "tablet"
def __init__(self, *args, **kwargs):
super(CreateRenderSetup, self).__init__(*args, **kwargs)
# here we can pre-create renderSetup layers, possibly utlizing
# presets for it.
# _____
# / __\__
# | / __\__
# | | / \
# | | | |
# \__| | |
# \__| |
# \_____/
# from pypeapp import config
# import maya.app.renderSetup.model.renderSetup as renderSetup
# presets = config.get_presets(project=os.environ['AVALON_PROJECT'])
# layer = presets['plugins']['maya']['create']['renderSetup']["layer"]
# rs = renderSetup.instance()
# rs.createRenderLayer(layer)
self.options = {"useSelection": False} # Force no content
def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug"
)
if exists:
return cmds.warning("%s already exists." % exists[0])
with lib.undo_chunk():
instance = super(CreateRenderSetup, self).process()
self.data["renderSetup"] = "42"
null = cmds.sets(name="null_SET", empty=True)
cmds.sets([null], forceElement=instance)
| 28.925926
| 78
| 0.567862
|
dabdebf702e5ca56463c2a35a369cb463da8098a
| 15,501
|
py
|
Python
|
src/toast/pipeline_tools/madam.py
|
ziotom78/toast
|
66aef04c833a28f0928a0bbc221da45882aae475
|
[
"BSD-2-Clause"
] | null | null | null |
src/toast/pipeline_tools/madam.py
|
ziotom78/toast
|
66aef04c833a28f0928a0bbc221da45882aae475
|
[
"BSD-2-Clause"
] | null | null | null |
src/toast/pipeline_tools/madam.py
|
ziotom78/toast
|
66aef04c833a28f0928a0bbc221da45882aae475
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import argparse
import copy
import os
import re
import numpy as np
from ..timing import function_timer, Timer
from ..utils import Logger, Environment
from ..todmap import OpMadam
def add_madam_args(parser):
""" Add libmadam arguments
"""
parser.add_argument(
"--madam-prefix", required=False, default="toast", help="Output map prefix"
)
parser.add_argument(
"--madam-iter-max",
required=False,
default=1000,
type=np.int,
help="Maximum number of CG iterations in Madam",
)
parser.add_argument(
"--madam-precond-width",
required=False,
default=100,
type=np.int,
help="Width of the Madam band preconditioner",
)
parser.add_argument(
"--madam-precond-width-min",
required=False,
type=np.int,
help="Minimum width of the Madam band preconditioner",
)
parser.add_argument(
"--madam-precond-width-max",
required=False,
type=np.int,
help="Maximum width of the Madam band preconditioner",
)
parser.add_argument(
"--madam-baseline-length",
required=False,
default=10000.0,
type=np.float,
help="Destriping baseline length (seconds)",
)
parser.add_argument(
"--madam-baseline-order",
required=False,
default=0,
type=np.int,
help="Destriping baseline polynomial order",
)
parser.add_argument(
"--madam-noisefilter",
required=False,
default=False,
action="store_true",
help="Destripe with the noise filter enabled",
)
parser.add_argument(
"--madam-parfile", required=False, default=None, help="Madam parameter file"
)
parser.add_argument(
"--madam-allreduce",
required=False,
action="store_true",
help="Use the allreduce commucation pattern in Madam",
dest="madam_allreduce",
)
parser.add_argument(
"--no-madam-allreduce",
required=False,
action="store_false",
help="Do not use the allreduce commucation pattern in Madam",
dest="madam_allreduce",
)
parser.set_defaults(madam_allreduce=False)
parser.add_argument(
"--madam-concatenate-messages",
required=False,
action="store_true",
help="Use the alltoallv commucation pattern in Madam",
dest="madam_concatenate_messages",
)
parser.add_argument(
"--no-madam-concatenate-messages",
required=False,
action="store_false",
help="Use the point-to-point commucation pattern in Madam",
dest="madam_concatenate_messages",
)
parser.set_defaults(madam_concatenate_messages=True)
try:
parser.add_argument(
"--destripe",
required=False,
action="store_true",
help="Write destriped maps [default]",
dest="destripe",
)
parser.add_argument(
"--no-destripe",
required=False,
action="store_false",
help="Do not write destriped maps",
dest="destripe",
)
parser.set_defaults(destripe=True)
except argparse.ArgumentError:
pass
try:
parser.add_argument(
"--binmap",
required=False,
action="store_true",
help="Write binned maps [default]",
dest="write_binmap",
)
parser.add_argument(
"--no-binmap",
required=False,
action="store_false",
help="Do not write binned maps",
dest="write_binmap",
)
parser.set_defaults(write_binmap=True)
except argparse.ArgumentError:
pass
try:
parser.add_argument(
"--hits",
required=False,
action="store_true",
help="Write hit maps [default]",
dest="write_hits",
)
parser.add_argument(
"--no-hits",
required=False,
action="store_false",
help="Do not write hit maps",
dest="write_hits",
)
parser.set_defaults(write_hits=True)
except argparse.ArgumentError:
pass
try:
parser.add_argument(
"--wcov",
required=False,
action="store_true",
help="Write white noise covariance [default]",
dest="write_wcov",
)
parser.add_argument(
"--no-wcov",
required=False,
action="store_false",
help="Do not write white noise covariance",
dest="write_wcov",
)
parser.set_defaults(write_wcov=True)
except argparse.ArgumentError:
pass
try:
parser.add_argument(
"--wcov-inv",
required=False,
action="store_true",
help="Write inverse white noise covariance [default]",
dest="write_wcov_inv",
)
parser.add_argument(
"--no-wcov-inv",
required=False,
action="store_false",
help="Do not write inverse white noise covariance",
dest="write_wcov_inv",
)
parser.set_defaults(write_wcov_inv=True)
except argparse.ArgumentError:
pass
parser.add_argument(
"--conserve-memory",
dest="conserve_memory",
required=False,
action="store_true",
help="Conserve memory when staging libMadam buffers [default]",
)
parser.add_argument(
"--no-conserve-memory",
dest="conserve_memory",
required=False,
action="store_false",
help="Do not conserve memory when staging libMadam buffers",
)
parser.set_defaults(conserve_memory=True)
# `nside` may already be added
try:
parser.add_argument(
"--nside", required=False, default=512, type=np.int, help="Healpix NSIDE"
)
except argparse.ArgumentError:
pass
# Common flag mask may already be added
try:
parser.add_argument(
"--common-flag-mask",
required=False,
default=1,
type=np.uint8,
help="Common flag mask",
)
except argparse.ArgumentError:
pass
# `sample-rate` may be already added
try:
parser.add_argument(
"--sample-rate",
required=False,
default=100.0,
type=np.float,
help="Detector sample rate (Hz)",
)
except argparse.ArgumentError:
pass
return
@function_timer
def setup_madam(args):
""" Create a Madam parameter dictionary.
Initialize the Madam parameters from the command line arguments.
"""
pars = {}
cross = args.nside // 2
submap = 16
if submap > args.nside:
submap = args.nside
pars["temperature_only"] = False
pars["force_pol"] = True
pars["kfirst"] = args.destripe
pars["write_map"] = args.destripe
pars["write_binmap"] = args.write_binmap
pars["write_matrix"] = args.write_wcov_inv
pars["write_wcov"] = args.write_wcov
pars["write_hits"] = args.write_hits
pars["nside_cross"] = cross
pars["nside_submap"] = submap
if args.madam_concatenate_messages:
# Collective communication is fast but requires memory
pars["concatenate_messages"] = True
if args.madam_allreduce:
# Every process will allocate a copy of every observed submap.
pars["allreduce"] = True
else:
# Every process will allocate complete send and receive buffers
pars["allreduce"] = False
else:
# Slow but memory-efficient point-to-point communication. Allocate
# only enough memory to communicate with one process at a time.
pars["concatenate_messages"] = False
pars["allreduce"] = False
pars["reassign_submaps"] = True
pars["pixlim_cross"] = 1e-3
pars["pixmode_cross"] = 2
pars["pixlim_map"] = 1e-2
pars["pixmode_map"] = 2
# Instead of fixed detector weights, we'll want to use scaled noise
# PSD:s that include the atmospheric noise
pars["radiometers"] = True
pars["noise_weights_from_psd"] = True
if args.madam_parfile is not None:
# Parse all available parameters from the supplied
# Madam parameter file
pat = re.compile(r"\s*(\S+)\s*=\s*(\S+(\s+\S+)*)\s*")
comment = re.compile(r"^#.*")
with open(args.madam_parfile, "r") as f:
for line in f:
if comment.match(line) is None:
result = pat.match(line)
if result is not None:
key, value = result.group(1), result.group(2)
pars[key] = value
pars["base_first"] = args.madam_baseline_length
pars["basis_order"] = args.madam_baseline_order
# Adaptive preconditioner width
width_min = args.madam_precond_width_min
width_max = args.madam_precond_width_max
if width_min is None:
# madam-precond-width has a default value
width_min = args.madam_precond_width
if width_max is None:
# madam-precond-width has a default value
width_max = args.madam_precond_width
if width_min > width_max:
# it is not an error for these two to match
width_min = width_max
pars["precond_width_min"] = width_min
pars["precond_width_max"] = width_max
#
pars["nside_map"] = args.nside
if args.madam_noisefilter:
if args.madam_baseline_order != 0:
raise RuntimeError(
"Madam cannot build a noise filter when baseline"
"order is higher than zero."
)
pars["kfilter"] = True
else:
pars["kfilter"] = False
pars["fsample"] = args.sample_rate
pars["iter_max"] = args.madam_iter_max
pars["file_root"] = args.madam_prefix
return pars
@function_timer
def apply_madam(
args,
comm,
data,
madampars,
outpath,
detweights,
cache_name,
freq=None,
time_comms=None,
telescope_data=None,
first_call=True,
extra_prefix=None,
verbose=True,
bin_only=False,
):
""" Use libmadam to bin and optionally destripe data.
Bin and optionally destripe all conceivable subsets of the data.
Args:
freq (str) : Frequency identifier to append to the file prefix
time_comms (iterable) : Series of disjoint communicators that
map, e.g., seasons and days. Each entry is a tuple of
the form (`name`, `communicator`)
telescope_data (iterable) : series of disjoint TOAST data
objects. Each entry is tuple of the form (`name`, `data`).
bin_only (bool) : Disable destriping and only bin the signal,
Useful when running Madam as a part of a filter+bin pipeline.
"""
if comm.comm_world is None:
raise RuntimeError("Madam requires MPI")
log = Logger.get()
total_timer = Timer()
total_timer.start()
if comm.world_rank == 0 and verbose:
log.info("Making maps")
pars = copy.deepcopy(madampars)
pars["path_output"] = outpath
if comm.world_rank == 0:
os.makedirs(outpath, exist_ok=True)
file_root = pars["file_root"]
if extra_prefix is not None:
if len(file_root) > 0 and not file_root.endswith("_"):
file_root += "_"
file_root += "{}".format(extra_prefix)
if freq is not None:
if len(file_root) > 0 and not file_root.endswith("_"):
file_root += "_"
file_root += "{:03}".format(int(freq))
if not first_call:
# Only the first MC iteration should produce the hits and
# white noise matrices
pars["write_matrix"] = False
pars["write_wcov"] = False
pars["write_hits"] = False
if bin_only:
pars["kfirst"] = False
pars["write_map"] = False
pars["write_binmap"] = True
# Sanity check, is any of the Madam outputs required?
outputs = [
pars["write_map"],
pars["write_binmap"],
pars["write_hits"],
pars["write_wcov"],
pars["write_matrix"],
]
if not np.any(outputs):
if comm.world_rank == 0:
log.info("No Madam outputs requested. Skipping.")
return
if args.madam_noisefilter or not pars["kfirst"]:
# With the noise filter enabled, we want to enforce continuity
# across the Observation. Otherwise we fit each interval
# separately.
madam_intervals = None
else:
madam_intervals = "intervals"
madam = OpMadam(
params=pars,
detweights=detweights,
name=cache_name,
common_flag_mask=args.common_flag_mask,
purge_tod=False,
intervals=madam_intervals,
conserve_memory=args.conserve_memory,
)
if "info" in madam.params:
info = madam.params["info"]
else:
info = 3
if time_comms is None:
time_comms = [("all", comm.comm_world)]
if telescope_data is None:
telescope_data = [("all", data)]
timer = Timer()
for time_name, time_comm in time_comms:
for tele_name, tele_data in telescope_data:
if len(time_name.split("-")) == 3:
# Special rules for daily maps
if args.do_daymaps:
continue
if len(telescope_data) > 1 and tele_name == "all":
# Skip daily maps over multiple telescopes
continue
if first_call:
# Do not destripe daily maps
kfirst_save = pars["kfirst"]
write_map_save = pars["write_map"]
write_binmap_save = pars["write_binmap"]
pars["kfirst"] = False
pars["write_map"] = False
pars["write_binmap"] = True
timer.start()
madam.params["file_root"] = "{}_telescope_{}_time_{}".format(
file_root, tele_name, time_name
)
if time_comm == comm.comm_world:
madam.params["info"] = info
else:
# Cannot have verbose output from concurrent mapmaking
madam.params["info"] = 0
if (time_comm is None or time_comm.rank == 0) and verbose:
log.info("Mapping {}".format(madam.params["file_root"]))
madam.exec(tele_data, time_comm)
if time_comm is not None:
time_comm.barrier()
if comm.world_rank == 0 and verbose:
timer.report_clear("Mapping {}".format(madam.params["file_root"]))
if len(time_name.split("-")) == 3 and first_call:
# Restore destriping parameters
pars["kfirst"] = kfirst_save
pars["write_map"] = write_map_save
pars["write_binmap"] = write_binmap_save
if comm.comm_world is not None:
comm.comm_world.barrier()
total_timer.stop()
if comm.world_rank == 0 and verbose:
total_timer.report("Madam total")
return
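# Typical wiring in a pipeline script (sketch only; the values come from the
# argparse flags registered by add_madam_args):
#
#     add_madam_args(parser)
#     args = parser.parse_args()
#     madampars = setup_madam(args)
#     apply_madam(args, comm, data, madampars, outpath, detweights, cache_name)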
| 30.817097
| 85
| 0.582737
|
f02943c6aeebccab135dab55c6c24578010567d6
| 1,479
|
py
|
Python
|
multiAgentEnv/MARL/DDPG/agents/replay_buffer.py
|
eranbTAU/Closing-the-Reality-Gap-for-a-Multi-Agent-System-Using-GAN
|
3df5f8ba1069ce3f16f1ab743da9cbdd3bddd43c
|
[
"MIT"
] | null | null | null |
multiAgentEnv/MARL/DDPG/agents/replay_buffer.py
|
eranbTAU/Closing-the-Reality-Gap-for-a-Multi-Agent-System-Using-GAN
|
3df5f8ba1069ce3f16f1ab743da9cbdd3bddd43c
|
[
"MIT"
] | null | null | null |
multiAgentEnv/MARL/DDPG/agents/replay_buffer.py
|
eranbTAU/Closing-the-Reality-Gap-for-a-Multi-Agent-System-Using-GAN
|
3df5f8ba1069ce3f16f1ab743da9cbdd3bddd43c
|
[
"MIT"
] | 1
|
2022-02-22T11:06:40.000Z
|
2022-02-22T11:06:40.000Z
|
import numpy as np
class ReplayBuffer():
def __init__(self, size=10**6, batch_size = 64, state_dim = 14, action_dim = 2):
self.buffer_size = size
        # np.float / np.bool were removed in recent NumPy releases; the explicit
        # dtypes below are behaviourally identical.
        self.states_buffer = np.zeros((size, state_dim), dtype=np.float64)
        self.actions_buffer = np.zeros((size, action_dim), dtype=np.float64)
        self.reward_buffer = np.zeros(size, dtype=np.float64)
        self._states_buffer = np.zeros((size, state_dim), dtype=np.float64)
        self.dones_buffer = np.zeros(size, dtype=np.bool_)
self.batch_size = batch_size
self.cntr = 0
self.rng = np.random.default_rng()
def store(self, state, action, reward, state_, done):
idx = self.cntr % self.buffer_size
self.states_buffer[idx]= state
self.actions_buffer[idx]= action
self.reward_buffer[idx]= reward
self._states_buffer[idx]= state_
self.dones_buffer[idx] = done
self.cntr+=1
def sample(self):
indices = self.rng.choice(min(self.cntr, self.buffer_size), self.batch_size, replace=False)
states_batch = np.take(self.states_buffer, indices, axis=0)
actions_batch = np.take(self.actions_buffer, indices, axis=0)
reward_batch = np.take(self.reward_buffer, indices, axis=0)
_states_batch = np.take(self._states_buffer, indices, axis=0)
dones_batch = np.take(self.dones_buffer, indices, axis=0)
return states_batch, actions_batch, reward_batch, _states_batch, dones_batch
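

# Illustrative usage sketch (hypothetical shapes and values, shown only to
# demonstrate the store()/sample() call pattern): fill the buffer with random
# transitions, then draw one mini-batch.
if __name__ == "__main__":
    buf = ReplayBuffer(size=1000, batch_size=64, state_dim=14, action_dim=2)
    for _ in range(200):
        s = np.random.randn(14)
        a = np.random.randn(2)
        r = float(np.random.randn())
        s_next = np.random.randn(14)
        done = bool(np.random.rand() < 0.05)
        buf.store(s, a, r, s_next, done)
    states, actions, rewards, next_states, dones = buf.sample()
    print(states.shape, actions.shape, rewards.shape, next_states.shape, dones.shape)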
| 44.818182
| 99
| 0.665991
|
9fe8dafcc8edd6b80625c61a4a0e783e65b44720
| 56,526
|
py
|
Python
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 59
|
2019-06-17T09:37:49.000Z
|
2022-01-19T01:21:34.000Z
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
def testHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.cached_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.cached_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.cached_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.cached_session():
default_val = -1
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
def testNotInitialized(self):
with self.cached_session():
default_val = -1
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
with self.assertRaisesOpError("Table already initialized"):
table.initializer.run()
def testInitializationWithInvalidDimensions(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
def testMultipleSessions(self):
# Start a server
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
def testHashTableInt32String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_tensor = constant_op.constant([0, 1, -1])
output = table.lookup(input_tensor)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
ids.eval())
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=None)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", table.initializer.run)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class KeyValueTensorInitializerTest(test.TestCase):
def test_string(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int64(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int32(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
with self.assertRaisesRegexp(
errors_impl.OpError, "No OpKernel was registered"):
table.initializer.run()
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(table.lookup(
constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_tensor_missing_mapping(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_mapping(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
self.assertRaises(errors_impl.OpError, ids.eval)
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
lookup_ops.tables_initializer().run()
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTest(test.TestCase):
def test_string_to_index(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
feats = constant_op.constant(["hello", "hola"])
_ = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(
feats, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
init = lookup_ops.tables_initializer()
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
class IndexToStringTest(test.TestCase):
def test_index_to_string(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
feats.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
indices = constant_op.constant([0, 1, 4], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([1, 2, 4], dtypes.int64)
feats = lookup.index_to_string(
indices, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
class InitializeTableFromFileOpTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
self.evaluate(table.initializer)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
table.initializer.run()
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.LINE_NUMBER
value_index = lookup.TextFileIndex.WHOLE_LINE
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string, value_index),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("is not a valid"):
table.initializer.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.WHOLE_LINE
value_index = lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup.TextFileIndex.LINE_NUMBER
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.initializer.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session() as sess:
shared_name = "shared-one-columm"
default_value = -1
table1 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
"", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.initializer.run()
self.assertEquals(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.initializer.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Smaller vocab size reads only vocab_size records.
table3.initializer.run()
self.assertEquals(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer("old_file.txt", dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Initialize with non existing file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.initializer.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value)
table.initializer.run()
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), out.eval())
self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testStringIdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Set a table that only uses hash buckets; for each input value it returns
      # an id calculated by fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(None, oov_buckets)
table.initializer.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testInt32IdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Set a table that only uses hash buckets; for each input value it returns
      # an id calculated by fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.initializer.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.FastHashSpec,
name="table1")
table2 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.initializer.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.initializer.run()
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.cached_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int32)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int64)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([None, 2]))
if __name__ == "__main__":
test.main()
| 38.01345
| 80
| 0.657768
|
0351b80de5a7bdd33a3a4c42bcbf248e41f2532f
| 54,240
|
py
|
Python
|
biothings/hub/databuild/differ.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
biothings/hub/databuild/differ.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
biothings/hub/databuild/differ.py
|
sirloon/biothings.api
|
8a981fa2151e368d0ca76aaf226eb565d794d4fb
|
[
"Apache-2.0"
] | null | null | null |
import os, re, shutil, copy
import time, hashlib
import pickle, json
from datetime import datetime
from pprint import pformat, pprint
import asyncio
from functools import partial
import glob, random
from biothings.utils.common import timesofar, iter_n, get_timestamp, \
dump, rmdashfr, loadobj, md5sum
from biothings.utils.mongo import id_feeder, get_target_db, get_previous_collection
from biothings.utils.hub_db import get_src_build, get_source_fullname
from biothings.utils.loggers import get_logger
from biothings.utils.diff import diff_docs_jsonpatch
from biothings.hub.databuild.backend import generate_folder
from biothings import config as btconfig
from biothings.utils.manager import BaseManager, ManagerError
from .backend import create_backend, merge_src_build_metadata
from .syncer import SyncerManager
from biothings.utils.backend import DocMongoBackend
import biothings.utils.aws as aws
from biothings.utils.jsondiff import make as jsondiff
from biothings.utils.hub import publish_data_version
from biothings.hub import DIFFER_CATEGORY, DIFFMANAGER_CATEGORY
from biothings.hub.datarelease import set_pending_to_release_note
logging = btconfig.logger
class DifferException(Exception):
pass
class BaseDiffer(object):
# diff type name, identifying the diff algorithm
# must be set in sub-class
diff_type = None
def __init__(self, diff_func, job_manager, log_folder):
self.old = None
self.new = None
self.log_folder = log_folder
self.job_manager = job_manager
self.diff_func = diff_func
self.timestamp = datetime.now()
self.logfile = None
self.setup_log()
self.ti = time.time()
self.metadata = {} # diff metadata
self.metadata_filename = None
def setup_log(self):
self.logger, self.logfile = get_logger('diff_%s' % self.__class__.diff_type,self.log_folder)
def get_predicates(self):
return []
def get_pinfo(self):
"""
Return dict containing information about the current process
(used to report in the hub)
"""
pinfo = {"category" : DIFFER_CATEGORY,
"source" : "",
"step" : "",
"description" : ""}
preds = self.get_predicates()
if preds:
pinfo["__predicates__"] = preds
return pinfo
def register_status(self,status,transient=False,init=False,**extra):
src_build = get_src_build()
job_info = {
'status': status,
'step_started_at': datetime.now(),
'logfile': self.logfile,
}
diff_key = self.old.target_name
diff_info = {
"diff": {
diff_key : {
}
}
}
if transient:
# record some "in-progress" information
job_info['pid'] = os.getpid()
else:
# only register time when it's a final state
job_info["time"] = timesofar(self.ti)
t1 = round(time.time() - self.ti, 0)
job_info["time_in_s"] = t1
diff_info["diff"][diff_key]["created_at"] = datetime.now()
if "diff" in extra:
diff_info["diff"][diff_key].update(extra["diff"])
if "job" in extra:
job_info.update(extra["job"])
# since the base is the merged collection, we register info there
        # as the new collection (diff results are associated to the most recent collection)
build = src_build.find_one({'_id': self.new.target_name})
if not build:
self.logger.info("Can't find build document '%s', no status to register" % self.new.target_name)
return
if init:
# init timer for this step
self.ti = time.time()
src_build.update({'_id': self.new.target_name}, {"$push": {'jobs': job_info}})
# now refresh/sync
build = src_build.find_one({'_id': self.new.target_name})
else:
# merge extra at root level
# (to keep building data...) and update the last one
# (it's been properly created before when init=True)
build["jobs"] and build["jobs"][-1].update(job_info)
def merge_info(target,d):
if "__REPLACE__" in d.keys():
d.pop("__REPLACE__")
target = d
else:
for k,v in d.items():
if type(v) == dict:
# only merge if both are dict (otherwise replace/set with d)
if k in target and type(target[k]) == dict:
target[k] = merge_info(target[k],v)
else:
v.pop("__REPLACE__",None)
# merge v with "nothing" just to make sure to remove any "__REPLACE__"
v = merge_info({},v)
target[k] = v
else:
target[k] = v
return target
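        # Illustrative behaviour of merge_info (hypothetical values): merging
        #   {"diff": {"old": 1}}  with  {"diff": {"__REPLACE__": True, "new": 2}}
        # yields {"diff": {"new": 2}}: the "__REPLACE__" marker swaps the whole
        # sub-dict instead of merging its keys.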
build = merge_info(build,diff_info)
src_build.replace_one({"_id" : build["_id"]}, build)
@asyncio.coroutine
def diff_cols(self, old_db_col_names, new_db_col_names, batch_size, steps, mode=None, exclude=[]):
"""
Compare new with old collections and produce diff files. Root keys can be excluded from
comparison with "exclude" parameter
*_db_col_names can be:
         1. a collection name (as a string) assuming it is
            in the target database.
2. tuple with 2 elements, the first one is then either "source" or "target"
to respectively specify src or target database, and the second element is
the collection name.
3. tuple with 3 elements (URI,db,collection), looking like:
("mongodb://user:pass@host","dbname","collection"), allowing to specify
any connection on any server
steps: - 'content' will perform diff on actual content.
- 'mapping' will perform diff on ES mappings (if target collection involved)
- 'reduce' will merge diff files, trying to avoid having many small files
- 'post' is a hook to do stuff once everything is merged (override method post_diff_cols)
mode: 'purge' will remove any existing files for this comparison while 'resume' will happily ignore
              existing data and do whatever is requested (like running steps="post" on an existing diff folder...)
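
        Example (illustrative values only):

            old_db_col_names = "mybuild_20180101"                          # case 1: plain collection name
            old_db_col_names = ("target", "mybuild_20180101")              # case 2: explicit src/target database
            old_db_col_names = ("mongodb://user:pass@host", "dbname", "mybuild_20180101")  # case 3: full URI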
"""
        # these ones are used to point to the build doc, not the underlying backend
        # (ie. if link builder has been used, it refers to a collection in src_db, but
# we need the metadata from the build doc too)
self.new = create_backend(new_db_col_names,follow_ref=False)
self.old = create_backend(old_db_col_names,follow_ref=False)
# these point to the actual collection containing data
content_new = create_backend(new_db_col_names,follow_ref=True)
content_old = create_backend(old_db_col_names,follow_ref=True)
# check what to do
if type(steps) == str:
steps = [steps]
diff_folder = generate_folder(btconfig.DIFF_PATH,old_db_col_names,new_db_col_names)
if mode != "force" and os.path.exists(diff_folder) and "content" in steps:
if mode == "purge" and os.path.exists(diff_folder):
rmdashfr(diff_folder)
elif mode != "resume":
raise FileExistsError("Found existing files in '%s', use mode='purge'" % diff_folder)
if not os.path.exists(diff_folder):
os.makedirs(diff_folder)
# create metadata file storing info about how we created the diff
# and some summary data
diff_stats = {"update":0, "add":0, "delete":0, "mapping_changed": False}
self.metadata_filename = os.path.join(diff_folder,"metadata.json")
if os.path.exists(self.metadata_filename):
# load previous metadata in case we try to update/complete diff
self.metadata = json.load(open(self.metadata_filename))
else:
assert self.old.version, "Version for 'old' collection not defined"
self.metadata = {
"diff" : {
"type" : self.diff_type,
"func" : self.diff_func.__name__,
"version" : "%s.%s" % (self.old.version,self.new.version),
"stats": diff_stats, # ref to diff_stats
"files": [],
# when "new" is a target collection:
"mapping_file": None,
"info" : {
"generated_on": str(datetime.now()),
"exclude": exclude,
"steps": steps,
"mode": mode,
"batch_size": batch_size
}
},
"old": {
"backend" : old_db_col_names,
"version" : self.old.version
},
"new": {
"backend" : new_db_col_names,
"version": self.new.version
},
# when "new" is a mongodb target collection:
"_meta" : {},
"build_config": {},
}
if isinstance(self.new,DocMongoBackend) and self.new.target_collection.database.name == btconfig.DATA_TARGET_DATABASE:
new_doc = get_src_build().find_one({"_id":self.new.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" % \
self.new.target_collection.name)
self.metadata["_meta"] = self.get_metadata()
self.metadata["build_config"] = new_doc.get("build_config")
got_error = False
if "mapping" in steps:
def diff_mapping(old,new,diff_folder):
summary = {}
old_build = get_src_build().find_one({"_id":old.target_collection.name})
new_build = get_src_build().find_one({"_id":new.target_collection.name})
if old_build and new_build:
# mapping diff always in jsondiff
mapping_diff = jsondiff(old_build["mapping"], new_build["mapping"])
if mapping_diff:
file_name = os.path.join(diff_folder,"mapping.pyobj")
dump(mapping_diff, file_name)
md5 = md5sum(file_name)
summary["mapping_file"] = {
"name" : os.path.basename(file_name),
"md5sum" : md5,
"size" : os.stat(file_name).st_size
}
else:
self.logger.info("Neither '%s' nor '%s' have mappings associated to them, skip" % \
(old.target_collection.name,new.target_collection.name))
return summary
def mapping_diffed(f):
res = f.result()
self.register_status("success",job={"step":"diff-mapping"})
if res.get("mapping_file"):
nonlocal got_error
# check mapping differences: only "add" ops are allowed, as any other action would be
# ignored by ES once applied (you can't update/delete elements of an existing mapping)
mf = os.path.join(diff_folder,res["mapping_file"]["name"])
ops = loadobj(mf)
for op in ops:
if op["op"] != "add":
err = DifferException("Found diff operation '%s' in mapping file, " % op["op"] + \
" only 'add' operations are allowed. You can still produce the " + \
"diff by removing 'mapping' from 'steps' arguments. " + \
"Ex: steps=['content']. Diff operation was: %s" % op)
got_error = err
self.metadata["diff"]["mapping_file"] = res["mapping_file"]
diff_stats["mapping_changed"] = True
json.dump(self.metadata,open(self.metadata_filename,"w"),indent=True)
self.logger.info("Diff file containing mapping differences generated: %s" % res.get("mapping_file"))
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (self.new.target_name,self.old.target_name)
pinfo["step"] = "mapping: old vs new"
self.register_status("diffing",transient=True,init=True,job={"step":"diff-mapping"})
job = yield from self.job_manager.defer_to_thread(pinfo,
partial(diff_mapping, self.old, self.new, diff_folder))
job.add_done_callback(mapping_diffed)
yield from job
if got_error:
raise got_error
if content_old == content_new:
self.logger.info("Old and new collections are the same, skipping 'content' step")
elif "content" in steps:
skip = 0
cnt = 0
jobs = []
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (content_new.target_name,content_old.target_name)
pinfo["step"] = "content: new vs old"
data_new = id_feeder(content_new, batch_size=batch_size)
selfcontained = "selfcontained" in self.diff_type
self.register_status("diffing",transient=True,init=True,job={"step":"diff-content"})
for id_list_new in data_new:
cnt += 1
pinfo["description"] = "batch #%s" % cnt
def diffed(f):
res = f.result()
diff_stats["update"] += res["update"]
diff_stats["add"] += res["add"]
if res.get("diff_file"):
self.metadata["diff"]["files"].append(res["diff_file"])
self.logger.info("(Updated: {}, Added: {})".format(res["update"], res["add"]))
self.register_status("success",job={"step":"diff-content"})
self.logger.info("Creating diff worker for batch #%s" % cnt)
job = yield from self.job_manager.defer_to_process(pinfo,
partial(diff_worker_new_vs_old, id_list_new, old_db_col_names,
new_db_col_names, cnt , diff_folder, self.diff_func, exclude, selfcontained))
job.add_done_callback(diffed)
jobs.append(job)
yield from asyncio.gather(*jobs)
self.logger.info("Finished calculating diff for the new collection. Total number of docs updated: {}, added: {}".format(diff_stats["update"], diff_stats["add"]))
data_old = id_feeder(content_old, batch_size=batch_size)
jobs = []
pinfo = self.get_pinfo()
pinfo["source"] = "%s vs %s" % (content_old.target_name,content_new.target_name)
pinfo["step"] = "content: old vs new"
for id_list_old in data_old:
cnt += 1
pinfo["description"] = "batch #%s" % cnt
def diffed(f):
res = f.result()
diff_stats["delete"] += res["delete"]
if res.get("diff_file"):
self.metadata["diff"]["files"].append(res["diff_file"])
self.logger.info("(Deleted: {})".format(res["delete"]))
self.logger.info("Creating diff worker for batch #%s" % cnt)
job = yield from self.job_manager.defer_to_process(pinfo,
partial(diff_worker_old_vs_new, id_list_old, new_db_col_names, cnt , diff_folder))
job.add_done_callback(diffed)
jobs.append(job)
yield from asyncio.gather(*jobs)
self.logger.info("Finished calculating diff for the old collection. Total number of docs deleted: {}".format(diff_stats["delete"]))
json.dump(self.metadata,open(self.metadata_filename,"w"),indent=True)
self.logger.info("Summary: (Updated: {}, Added: {}, Deleted: {}, Mapping changed: {})".format(
diff_stats["update"], diff_stats["add"], diff_stats["delete"], diff_stats["mapping_changed"]))
if "reduce" in steps:
@asyncio.coroutine
def merge_diff():
self.logger.info("Reduce/merge diff files")
max_diff_size = getattr(btconfig,"MAX_DIFF_SIZE",10*1024**2)
current_size = 0
cnt = 0
final_res = []
tomerge = []
# .done contains original diff files
done_folder = os.path.join(diff_folder,".done")
try:
os.mkdir(done_folder)
except FileExistsError:
pass
def merged(f,cnt):
nonlocal got_error
nonlocal final_res
try:
res = f.result()
final_res.extend(res)
self.logger.info("Diff file #%s created" % cnt)
except Exception as e:
got_error = e
diff_files = [f for f in glob.glob(os.path.join(diff_folder,"*.pyobj")) \
if not os.path.basename(f).startswith("mapping")]
self.logger.info("%d diff files to process in total" % len(diff_files))
jobs = []
total = len(diff_files)
while diff_files:
if len(diff_files) % 100 == 0:
self.logger.info("%d diff files to process" % len(diff_files))
if current_size > max_diff_size:
job = yield from self.job_manager.defer_to_process(pinfo,
partial(reduce_diffs,tomerge,cnt,diff_folder,done_folder))
job.add_done_callback(partial(merged,cnt=cnt))
jobs.append(job)
current_size = 0
cnt += 1
tomerge = []
else:
diff_file = diff_files.pop()
current_size += os.stat(diff_file).st_size
tomerge.append(diff_file)
assert not diff_files
if tomerge:
job = yield from self.job_manager.defer_to_process(pinfo,
partial(reduce_diffs,tomerge,cnt,diff_folder,done_folder))
job.add_done_callback(partial(merged,cnt=cnt))
jobs.append(job)
yield from job
yield from asyncio.gather(*jobs)
return final_res
pinfo = self.get_pinfo()
pinfo["source"] = "diff_folder"
pinfo["step"] = "reduce"
#job = yield from self.job_manager.defer_to_thread(pinfo,merge_diff)
self.register_status("diffing",transient=True,init=True,job={"step":"diff-reduce"})
res = yield from merge_diff()
self.metadata["diff"]["files"] = res
json.dump(self.metadata,open(self.metadata_filename,"w"),indent=True)
if got_error:
self.logger.exception("Failed to reduce diff files: %s" % got_error,extra={"notify":True})
raise got_error
self.register_status("success",job={"step":"diff-reduce"})
if "post" in steps:
pinfo = self.get_pinfo()
pinfo["source"] = "diff_folder"
pinfo["step"] = "post"
self.register_status("diffing",transient=True,init=True,job={"step":"diff-post"})
job = yield from self.job_manager.defer_to_thread(pinfo,
partial(self.post_diff_cols, old_db_col_names, new_db_col_names,
batch_size, steps, mode=mode, exclude=exclude))
def posted(f):
nonlocal got_error
try:
res = f.result()
self.register_status("success",job={"step":"diff-post"},diff={"post": res})
self.logger.info("Post diff process successfully run: %s" % res)
except Exception as e:
got_error = e
job.add_done_callback(posted)
yield from job
json.dump(self.metadata,open(self.metadata_filename,"w"),indent=True)
if got_error:
self.logger.exception("Failed to run post diff process: %s" % got_error,extra={"notify":True})
raise got_error
strargs = "[old=%s,new=%s,steps=%s,diff_stats=%s]" % (old_db_col_names,new_db_col_names,steps,diff_stats)
self.logger.info("success %s" % strargs,extra={"notify":True})
# remove some metadata key for diff registering (some are already in build doc, it would be duplication)
self.metadata.pop("_meta",None)
self.metadata.pop("build_config",None)
# record diff_folder so it's available for later without re-computing it
self.metadata["diff_folder"] = diff_folder
self.register_status("success",diff=self.metadata)
return diff_stats
def diff(self,old_db_col_names, new_db_col_names, batch_size=100000, steps=["content","mapping","reduce","post"], mode=None, exclude=[]):
"""wrapper over diff_cols() coroutine, return a task"""
job = asyncio.ensure_future(self.diff_cols(old_db_col_names, new_db_col_names, batch_size, steps, mode, exclude))
return job
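# Usage sketch (hypothetical collection names; constructor arguments follow how
# register_differ() builds differs, so treat them as assumptions):
#   differ = JsonDiffer(log_folder="/tmp/logs", job_manager=jmanager)
#   task = differ.diff("mygene_20180501", "mygene_20180601", batch_size=50000)
#   # 'task' wraps the diff_cols() coroutine; awaiting/yielding it returns diff_stats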
def get_metadata(self):
new_doc = get_src_build().find_one({"_id":self.new.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" % \
self.new.target_collection.name)
return new_doc.get("_meta",{})
def post_diff_cols(self, old_db_col_names, new_db_col_names, batch_size, steps, mode=None, exclude=[]):
"""Post diff process hook. This coroutine will in a dedicated thread"""
return
class ColdHotDiffer(BaseDiffer):
@asyncio.coroutine
def diff_cols(self, old_db_col_names, new_db_col_names, *args,**kwargs):
self.new = create_backend(new_db_col_names)
new_doc = get_src_build().find_one({"_id":self.new.target_collection.name})
assert "cold_collection" in new_doc.get("build_config",{}), "%s document doesn't have " % self.new.target_collection.name + \
"a premerge collection declared. Is it really a hot merges collection ?"
self.cold = create_backend(new_doc["build_config"]["cold_collection"])
return super(ColdHotDiffer,self).diff_cols(old_db_col_names, new_db_col_names, *args, **kwargs)
def get_metadata(self):
new_doc = get_src_build().find_one({"_id":self.new.target_collection.name})
cold_doc = get_src_build().find_one({"_id":self.cold.target_collection.name})
if not new_doc:
raise DifferException("Collection '%s' has no corresponding build document" % \
self.new.target_collection.name)
if not cold_doc:
raise DifferException("Collection '%s' has no corresponding build document" % \
self.cold.target_collection.name)
return merge_src_build_metadata([cold_doc,new_doc])
class ColdHotJsonDifferBase(ColdHotDiffer):
def post_diff_cols(self, old_db_col_names, new_db_col_names, batch_size, steps, mode=None, exclude=[]):
"""
Post-process the diff files by adjusting some jsondiff operations. Here's the process.
For updated documents, some operations might be illegal in the context of cold/hot merged collections.
Case #1: "remove" op in an update
from a cold/premerge collection, we have that doc:
coldd = {"_id":1, "A":"123", "B":"456", "C":True}
from the previous hot merge we have this doc:
prevd = {"_id":1, "D":"789", "C":True, "E":"abc"}
At that point, the final document, fully merged and indexed is:
finald = {"_id":1, "A":"123", "B":"456", "C":True, "D":"789", "E":"abc"}
We can notice field "C" is common to coldd and prevd.
from the new hot merge, we have:
newd = {"_id":1, "E","abc"} # C and D don't exist anymore
Diffing prevd vs. newd will give jssondiff operations:
[{'op': 'remove', 'path': '/C'}, {'op': 'remove', 'path': '/D'}]
The problem here is 'C' is removed while it was already in cold merge, it should stay because it has come
with some resource involved in the premerge (dependent keys, eg. myvariant, "observed" key comes with certain sources)
=> the jsondiff opetation on "C" must be discarded.
Note: If operation involved a root key (not '/a/c' for instance) and if that key is found in the premerge, then
then remove the operation. (note we just consider root keys, if the deletion occurs deeper in the document,
it's just a legal operation updating innder content)
For deleted documents, the same kind of logic applies
Case #2: "delete"
from a cold/premerge collection, we have that doc:
coldd = {"_id":1, "A":"123", "B":"456", "C":True}
from the previous hot merge we have this doc:
prevd = {"_id":1, "D":"789", "C":True}
fully merged doc:
finald = {"_id":1, "A":"123", "B":"456", "C":True, "D":"789"}
from the new hot merge, we have:
newd = {} # document doesn't exist anymore
Diffing prevd vs. newd will mark document with _id == 1 to be deleted
The problem is we have data for _id=1 in the premerge collection; if we delete the whole document we'd lose too much
information.
=> the deletion must be converted into specific "remove" jsondiff operations, for the root keys found in prevd and not in coldd
(in that case: [{'op':'remove', 'path':'/D'}], and not "C" as C is in the premerge)
"""
# we should be able to find a cold_collection definition in the src_build doc
# and it should be the same for both old and new
old_doc = get_src_build().find_one({"_id":old_db_col_names})
new_doc = get_src_build().find_one({"_id":new_db_col_names})
assert "build_config" in old_doc and "cold_collection" in old_doc["build_config"], \
"No cold collection defined in src_build for %s" % old_db_col_names
assert "build_config" in new_doc and "cold_collection" in new_doc["build_config"], \
"No cold collection defined in src_build for %s" % new_db_col_names
assert old_doc["build_config"]["cold_collection"] == new_doc["build_config"]["cold_collection"], \
"Cold collections are different in src_build docs %s and %s" % (old_db_col_names,new_db_col_names)
coldcol = get_target_db()[new_doc["build_config"]["cold_collection"]]
assert coldcol.count() > 0, "Cold collection is empty..."
diff_folder = generate_folder(btconfig.DIFF_PATH,old_db_col_names,new_db_col_names)
diff_files = glob.glob(os.path.join(diff_folder,"diff_*.pyobj"))
fixed = 0
for diff_file in diff_files:
dirty = False
self.logger.info("Post-processing diff file %s" % diff_file)
data = loadobj(diff_file)
# update/remove case #1
for updt in data["update"]:
toremove = []
for patch in updt["patch"]:
pathk = patch["path"].split("/")[1:] # remove / at the beginning of the path
if patch["op"] == "remove" and \
len(pathk) == 1:
# let's query the premerge
coldd = coldcol.find_one({"_id" : updt["_id"]})
if coldd and pathk[0] in coldd:
self.logger.info("Fixed a root key in cold collection that should be preserved: '%s' (for doc _id '%s')" % (pathk[0],updt["_id"]))
toremove.append(patch)
fixed += 1
dirty = True
for p in toremove:
updt["patch"].remove(p)
# delete case #2
toremove = []
prevcol = get_target_db()[old_doc["target_name"]]
for delid in data["delete"]:
coldd = coldcol.find_one({"_id":delid})
if not coldd:
# true deletion is required
continue
else:
prevd = prevcol.find_one({"_id":delid})
prevs = set(prevd.keys())
colds = set(coldd.keys())
keys = prevs.difference(colds) # keys exclusively in prevd that should be removed
patches = []
for k in keys:
patches.append({"op":"remove","path":"/%s" % k})
data["update"].append({"_id":delid,"patch":patches})
self.logger.info("Fixed a delete document by converting to update/remove jsondiff operations for keys: %s (_id: '%s')" % (keys,delid))
fixed += 1
dirty = True
toremove.append(delid)
for i in toremove:
data["delete"].remove(i)
if dirty:
dump(data,diff_file,compress="lzma")
name = os.path.basename(diff_file)
md5 = md5sum(diff_file)
# find info to adjust md5sum
found = False
for i,df in enumerate(self.metadata["diff"]["files"]):
if df["name"] == name:
found = True
break
assert found, "Couldn't find file information in metadata (with md5 value), try to rebuild_diff_file_list() ?"
size = os.stat(diff_file).st_size
self.metadata["diff"]["files"][i] = {"name":name,"md5sum":md5,"size":size}
self.logger.info(self.metadata["diff"]["files"])
self.logger.info("Post-diff process fixing jsondiff operations done: %s fixed" % fixed)
return {"fixed":fixed}
class JsonDiffer(BaseDiffer):
diff_type = "jsondiff"
def __init__(self, diff_func=diff_docs_jsonpatch, *args, **kwargs):
super(JsonDiffer,self).__init__(diff_func=diff_func,*args,**kwargs)
class SelfContainedJsonDiffer(JsonDiffer):
diff_type = "jsondiff-selfcontained"
class ColdHotJsonDiffer(ColdHotJsonDifferBase, JsonDiffer):
diff_type = "coldhot-jsondiff"
class ColdHotSelfContainedJsonDiffer(ColdHotJsonDifferBase, SelfContainedJsonDiffer):
diff_type = "coldhot-jsondiff-selfcontained"
def diff_worker_new_vs_old(id_list_new, old_db_col_names, new_db_col_names,
batch_num, diff_folder, diff_func, exclude=[], selfcontained=False):
new = create_backend(new_db_col_names,follow_ref=True)
old = create_backend(old_db_col_names,follow_ref=True)
docs_common = old.mget_from_ids(id_list_new)
ids_common = [_doc['_id'] for _doc in docs_common]
id_in_new = list(set(id_list_new) - set(ids_common))
_updates = []
if len(ids_common) > 0:
_updates = diff_func(old, new, list(ids_common), exclude_attrs=exclude)
file_name = os.path.join(diff_folder,"%s.pyobj" % str(batch_num))
_result = {'add': id_in_new,
'update': _updates,
'delete': [],
'source': new.target_name,
'timestamp': get_timestamp()}
if selfcontained:
# consume generator as result will be pickled
_result["add"] = [d for d in new.mget_from_ids(id_in_new)]
summary = {"add" : len(id_in_new), "update" : len(_updates), "delete" : 0}
if len(_updates) != 0 or len(id_in_new) != 0:
dump(_result, file_name)
# compute md5 so when downloaded, users can check integrity
md5 = md5sum(file_name)
summary["diff_file"] = {
"name" : os.path.basename(file_name),
"md5sum" : md5,
"size" : os.stat(file_name).st_size
}
return summary
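# Example of what this worker produces (values are illustrative): the dumped
# "<batch_num>.pyobj" file holds {'add': [ids, or full docs if selfcontained],
# 'update': [jsondiff patches], 'delete': [], 'source': ..., 'timestamp': ...},
# and the returned summary looks like:
#   {"add": 12, "update": 3, "delete": 0,
#    "diff_file": {"name": "1.pyobj", "md5sum": "...", "size": 12345}}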
def diff_worker_old_vs_new(id_list_old, new_db_col_names, batch_num, diff_folder):
new = create_backend(new_db_col_names,follow_ref=True)
docs_common = new.mget_from_ids(id_list_old)
ids_common = [_doc['_id'] for _doc in docs_common]
id_in_old = list(set(id_list_old)-set(ids_common))
file_name = os.path.join(diff_folder,"%s.pyobj" % str(batch_num))
_result = {'delete': id_in_old,
'add': [],
'update': [],
'source': new.target_name,
'timestamp': get_timestamp()}
summary = {"add" : 0, "update": 0, "delete" : len(id_in_old)}
if len(id_in_old) != 0:
dump(_result, file_name)
# compute md5 so when downloaded, users can check integrity
md5 = md5sum(file_name)
summary["diff_file"] = {
"name" : os.path.basename(file_name),
"md5sum" : md5,
"size" : os.stat(file_name).st_size
}
return summary
def diff_worker_count(id_list, db_col_names, batch_num):
col = create_backend(db_col_names,follow_ref=True)
docs = col.mget_from_ids(id_list)
res = {}
for doc in docs:
for k in doc:
res.setdefault(k,0)
res[k] += 1
return res
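# Example (illustrative): for documents {"_id":1,"dbsnp":{...},"clinvar":{...}} and
# {"_id":2,"dbsnp":{...}}, this returns root-key counts such as
#   {"_id": 2, "dbsnp": 2, "clinvar": 1}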
class DiffReportRendererBase(object):
def __init__(self,
max_reported_ids=None,
max_randomly_picked=None,
detailed=False):
self.max_reported_ids = max_reported_ids or hasattr(btconfig,"MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
self.max_randomly_picked = max_randomly_picked or hasattr(btconfig,"MAX_RANDOMLY_PICKED") and \
btconfig.MAX_RANDOMLY_PICKED or 10
self.detailed = detailed
def save(self,report,filename):
"""
Save report output (rendered) into filename
"""
raise NotImplementedError("implement me")
class DiffReportTxt(DiffReportRendererBase):
def save(self, report, filename="report.txt"):
try:
import prettytable
except ImportError:
raise ImportError("Please install prettytable to use this rendered")
def build_id_table(subreport):
if self.detailed:
table = prettytable.PrettyTable(["IDs","Root keys"])
table.align["IDs"] = "l"
table.align["Root keys"] = "l"
else:
table = prettytable.PrettyTable(["IDs"])
table.align["IDs"] = "l"
if subreport["count"] <= self.max_reported_ids:
ids = subreport["ids"]
else:
ids = [random.choice(subreport["ids"]) for i in range(self.max_reported_ids)]
for dat in ids:
if self.detailed:
# list of [_id,[keys]]
table.add_row([dat[0],", ".join(dat[1])])
else:
table.add_row([dat])
return table
txt = ""
title = "Diff report (generated on %s)" % datetime.now()
txt += title + "\n"
txt += "".join(["="] * len(title)) + "\n"
txt += "\n"
txt += "Metadata\n"
txt += "--------\n"
if report.get("metadata",{}):
txt += "Old collection: %s\n" % repr(report["metadata"].get("old"))
txt += "New collection: %s\n" % repr(report["metadata"].get("new"))
txt += "Batch size: %s\n" % report["metadata"]["diff"]["info"].get("batch_size")
txt += "Steps: %s\n" % report["metadata"]["diff"]["info"].get("steps")
txt += "Key(s) excluded: %s\n" % report["metadata"]["diff"]["info"].get("exclude")
txt += "Diff generated on: %s\n" % report["metadata"]["diff"]["info"].get("generated_on")
else:
txt+= "No metadata found in report\n"
txt += "\n"
txt += "Summary\n"
txt += "-------\n"
txt += "Added documents: %s\n" % report["added"]["count"]
txt += "Deleted documents: %s\n" % report["deleted"]["count"]
txt += "Updated documents: %s\n" % report["updated"]["count"]
txt += "\n"
root_keys = report.get("metadata",{}).get("diff",{}).get("stats",{}).get("root_keys",{})
if root_keys:
for src in sorted(root_keys):
txt += "%s: %s\n" % (src,root_keys[src])
else:
txt += "No root keys count found in report\n"
txt += "\n"
txt += "Added documents (%s randomly picked from report)\n" % self.max_reported_ids
txt += "------------------------------------------------\n"
if report["added"]["count"]:
table = build_id_table(report["added"])
txt += table.get_string()
txt += "\n"
else:
txt += "No added document found in report\n"
txt += "\n"
txt += "Deleted documents (%s randomly picked from report)\n" % self.max_reported_ids
txt += "--------------------------------------------------\n"
if report["deleted"]["count"]:
table = build_id_table(report["deleted"])
txt += table.get_string()
txt += "\n"
else:
txt += "No deleted document found in report\n"
txt += "\n"
txt += "Updated documents (%s examples randomly picked from report)\n" % self.max_randomly_picked
txt += "-----------------------------------------------------------\n"
txt += "\n"
for op in sorted(report["updated"]):
if op == "count":
continue # already displayed
if report["updated"][op]:
table = prettytable.PrettyTable([op,"Count","Examples"])
table.sortby = "Count"
table.reversesort = True
table.align[op] = "l"
table.align["Count"] = "r"
table.align["Examples"] = "l"
for path in report["updated"][op]:
info = report["updated"][op][path]
row = [path,info["count"]]
if info["count"] <= self.max_randomly_picked:
row.append(", ".join(info["ids"]))
else:
row.append(", ".join([random.choice(info["ids"]) for i in range(self.max_randomly_picked)]))
table.add_row(row)
txt += table.get_string()
txt += "\n"
else:
txt += "No content found for diff operation '%s'\n" % op
txt += "\n"
txt += "\n"
with open(os.path.join(report["diff_folder"],filename),"w") as fout:
fout.write(txt)
return txt
class DifferManager(BaseManager):
def __init__(self, poll_schedule=None, *args,**kwargs):
"""
DifferManager deals with the different differ objects used to create and
analyze diff between datasources.
"""
super(DifferManager,self).__init__(*args,**kwargs)
self.log_folder = btconfig.LOG_FOLDER
self.timestamp = datetime.now()
self.poll_schedule = poll_schedule
self.setup_log()
def clean_stale_status(self):
src_build = get_src_build()
for build in src_build.find():
for job in build.get("jobs",[]):
if job.get("status") == "diffing":
logging.warning("Found stale build '%s', marking diff status as 'canceled'" % build["_id"])
job["status"] = "canceled"
src_build.replace_one({"_id":build["_id"]},build)
def register_differ(self,klass):
if klass.diff_type is None:
raise DifferException("diff_type must be defined in %s" % klass)
self.register[klass.diff_type] = partial(klass,log_folder=btconfig.LOG_FOLDER,
job_manager=self.job_manager)
def configure(self, partial_differs=[JsonDiffer,SelfContainedJsonDiffer]):
for pdiffer in partial_differs:
self.register_differ(pdiffer)
def setup_log(self):
self.logger, self.logfile = get_logger('diffmanager')
def get_predicates(self):
def no_other_diffmanager_step_running(job_manager):
"""DiffManager deals with diff report, release note, publishing,
none of them should run more than one at a time"""
# Note: report output is part of publish_diff, release_note is impacted by diff content,
# overall we keep things simple and don't allow more than one diff manager job to run
# at the same time
return len([j for j in job_manager.jobs.values() if j["category"] == DIFFMANAGER_CATEGORY]) == 0
return [no_other_diffmanager_step_running]
def get_pinfo(self):
"""
Return dict containing information about the current process
(used to report in the hub)
"""
pinfo = {"category" : DIFFMANAGER_CATEGORY,
"source" : "",
"step" : "",
"description" : ""}
preds = self.get_predicates()
if preds:
pinfo["__predicates__"] = preds
return pinfo
def __getitem__(self,diff_type):
"""
Return an instance of a differ corresponding to the given 'diff_type'
Note: each call returns a different instance (factory call behind the scene...)
"""
# we'll get a partial class but will return an instance
pclass = BaseManager.__getitem__(self,diff_type)
return pclass()
def diff(self, diff_type, old, new, batch_size=100000, steps=["content","mapping","reduce","post"],
mode=None, exclude=["_timestamp"]):
"""
Run a diff to compare old vs. new collections, using differ algorithm diff_type. Results are stored in
a diff folder.
Steps can be passed to choose what to do:
- count: will count root keys in new collections and stores them as statistics.
- content: will diff the content between old and new. Results (diff files) format depends on diff_type
"""
# Note: _timestamp is excluded by default since it's an internal field (it exists in the mongo doc
# but not in the ES "_source" document; there's a root timestamp under control of
# _timestamp : {enable:true} in the mapping)
try:
differ = self[diff_type]
old = old or get_previous_collection(new)
job = differ.diff(old, new,
batch_size=batch_size,
steps=steps,
mode=mode,
exclude=exclude)
def diffed(f):
try:
res = f.result()
set_pending_to_release_note(new)
except Exception as e:
self.logger.error("Error during diff: %s" % e)
raise
job.add_done_callback(diffed)
return job
except KeyError as e:
raise DifferException("No such differ '%s' (error: %s)" % (diff_type,e))
def diff_report(self, old_db_col_names, new_db_col_names, report_filename="report.txt", format="txt", detailed=True,
max_reported_ids=None, max_randomly_picked=None, mode=None):
max_reported_ids = max_reported_ids or hasattr(btconfig,"MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
max_randomly_picked = max_randomly_picked or hasattr(btconfig,"MAX_RANDOMLY_PICKED") and \
btconfig.MAX_RANDOMLY_PICKED or 10
def do():
if mode == "purge" or not os.path.exists(reportfilepath):
assert format == "txt", "Only 'txt' format supported for now"
report = self.build_diff_report(diff_folder, detailed, max_reported_ids)
render = DiffReportTxt(max_reported_ids=max_reported_ids,
max_randomly_picked=max_randomly_picked,
detailed=detailed)
return render.save(report,report_filename)
else:
self.logger.debug("Report already generated, now using it")
return open(reportfilepath).read()
@asyncio.coroutine
def main(diff_folder):
got_error = False
pinfo = self.get_pinfo()
pinfo["step"] = "report"
pinfo["source"] = diff_folder
pinfo["description"] = report_filename
job = yield from self.job_manager.defer_to_thread(pinfo,do)
def reported(f):
nonlocal got_error
try:
res = f.result()
self.logger.info("Diff report ready, saved in %s" % reportfilepath,extra={"notify":True,"attach":reportfilepath})
except Exception as e:
got_error = e
job.add_done_callback(reported)
yield from job
if got_error:
self.logger.exception("Failed to create diff report: %s" % got_error,extra={"notify":True})
raise got_error
diff_folder = generate_folder(btconfig.DIFF_PATH,old_db_col_names,new_db_col_names)
reportfilepath = os.path.join(diff_folder,report_filename)
job = asyncio.ensure_future(main(diff_folder))
return job
def build_diff_report(self, diff_folder, detailed=True, max_reported_ids=None):
"""
Analyze diff files in diff_folder and give a summary of changes.
max_reported_ids is the number of IDs contained in the report for each part.
detailed will trigger a deeper analysis, which takes more time.
"""
max_reported_ids = max_reported_ids or hasattr(btconfig,"MAX_REPORTED_IDS") and \
btconfig.MAX_REPORTED_IDS or 1000
update_details = {
"add": {},# "count": 0, "data": {} },
"remove": {}, # "count": 0, "data": {} },
"replace": {}, # "count": 0, "data": {} },
"move": {}, # "count": 0, "data": {} },
"count": 0,
}
adds = {"count": 0, "ids": []}
dels = {"count": 0, "ids": []}
sources = {}
if os.path.isabs(diff_folder):
data_folder = diff_folder
else:
data_folder = os.path.join(btconfig.DIFF_PATH,diff_folder)
metadata = {}
try:
metafile = os.path.join(data_folder,"metadata.json")
metadata = json.load(open(metafile))
except FileNotFoundError:
logging.warning("Not metadata found in diff folder")
if detailed:
raise Exception("Can't perform detailed analysis without a metadata file")
def analyze(diff_file, detailed):
data = loadobj(diff_file)
sources[data["source"]] = 1
if detailed:
# TODO: if self-contained, no db connection needed
new_col = create_backend(metadata["new"]["backend"],follow_ref=True)
old_col = create_backend(metadata["old"]["backend"],follow_ref=True)
if len(adds["ids"]) < max_reported_ids:
if detailed:
# look for which root keys were added in new collection
for _id in data["add"]:
# selfcontained = dict for whole doc (see TODO above)
if type(_id) == dict:
_id = _id["_id"]
doc = new_col.get_from_id(_id)
rkeys = sorted(doc.keys())
adds["ids"].append([_id,rkeys])
else:
if data["add"] and type(data["add"][0]) == dict:
adds["ids"].extend([d["_id"] for d in data["add"]])
else:
adds["ids"].extend(data["add"])
adds["count"] += len(data["add"])
if len(dels["ids"]) < max_reported_ids:
if detailed:
# look for which root keys were deleted in old collection
for _id in data["delete"]:
doc = old_col.get_from_id(_id)
rkeys = sorted(doc.keys())
dels["ids"].append([_id,rkeys])
else:
dels["ids"].extend(data["delete"])
dels["count"] += len(data["delete"])
for up in data["update"]:
for patch in up["patch"]:
update_details[patch["op"]].setdefault(patch["path"],{"count": 0, "ids": []})
if len(update_details[patch["op"]][patch["path"]]["ids"]) < max_reported_ids:
update_details[patch["op"]][patch["path"]]["ids"].append(up["_id"])
update_details[patch["op"]][patch["path"]]["count"] += 1
update_details["count"] += len(data["update"])
assert len(sources) == 1, "Should have one datasource from diff files, got: %s" % [s for s in sources]
# we randomize files order b/c we randomly pick some examples from those
# files. If files contain data in order (like chrom 1, then chrom 2)
# we won't have a representative sample
files = glob.glob(os.path.join(data_folder,"*.pyobj"))
random.shuffle(files)
total = len(files)
for i,f in enumerate(files):
if os.path.basename(f).startswith("mapping"):
logging.debug("Skip mapping file")
continue
logging.info("Running report worker for '%s' (%d/%d)" % (f,i+1,total))
analyze(f, detailed)
return {"added" : adds, "deleted": dels, "updated" : update_details,
"diff_folder" : diff_folder, "detailed": detailed,
"metadata": metadata}
def poll(self,state,func):
super(DifferManager,self).poll(state,func,col=get_src_build())
def trigger_diff(self,diff_type,doc,**kwargs):
"""
Launch a diff given a src_build document. In order to
know the first collection to diff against, get_previous_collection()
method is used.
"""
new_db_col_names = doc["_id"]
old_db_col_names = get_previous_collection(new_db_col_names)
self.diff(diff_type, old_db_col_names, new_db_col_names, **kwargs)
def rebuild_diff_file_list(self,diff_folder):
diff_files = glob.glob(os.path.join(diff_folder,"*.pyobj"))
metadata = json.load(open(os.path.join(diff_folder,"metadata.json")))
try:
metadata["diff"]["files"] = []
if not "mapping_file" in metadata["diff"]:
metadata["diff"]["mapping_file"] = None
for diff_file in diff_files:
name = os.path.basename(diff_file)
md5 = md5sum(diff_file)
info = {"md5sum" : md5,"name" : name,"size" : os.stat(diff_file).st_size}
if "mapping" in diff_file: # dirty...
metadata["diff"]["mapping"] = info
else:
metadata["diff"]["files"].append(info)
json.dump(metadata,open(os.path.join(diff_folder,"metadata.json"),"w"),indent=True)
self.logger.info("Successfully rebuild diff_files list with all files found in %s" % diff_folder)
except KeyError as e:
self.logging.error("Metadata is too much damaged, can't rebuild diff_files list: %s" % e)
def diff_info(self):
dtypes = self.register.keys()
res = {}
for typ in dtypes:
res[typ] = {}
return res
def reduce_diffs(diffs, num, diff_folder, done_folder):
assert diffs
res = []
fn = "diff_%s.pyobj" % num
logging.info("Merging %s => %s" % ([os.path.basename(f) for f in diffs],fn))
if len(diffs) == 1:
# just rename
outf = os.path.join(diff_folder,fn)
shutil.copyfile(diffs[0],outf)
res.append({"name":fn,"md5sum":md5sum(outf),"size" : os.stat(outf).st_size})
os.rename(diffs[0],os.path.join(done_folder,os.path.basename(diffs[0])))
return res
merged = loadobj(diffs[0])
os.rename(diffs[0],os.path.join(done_folder,os.path.basename(diffs[0])))
for diff_fn in diffs[1:]:
diff = loadobj(diff_fn)
assert merged["source"] == diff["source"]
for k in ["add","delete","update"]:
merged[k].extend(diff[k])
os.rename(diff_fn,os.path.join(done_folder,os.path.basename(diff_fn)))
dump(merged,os.path.join(diff_folder,fn),compress="lzma")
file_name = os.path.join(diff_folder,fn)
res.append({"name":fn,"md5sum":md5sum(file_name),"size" : os.stat(file_name).st_size})
return res
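# Example (illustrative paths): reduce_diffs(["/path/1.pyobj", "/path/2.pyobj"], 0, diff_folder, done_folder)
# loads both pyobj files, concatenates their "add"/"delete"/"update" lists into a single
# lzma-compressed "diff_0.pyobj", moves the originals into the .done folder, and returns
#   [{"name": "diff_0.pyobj", "md5sum": "...", "size": ...}]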
def set_pending_to_diff(col_name):
src_build = get_src_build()
src_build.update({"_id":col_name},{"$addToSet" : {"pending":"diff"} })
| 47.495622 | 173 | 0.556895 |
3e89a4be790a68625982bd59ec8d0dac460424e6 | 109,762 | py | Python | sympy/core/expr.py | nishithshah2211/sympy | edc620ca662f7163637c7fb5823f22523b7f2fe9 | ["BSD-3-Clause"] | null | null | null | sympy/core/expr.py | nishithshah2211/sympy | edc620ca662f7163637c7fb5823f22523b7f2fe9 | ["BSD-3-Clause"] | null | null | null | sympy/core/expr.py | nishithshah2211/sympy | edc620ca662f7163637c7fb5823f22523b7f2fe9 | ["BSD-3-Clause"] | null | null | null |
from __future__ import print_function, division
from .core import C
from .sympify import sympify, _sympify, SympifyError
from .basic import Basic, Atom
from .singleton import S
from .evalf import EvalfMixin, pure_complex
from .decorators import _sympifyit, call_highest_priority
from .cache import cacheit
from .compatibility import reduce, as_int, default_sort_key, range
from mpmath.libmp import mpf_log, prec_to_dps
from collections import defaultdict
class Expr(Basic, EvalfMixin):
"""
Base class for algebraic expressions.
Everything that requires arithmetic operations to be defined
should subclass this class, instead of Basic (which should be
used only for argument storage and expression manipulation, i.e.
pattern matching, substitutions, etc).
See Also
========
sympy.core.basic.Basic
"""
__slots__ = []
@property
def _diff_wrt(self):
"""Is it allowed to take derivative wrt to this instance.
This determines if it is allowed to take derivatives wrt this object.
Subclasses such as Symbol, Function and Derivative should return True
to enable derivatives wrt them. The implementation in Derivative
separates the Symbol and non-Symbol _diff_wrt=True variables and
temporarily converts the non-Symbol vars into Symbols when performing
the differentiation.
Note, see the docstring of Derivative for how this should work
mathematically. In particular, note that expr.subs(yourclass, Symbol)
should be well-defined on a structural level, or this will lead to
inconsistent results.
Examples
========
>>> from sympy import Expr
>>> e = Expr()
>>> e._diff_wrt
False
>>> class MyClass(Expr):
... _diff_wrt = True
...
>>> (2*MyClass()).diff(MyClass())
2
"""
return False
@cacheit
def sort_key(self, order=None):
coeff, expr = self.as_coeff_Mul()
if expr.is_Pow:
expr, exp = expr.args
else:
expr, exp = expr, S.One
if expr.is_Dummy:
args = (expr.sort_key(),)
elif expr.is_Atom:
args = (str(expr),)
else:
if expr.is_Add:
args = expr.as_ordered_terms(order=order)
elif expr.is_Mul:
args = expr.as_ordered_factors(order=order)
else:
args = expr.args
args = tuple(
[ default_sort_key(arg, order=order) for arg in args ])
args = (len(args), tuple(args))
exp = exp.sort_key(order=order)
return expr.class_key(), args, exp, coeff
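# Rough illustration (editorial, not from the original source): for a term like 3*x**2,
# as_coeff_Mul() splits off coeff=3, the Pow is split into base x and exponent 2, and the
# returned key is (the base's class key, its wrapped args, the exponent's sort key, the
# coefficient), so terms sort primarily by their base and only then by exponent and coeff.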
# ***************
# * Arithmetics *
# ***************
# Expr and its subclasses use _op_priority to determine which object
# passed to a binary special method (__mul__, etc.) will handle the
# operation. In general, the 'call_highest_priority' decorator will choose
# the object with the highest _op_priority to handle the call.
# Custom subclasses that want to define their own binary special methods
# should set an _op_priority value that is higher than the default.
#
# **NOTE**:
# This is a temporary fix, and will eventually be replaced with
# something better and more powerful. See issue 5510.
_op_priority = 10.0
def __pos__(self):
return self
def __neg__(self):
return Mul(S.NegativeOne, self)
def __abs__(self):
return C.Abs(self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__radd__')
def __add__(self, other):
return Add(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__add__')
def __radd__(self, other):
return Add(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rsub__')
def __sub__(self, other):
return Add(self, -other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__sub__')
def __rsub__(self, other):
return Add(other, -self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmul__')
def __mul__(self, other):
return Mul(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mul__')
def __rmul__(self, other):
return Mul(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rpow__')
def __pow__(self, other):
return Pow(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__pow__')
def __rpow__(self, other):
return Pow(other, self)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rdiv__')
def __div__(self, other):
return Mul(self, Pow(other, S.NegativeOne))
@_sympifyit('other', NotImplemented)
@call_highest_priority('__div__')
def __rdiv__(self, other):
return Mul(other, Pow(self, S.NegativeOne))
__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('other', NotImplemented)
@call_highest_priority('__rmod__')
def __mod__(self, other):
return Mod(self, other)
@_sympifyit('other', NotImplemented)
@call_highest_priority('__mod__')
def __rmod__(self, other):
return Mod(other, self)
def __int__(self):
# Although we only need to round to the units position, we'll
# get one more digit so the extra testing below can be avoided
# unless the rounded value rounded to an integer, e.g. if an
# expression were equal to 1.9 and we rounded to the unit position
# we would get a 2 and would not know if this rounded up or not
# without doing a test (as done below). But if we keep an extra
# digit we know that 1.9 is not the same as 1 and there is no
# need for further testing: our int value is correct. If the value
# were 1.99, however, this would round to 2.0 and our int value is
# off by one. So...if our round value is the same as the int value
# (regardless of how much extra work we do to calculate extra decimal
# places) we need to test whether we are off by one.
r = self.round(2)
if not r.is_Number:
raise TypeError("can't convert complex to int")
if r in (S.NaN, S.Infinity, S.NegativeInfinity):
raise TypeError("can't convert %s to int" % r)
i = int(r)
if not i:
return 0
# off-by-one check
if i == r and not (self - i).equals(0):
isign = 1 if i > 0 else -1
x = C.Dummy()
# in the following (self - i).evalf(2) will not always work while
# (self - r).evalf(2) and the use of subs does; if the test that
# was added when this comment was added passes, it might be safe
# to simply use sign to compute this rather than doing this by hand:
diff_sign = 1 if (self - x).evalf(2, subs={x: i}) > 0 else -1
if diff_sign != isign:
i -= isign
return i
__long__ = __int__
def __float__(self):
# Don't bother testing if it's a number; if it's not this is going
# to fail, and if it is we still need to check that it evalf'ed to
# a number.
result = self.evalf()
if result.is_Number:
return float(result)
if result.is_number and result.as_real_imag()[1]:
raise TypeError("can't convert complex to float")
raise TypeError("can't convert expression to float")
def __complex__(self):
result = self.evalf()
re, im = result.as_real_imag()
return complex(float(re), float(im))
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
for me in (self, other):
if me.is_complex and me.is_real is False:
raise TypeError("Invalid comparison of complex %s" % me)
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonnegative is not None and \
dif.is_nonnegative is not dif.is_negative:
return sympify(dif.is_nonnegative)
return C.GreaterThan(self, other, evaluate=False)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
for me in (self, other):
if me.is_complex and me.is_real is False:
raise TypeError("Invalid comparison of complex %s" % me)
if self.is_real and other.is_real:
dif = self - other
if dif.is_nonpositive is not None and \
dif.is_nonpositive is not dif.is_positive:
return sympify(dif.is_nonpositive)
return C.LessThan(self, other, evaluate=False)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
for me in (self, other):
if me.is_complex and me.is_real is False:
raise TypeError("Invalid comparison of complex %s" % me)
if self.is_real and other.is_real:
dif = self - other
if dif.is_positive is not None and \
dif.is_positive is not dif.is_nonpositive:
return sympify(dif.is_positive)
return C.StrictGreaterThan(self, other, evaluate=False)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
for me in (self, other):
if me.is_complex and me.is_real is False:
raise TypeError("Invalid comparison of complex %s" % me)
if self.is_real and other.is_real:
dif = self - other
if dif.is_negative is not None and \
dif.is_negative is not dif.is_nonnegative:
return sympify(dif.is_negative)
return C.StrictLessThan(self, other, evaluate=False)
@staticmethod
def _from_mpmath(x, prec):
if hasattr(x, "_mpf_"):
return C.Float._new(x._mpf_, prec)
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
re = C.Float._new(re, prec)
im = C.Float._new(im, prec)*S.ImaginaryUnit
return re + im
else:
raise TypeError("expected mpmath number (mpf or mpc)")
@property
def is_number(self):
"""Returns True if 'self' has no free symbols.
It will be faster than `if not self.free_symbols`, however, since
`is_number` will fail as soon as it hits a free symbol.
Examples
========
>>> from sympy import log, Integral
>>> from sympy.abc import x
>>> x.is_number
False
>>> (2*x).is_number
False
>>> (2 + log(2)).is_number
True
>>> (2 + Integral(2, x)).is_number
False
>>> (2 + Integral(2, (x, 1, 2))).is_number
True
"""
return all(obj.is_number for obj in self.args)
def _random(self, n=None, re_min=-1, im_min=-1, re_max=1, im_max=1):
"""Return self evaluated, if possible, replacing free symbols with
random complex values, if necessary.
The random complex value for each free symbol is generated
by the random_complex_number routine giving real and imaginary
parts in the range given by the re_min, re_max, im_min, and im_max
values. The returned value is evaluated to a precision of n
(if given) else the maximum of 15 and the precision needed
to get more than 1 digit of precision. If the expression
could not be evaluated to a number, or could not be evaluated
to more than 1 digit of precision, then None is returned.
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y
>>> x._random() # doctest: +SKIP
0.0392918155679172 + 0.916050214307199*I
>>> x._random(2) # doctest: +SKIP
-0.77 - 0.87*I
>>> (x + y/2)._random(2) # doctest: +SKIP
-0.57 + 0.16*I
>>> sqrt(2)._random(2)
1.4
See Also
========
sympy.utilities.randtest.random_complex_number
"""
free = self.free_symbols
prec = 1
if free:
from sympy.utilities.randtest import random_complex_number
a, c, b, d = re_min, re_max, im_min, im_max
reps = dict(list(zip(free, [random_complex_number(a, b, c, d, rational=True)
for zi in free])))
try:
nmag = abs(self.evalf(2, subs=reps))
except (ValueError, TypeError):
# if an out of range value resulted in evalf problems
# then return None -- XXX is there a way to know how to
# select a good random number for a given expression?
# e.g. when calculating n! negative values for n should not
# be used
return None
else:
reps = {}
nmag = abs(self.evalf(2))
if not hasattr(nmag, '_prec'):
# e.g. exp_polar(2*I*pi) doesn't evaluate but is_number is True
return None
if nmag._prec == 1:
# increase the precision up to the default maximum
# precision to see if we can get any significance
from mpmath.libmp.libintmath import giant_steps
from sympy.core.evalf import DEFAULT_MAXPREC as target
# evaluate
for prec in giant_steps(2, target):
nmag = abs(self.evalf(prec, subs=reps))
if nmag._prec != 1:
break
if nmag._prec != 1:
if n is None:
n = max(prec, 15)
return self.evalf(n, subs=reps)
# never got any significance
return None
def is_constant(self, *wrt, **flags):
"""Return True if self is constant, False if not, or None if
the constancy could not be determined conclusively.
If an expression has no free symbols then it is a constant. If
there are free symbols it is possible that the expression is a
constant, perhaps (but not necessarily) zero. To test such
expressions, two strategies are tried:
1) numerical evaluation at two random points. If two such evaluations
give two different values and the values have a precision greater than
1 then self is not constant. If the evaluations agree or could not be
obtained with any precision, no decision is made. The numerical testing
is done only if ``wrt`` is different than the free symbols.
2) differentiation with respect to variables in 'wrt' (or all free
symbols if omitted) to see if the expression is constant or not. This
will not always lead to an expression that is zero even though an
expression is constant (see added test in test_expr.py). If
all derivatives are zero then self is constant with respect to the
given symbols.
If neither evaluation nor differentiation can prove the expression is
constant, None is returned unless two numerical values happened to be
the same and the flag ``failing_number`` is True -- in that case the
numerical value will be returned.
If flag simplify=False is passed, self will not be simplified;
the default is True since self should be simplified before testing.
Examples
========
>>> from sympy import cos, sin, Sum, S, pi
>>> from sympy.abc import a, n, x, y
>>> x.is_constant()
False
>>> S(2).is_constant()
True
>>> Sum(x, (x, 1, 10)).is_constant()
True
>>> Sum(x, (x, 1, n)).is_constant()
False
>>> Sum(x, (x, 1, n)).is_constant(y)
True
>>> Sum(x, (x, 1, n)).is_constant(n)
False
>>> Sum(x, (x, 1, n)).is_constant(x)
True
>>> eq = a*cos(x)**2 + a*sin(x)**2 - a
>>> eq.is_constant()
True
>>> eq.subs({x:pi, a:2}) == eq.subs({x:pi, a:3}) == 0
True
>>> (0**x).is_constant()
False
>>> x.is_constant()
False
>>> (x**x).is_constant()
False
>>> one = cos(x)**2 + sin(x)**2
>>> one.is_constant()
True
>>> ((one - 1)**(x + 1)).is_constant() in (True, False) # could be 0 or 1
True
"""
simplify = flags.get('simplify', True)
# Except for expressions that contain units, only one of these should
# be necessary since if something is
# known to be a number it should also know that there are no
# free symbols. But is_number quits as soon as it hits a non-number
# whereas free_symbols goes until all free symbols have been collected,
# thus is_number should be faster. But a double check on free symbols
# is made just in case there is a discrepancy between the two.
free = self.free_symbols
if self.is_number or not free:
# if the following assertion fails then that object's free_symbols
# method needs attention: if an expression is a number it cannot
# have free symbols
assert not free
return True
# if we are only interested in some symbols and they are not in the
# free symbols then this expression is constant wrt those symbols
wrt = set(wrt)
if wrt and not wrt & free:
return True
wrt = wrt or free
# simplify unless this has already been done
expr = self
if simplify:
expr = expr.simplify()
# is_zero should be a quick assumptions check; it can be wrong for
# numbers (see test_is_not_constant test), giving False when it
# shouldn't, but hopefully it will never give True unless it is sure.
if expr.is_zero:
return True
# try numerical evaluation to see if we get two different values
failing_number = None
if wrt == free:
# try 0 (for a) and 1 (for b)
try:
a = expr.subs(list(zip(free, [0]*len(free))),
simultaneous=True)
if a is S.NaN:
# evaluation may succeed when substitution fails
a = expr._random(None, 0, 0, 0, 0)
except ZeroDivisionError:
a = None
if a is not None and a is not S.NaN:
try:
b = expr.subs(list(zip(free, [1]*len(free))),
simultaneous=True)
if b is S.NaN:
# evaluation may succeed when substitution fails
b = expr._random(None, 1, 0, 1, 0)
except ZeroDivisionError:
b = None
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random real
b = expr._random(None, -1, 0, 1, 0)
if b is not None and b is not S.NaN and b.equals(a) is False:
return False
# try random complex
b = expr._random()
if b is not None and b is not S.NaN:
if b.equals(a) is False:
return False
failing_number = a if a.is_number else b
# now we will test each wrt symbol (or all free symbols) to see if the
# expression depends on them or not using differentiation. This is
# not sufficient for all expressions, however, so we don't return
# False if we get a derivative other than 0 with free symbols.
for w in wrt:
deriv = expr.diff(w)
if simplify:
deriv = deriv.simplify()
if deriv != 0:
if not (deriv.is_Number or pure_complex(deriv)):
if flags.get('failing_number', False):
return failing_number
elif deriv.free_symbols:
# dead line provided _random returns None in such cases
return None
return False
return True
def equals(self, other, failing_expression=False):
"""Return True if self == other, False if it doesn't, or None. If
failing_expression is True then the expression which did not simplify
to a 0 will be returned instead of None.
If ``self`` is a Number (or complex number) that is not zero, then
the result is False.
If ``self`` is a number and has not evaluated to zero, evalf will be
used to test whether the expression evaluates to zero. If it does so
and the result has significance (i.e. the precision is either -1, for
a Rational result, or is greater than 1) then the evalf value will be
used to return True or False.
"""
from sympy.simplify.simplify import nsimplify, simplify
from sympy.solvers.solvers import solve
from sympy.polys.polyerrors import NotAlgebraic
from sympy.polys.numberfields import minimal_polynomial
other = sympify(other)
if self == other:
return True
# they aren't the same so see if we can make the difference 0;
# don't worry about doing simplification steps one at a time
# because if the expression ever goes to 0 then the subsequent
# simplification steps that are done will be very fast.
diff = factor_terms((self - other).simplify(), radical=True)
if not diff:
return True
if not diff.has(Add, Mod):
# if there is no expanding to be done after simplifying
# then this can't be a zero
return False
constant = diff.is_constant(simplify=False, failing_number=True)
if constant is False:
return False
if constant is None and (diff.free_symbols or not diff.is_number):
# e.g. unless the right simplification is done, a symbolic
# zero is possible (see expression of issue 6829: without
# simplification constant will be None).
return
if constant is True:
ndiff = diff._random()
if ndiff:
return False
# sometimes we can use a simplified result to give a clue as to
# what the expression should be; if the expression is *not* zero
# then we should have been able to compute that and so now
# we can just consider the cases where the approximation appears
# to be zero -- we try to prove it via minimal_polynomial.
if diff.is_number:
approx = diff.nsimplify()
if not approx:
# try to prove via self-consistency
surds = [s for s in diff.atoms(Pow) if s.args[0].is_Integer]
# it seems to work better to try big ones first
surds.sort(key=lambda x: -x.args[0])
for s in surds:
try:
# simplify is False here -- this expression has already
# been identified as being hard to identify as zero;
# we will handle the checking ourselves using nsimplify
# to see if we are in the right ballpark or not and if so
# *then* the simplification will be attempted.
sol = solve(diff, s, check=False, simplify=False)
if sol:
if s in sol:
return True
if any(nsimplify(si, [s]) == s and simplify(si) == s
for si in sol):
return True
except NotImplementedError:
pass
# try to prove with minimal_polynomial but know when
# *not* to use this or else it can take a long time. e.g. issue 8354
if True: # change True to condition that assures non-hang
try:
mp = minimal_polynomial(diff)
if mp.is_Symbol:
return True
return False
except (NotAlgebraic, NotImplementedError):
pass
# diff has not simplified to zero; constant is either None, True
# or the number with significance (prec != 1) that was randomly
# calculated twice as the same value.
if constant not in (True, None) and constant != 0:
return False
if failing_expression:
return diff
return None
def _eval_is_composite(self):
if self.is_integer and self.is_positive and self.is_prime is False:
return True
def _eval_is_positive(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n > 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_is_negative(self):
from sympy.polys.numberfields import minimal_polynomial
from sympy.polys.polyerrors import NotAlgebraic
if self.is_number:
if self.is_real is False:
return False
try:
# check to see that we can get a value
n2 = self._eval_evalf(2)
if n2 is None:
raise AttributeError
if n2._prec == 1: # no significance
raise AttributeError
if n2 == S.NaN:
raise AttributeError
except (AttributeError, ValueError):
return None
n, i = self.evalf(2).as_real_imag()
if not i.is_Number or not n.is_Number:
return False
if n._prec != 1 and i._prec != 1:
return bool(not i and n < 0)
elif n._prec == 1 and (not i or i._prec == 1) and \
self.is_algebraic and not self.has(Function):
try:
if minimal_polynomial(self).is_Symbol:
return False
except (NotAlgebraic, NotImplementedError):
pass
def _eval_interval(self, x, a, b):
"""
Returns evaluation over an interval. For most functions this is:
self.subs(x, b) - self.subs(x, a),
possibly using limit() if NaN is returned from subs.
If b or a is None, it only evaluates -self.subs(x, a) or self.subs(x, b),
respectively.
"""
from sympy.series import limit, Limit
if (a is None and b is None):
raise ValueError('Both interval ends cannot be None.')
if a is None:
A = 0
else:
A = self.subs(x, a)
if A.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
A = limit(self, x, a)
if A is S.NaN:
return A
if isinstance(A, Limit):
raise NotImplementedError("Could not compute limit")
if b is None:
B = 0
else:
B = self.subs(x, b)
if B.has(S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
B = limit(self, x, b)
if isinstance(B, Limit):
raise NotImplementedError("Could not compute limit")
return B - A
def _eval_power(self, other):
# subclass to compute self**other for cases when
# other is not NaN, 0, or 1
return None
def _eval_conjugate(self):
if self.is_real:
return self
elif self.is_imaginary:
return -self
def conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
return c(self)
def _eval_transpose(self):
from sympy.functions.elementary.complexes import conjugate
if self.is_complex:
return self
elif self.is_hermitian:
return conjugate(self)
elif self.is_antihermitian:
return -conjugate(self)
def transpose(self):
from sympy.functions.elementary.complexes import transpose
return transpose(self)
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import conjugate, transpose
if self.is_hermitian:
return self
elif self.is_antihermitian:
return -self
obj = self._eval_conjugate()
if obj is not None:
return transpose(obj)
obj = self._eval_transpose()
if obj is not None:
return conjugate(obj)
def adjoint(self):
from sympy.functions.elementary.complexes import adjoint
return adjoint(self)
@classmethod
def _parse_order(cls, order):
"""Parse and configure the ordering of terms. """
from sympy.polys.orderings import monomial_key
try:
reverse = order.startswith('rev-')
except AttributeError:
reverse = False
else:
if reverse:
order = order[4:]
monom_key = monomial_key(order)
def neg(monom):
result = []
for m in monom:
if isinstance(m, tuple):
result.append(neg(m))
else:
result.append(-m)
return tuple(result)
def key(term):
_, ((re, im), monom, ncpart) = term
monom = neg(monom_key(monom))
ncpart = tuple([ e.sort_key(order=order) for e in ncpart ])
coeff = ((bool(im), im), (re, im))
return monom, ncpart, coeff
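# a note on the key above: negating the monomial exponents makes a plain
# ascending sort produce the usual highest-degree-first ordering, and the
# coefficient key sorts purely real coefficients before complex ones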
return key, reverse
def as_ordered_factors(self, order=None):
"""Return list of ordered factors (if Mul) else [self]."""
return [self]
def as_ordered_terms(self, order=None, data=False):
"""
Transform an expression to an ordered list of terms.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> (sin(x)**2*cos(x) + sin(x)**2 + 1).as_ordered_terms()
[sin(x)**2*cos(x), sin(x)**2, 1]
"""
key, reverse = self._parse_order(order)
terms, gens = self.as_terms()
if not any(term.is_Order for term, _ in terms):
ordered = sorted(terms, key=key, reverse=reverse)
else:
_terms, _order = [], []
for term, repr in terms:
if not term.is_Order:
_terms.append((term, repr))
else:
_order.append((term, repr))
ordered = sorted(_terms, key=key, reverse=True) \
+ sorted(_order, key=key, reverse=True)
if data:
return ordered, gens
else:
return [ term for term, _ in ordered ]
def as_terms(self):
"""Transform an expression to a list of terms. """
from sympy.core import Add, Mul, S
from sympy.core.exprtools import decompose_power
gens, terms = set([]), []
for term in Add.make_args(self):
coeff, _term = term.as_coeff_Mul()
coeff = complex(coeff)
cpart, ncpart = {}, []
if _term is not S.One:
for factor in Mul.make_args(_term):
if factor.is_number:
try:
coeff *= complex(factor)
except TypeError:
pass
else:
continue
if factor.is_commutative:
base, exp = decompose_power(factor)
cpart[base] = exp
gens.add(base)
else:
ncpart.append(factor)
coeff = coeff.real, coeff.imag
ncpart = tuple(ncpart)
terms.append((term, (coeff, cpart, ncpart)))
gens = sorted(gens, key=default_sort_key)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
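# re-encode every term against the full sorted generator list; entries in
# ``result`` have the shape (term, ((re, im), exponent tuple aligned with
# ``gens``, tuple of noncommutative factors))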
result = []
for term, (coeff, cpart, ncpart) in terms:
monom = [0]*k
for base, exp in cpart.items():
monom[indices[base]] = exp
result.append((term, (coeff, tuple(monom), ncpart)))
return result, gens
def removeO(self):
"""Removes the additive O(..) symbol if there is one"""
return self
def getO(self):
"""Returns the additive O(..) symbol if there is one, else None."""
return None
def getn(self):
"""
Returns the order of the expression.
The order is determined from the O(...) term. If there
is no O(...) term, it returns None.
Examples
========
>>> from sympy import O
>>> from sympy.abc import x
>>> (1 + x + O(x**2)).getn()
2
>>> (1 + x).getn()
"""
o = self.getO()
if o is None:
return None
elif o.is_Order:
o = o.expr
if o is S.One:
return S.Zero
if o.is_Symbol:
return S.One
if o.is_Pow:
return o.args[1]
if o.is_Mul: # x**n*log(x)**n or x**n/log(x)**n
for oi in o.args:
if oi.is_Symbol:
return S.One
if oi.is_Pow:
syms = oi.atoms(C.Symbol)
if len(syms) == 1:
x = syms.pop()
oi = oi.subs(x, C.Dummy('x', positive=True))
if oi.base.is_Symbol and oi.exp.is_Rational:
return abs(oi.exp)
raise NotImplementedError('not sure of order of %s' % o)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from .function import count_ops
return count_ops(self, visual)
def args_cnc(self, cset=False, warn=True, split_1=True):
"""Return [commutative factors, non-commutative factors] of self.
self is treated as a Mul and the ordering of the factors is maintained.
If ``cset`` is True the commutative factors will be returned in a set.
If there were repeated factors (as may happen with an unevaluated Mul)
then an error will be raised unless it is explicitly suppressed by
setting ``warn`` to False.
Note: -1 is always separated from a Number unless split_1 is False.
>>> from sympy import symbols, oo
>>> A, B = symbols('A B', commutative=0)
>>> x, y = symbols('x y')
>>> (-2*x*y).args_cnc()
[[-1, 2, x, y], []]
>>> (-2.5*x).args_cnc()
[[-1, 2.5, x], []]
>>> (-2*x*A*B*y).args_cnc()
[[-1, 2, x, y], [A, B]]
>>> (-2*x*A*B*y).args_cnc(split_1=False)
[[-2, x, y], [A, B]]
>>> (-2*x*y).args_cnc(cset=True)
[set([-1, 2, x, y]), []]
The arg is always treated as a Mul:
>>> (-2 + x + A).args_cnc()
[[], [x - 2 + A]]
>>> (-oo).args_cnc() # -oo is a singleton
[[-1, oo], []]
"""
if self.is_Mul:
args = list(self.args)
else:
args = [self]
for i, mi in enumerate(args):
if not mi.is_commutative:
c = args[:i]
nc = args[i:]
break
else:
c = args
nc = []
if c and split_1 and (
c[0].is_Number and
c[0].is_negative and
c[0] is not S.NegativeOne):
c[:1] = [S.NegativeOne, -c[0]]
if cset:
clen = len(c)
c = set(c)
if clen and warn and len(c) != clen:
raise ValueError('repeated commutative arguments: %s' %
[ci for ci in c if list(self.args).count(ci) > 1])
return [c, nc]
def coeff(self, x, n=1, right=False):
"""
Returns the coefficient from the term(s) containing ``x**n``. If ``n``
is zero then all terms independent of ``x`` will be returned.
When x is noncommutative, the coeff to the left (default) or right of x
can be returned. The keyword 'right' is ignored when x is commutative.
See Also
========
as_coefficient: separate the expression into a coefficient and factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
Examples
========
>>> from sympy import symbols
>>> from sympy.abc import x, y, z
You can select terms that have an explicit negative in front of them:
>>> (-x + 2*y).coeff(-1)
x
>>> (x - 2*y).coeff(-1)
2*y
You can select terms with no Rational coefficient:
>>> (x + 2*y).coeff(1)
x
>>> (3 + 2*x + 4*x**2).coeff(1)
0
You can select terms independent of x by making n=0; in this case
expr.as_independent(x)[0] is returned (and 0 will be returned instead
of None):
>>> (3 + 2*x + 4*x**2).coeff(x, 0)
3
>>> eq = ((x + 1)**3).expand() + 1
>>> eq
x**3 + 3*x**2 + 3*x + 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 2]
>>> eq -= 2
>>> [eq.coeff(x, i) for i in reversed(range(4))]
[1, 3, 3, 0]
You can select terms that have a numerical term in front of them:
>>> (-x - 2*y).coeff(2)
-y
>>> from sympy import sqrt
>>> (x + sqrt(2)*x).coeff(sqrt(2))
x
The matching is exact:
>>> (3 + 2*x + 4*x**2).coeff(x)
2
>>> (3 + 2*x + 4*x**2).coeff(x**2)
4
>>> (3 + 2*x + 4*x**2).coeff(x**3)
0
>>> (z*(x + y)**2).coeff((x + y)**2)
z
>>> (z*(x + y)**2).coeff(x + y)
0
In addition, no factoring is done, so 1 + z*(1 + y) is not obtained
from the following:
>>> (x + z*(x + x*y)).coeff(x)
1
If such factoring is desired, factor_terms can be used first:
>>> from sympy import factor_terms
>>> factor_terms(x + z*(x + x*y)).coeff(x)
z*(y + 1) + 1
>>> n, m, o = symbols('n m o', commutative=False)
>>> n.coeff(n)
1
>>> (3*n).coeff(n)
3
>>> (n*m + m*n*m).coeff(n) # = (1 + m)*n*m
1 + m
>>> (n*m + m*n*m).coeff(n, right=True) # = (1 + m)*n*m
m
If there is more than one possible coefficient 0 is returned:
>>> (n*m + m*n).coeff(n)
0
If there is only one possible coefficient, it is returned:
>>> (n*m + x*m*n).coeff(m*n)
x
>>> (n*m + x*m*n).coeff(m*n, right=1)
1
"""
x = sympify(x)
if not isinstance(x, Basic):
return S.Zero
n = as_int(n)
if not x:
return S.Zero
if x == self:
if n == 1:
return S.One
return S.Zero
if x is S.One:
co = [a for a in Add.make_args(self)
if a.as_coeff_Mul()[0] is S.One]
if not co:
return S.Zero
return Add(*co)
if n == 0:
if x.is_Add and self.is_Add:
c = self.coeff(x, right=right)
if not c:
return S.Zero
if not right:
return self - Add(*[a*x for a in Add.make_args(c)])
return self - Add(*[x*a for a in Add.make_args(c)])
return self.as_independent(x, as_Add=True)[0]
# continue with the full method, looking for this power of x:
x = x**n
def incommon(l1, l2):
if not l1 or not l2:
return []
n = min(len(l1), len(l2))
for i in range(n):
if l1[i] != l2[i]:
return l1[:i]
return l1[:]
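# e.g. incommon([A, B, C], [A, B, D]) == [A, B]: the longest common prefix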
def find(l, sub, first=True):
""" Find where list sub appears in list l. When ``first`` is True
the first occurrence from the left is returned, else the last
occurrence is returned. Return None if sub is not in l.
>> l = range(5)*2
>> find(l, [2, 3])
2
>> find(l, [2, 3], first=0)
7
>> find(l, [2, 4])
None
"""
if not sub or not l or len(sub) > len(l):
return None
n = len(sub)
if not first:
l.reverse()
sub.reverse()
for i in range(0, len(l) - n + 1):
if all(l[i + j] == sub[j] for j in range(n)):
break
else:
i = None
if not first:
l.reverse()
sub.reverse()
if i is not None and not first:
i = len(l) - (i + n)
return i
co = []
args = Add.make_args(self)
self_c = self.is_commutative
x_c = x.is_commutative
if self_c and not x_c:
return S.Zero
if self_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs = a.args_cnc(cset=True, warn=False)[0]
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*resid))
if co == []:
return S.Zero
elif co:
return Add(*co)
elif x_c:
xargs = x.args_cnc(cset=True, warn=False)[0]
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append(Mul(*(list(resid) + nc)))
if co == []:
return S.Zero
elif co:
return Add(*co)
else: # both nc
xargs, nx = x.args_cnc(cset=True)
# find the parts that pass the commutative terms
for a in args:
margs, nc = a.args_cnc(cset=True)
if len(xargs) > len(margs):
continue
resid = margs.difference(xargs)
if len(resid) + len(xargs) == len(margs):
co.append((resid, nc))
# now check the non-comm parts
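# at this point co holds (commutative residue, noncommutative factor list)
# pairs for the terms whose commutative part absorbed xargs; x's own
# noncommutative factors (nx) are now sought as a contiguous run -- first
# in identical factor lists, then in a shared prefix (beg), then in a
# shared suffix (end), and finally within a single matching term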
if not co:
return S.Zero
if all(n == co[0][1] for r, n in co):
ii = find(co[0][1], nx, right)
if ii is not None:
if not right:
return Mul(Add(*[Mul(*r) for r, c in co]), Mul(*co[0][1][:ii]))
else:
return Mul(*co[0][1][ii + len(nx):])
beg = reduce(incommon, (n[1] for n in co))
if beg:
ii = find(beg, nx, right)
if ii is not None:
if not right:
gcdc = co[0][0]
for i in range(1, len(co)):
gcdc = gcdc.intersection(co[i][0])
if not gcdc:
break
return Mul(*(list(gcdc) + beg[:ii]))
else:
m = ii + len(nx)
return Add(*[Mul(*(list(r) + n[m:])) for r, n in co])
end = list(reversed(
reduce(incommon, (list(reversed(n[1])) for n in co))))
if end:
ii = find(end, nx, right)
if ii is not None:
if not right:
return Add(*[Mul(*(list(r) + n[:-len(end) + ii])) for r, n in co])
else:
return Mul(*end[ii + len(nx):])
# look for single match
hit = None
for i, (r, n) in enumerate(co):
ii = find(n, nx, right)
if ii is not None:
if not hit:
hit = ii, r, n
else:
break
else:
if hit:
ii, r, n = hit
if not right:
return Mul(*(list(r) + n[:ii]))
else:
return Mul(*n[ii + len(nx):])
return S.Zero
def as_expr(self, *gens):
"""
Convert a polynomial to a SymPy expression.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> f = (x**2 + x*y).as_poly(x, y)
>>> f.as_expr()
x**2 + x*y
>>> sin(x).as_expr()
sin(x)
"""
return self
def as_coefficient(self, expr):
"""
Extracts symbolic coefficient at the given expression. In
other words, this functions separates 'self' into the product
of 'expr' and 'expr'-free coefficient. If such separation
is not possible it will return None.
Examples
========
>>> from sympy import E, pi, sin, I, Poly
>>> from sympy.abc import x
>>> E.as_coefficient(E)
1
>>> (2*E).as_coefficient(E)
2
>>> (2*sin(E)*E).as_coefficient(E)
Two terms have E in them so a sum is returned. (If one were
desiring the coefficient of the term exactly matching E then
the constant from the returned expression could be selected.
Or, for greater precision, a method of Poly can be used to
indicate the desired term from which the coefficient is
desired.)
>>> (2*E + x*E).as_coefficient(E)
x + 2
>>> _.args[0] # just want the exact match
2
>>> p = Poly(2*E + x*E); p
Poly(x*E + 2*E, x, E, domain='ZZ')
>>> p.coeff_monomial(E)
2
>>> p.nth(0,1)
2
Since the following cannot be written as a product containing
E as a factor, None is returned. (If the coefficient ``2*x`` is
desired then the ``coeff`` method should be used.)
>>> (2*E*x + x).as_coefficient(E)
>>> (2*E*x + x).coeff(E)
2*x
>>> (E*(x + 1) + x).as_coefficient(E)
>>> (2*pi*I).as_coefficient(pi*I)
2
>>> (2*I).as_coefficient(pi*I)
See Also
========
coeff: return sum of terms have a given factor
as_coeff_Add: separate the additive constant from an expression
as_coeff_Mul: separate the multiplicative constant from an expression
as_independent: separate x-dependent terms/factors from others
sympy.polys.polytools.coeff_monomial: efficiently find the single coefficient of a monomial in Poly
sympy.polys.polytools.nth: like coeff_monomial but powers of monomial terms are used
"""
r = self.extract_multiplicatively(expr)
if r and not r.has(expr):
return r
def as_independent(self, *deps, **hint):
"""
A mostly naive separation of a Mul or Add into arguments that are not
dependent on deps and those that are. To obtain as complete a separation of variables
as possible, use a separation method first, e.g.:
* separatevars() to change Mul, Add and Pow (including exp) into Mul
* .expand(mul=True) to change Add or Mul into Add
* .expand(log=True) to change log expr into an Add
The only non-naive thing that is done here is to respect noncommutative
ordering of variables.
The returned tuple (i, d) has the following interpretation:
* i will have no variable that appears in deps
* d will be 1 or else have terms that contain variables that are in deps
* if self is an Add then self = i + d
* if self is a Mul then self = i*d
* if self is anything else, either tuple (self, S.One) or (S.One, self)
is returned.
To force the expression to be treated as an Add, use the hint as_Add=True
Examples
========
-- self is an Add
>>> from sympy import sin, cos, exp
>>> from sympy.abc import x, y, z
>>> (x + x*y).as_independent(x)
(0, x*y + x)
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> (2*x*sin(x) + y + x + z).as_independent(x)
(y + z, 2*x*sin(x) + x)
>>> (2*x*sin(x) + y + x + z).as_independent(x, y)
(z, 2*x*sin(x) + x + y)
-- self is a Mul
>>> (x*sin(x)*cos(y)).as_independent(x)
(cos(y), x*sin(x))
non-commutative terms cannot always be separated out when self is a Mul
>>> from sympy import symbols
>>> n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
>>> (n1 + n1*n2).as_independent(n2)
(n1, n1*n2)
>>> (n2*n1 + n1*n2).as_independent(n2)
(0, n1*n2 + n2*n1)
>>> (n1*n2*n3).as_independent(n1)
(1, n1*n2*n3)
>>> (n1*n2*n3).as_independent(n2)
(n1, n2*n3)
>>> ((x-n1)*(x-y)).as_independent(x)
(1, (x - y)*(x - n1))
-- self is anything else:
>>> (sin(x)).as_independent(x)
(1, sin(x))
>>> (sin(x)).as_independent(y)
(sin(x), 1)
>>> exp(x+y).as_independent(x)
(1, exp(x + y))
-- force self to be treated as an Add:
>>> (3*x).as_independent(x, as_Add=True)
(0, 3*x)
-- force self to be treated as a Mul:
>>> (3+x).as_independent(x, as_Add=False)
(1, x + 3)
>>> (-3+x).as_independent(x, as_Add=False)
(1, x - 3)
Note how the below differs from the above in making the
constant on the dep term positive.
>>> (y*(-3+x)).as_independent(x)
(y, x - 3)
-- use .as_independent() for true independence testing instead
of .has(). The former considers only symbols in the free
symbols while the latter considers all symbols
>>> from sympy import Integral
>>> I = Integral(x, (x, 1, 2))
>>> I.has(x)
True
>>> x in I.free_symbols
False
>>> I.as_independent(x) == (I, 1)
True
>>> (I + x).as_independent(x) == (I, x)
True
Note: when trying to get independent terms, a separation method
might need to be used first. In this case, it is important to keep
track of what you send to this routine so you know how to interpret
the returned values
>>> from sympy import separatevars, log
>>> separatevars(exp(x+y)).as_independent(x)
(exp(y), exp(x))
>>> (x + x*y).as_independent(y)
(x, x*y)
>>> separatevars(x + x*y).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).as_independent(y)
(x, y + 1)
>>> (x*(1 + y)).expand(mul=True).as_independent(y)
(x, x*y)
>>> a, b=symbols('a b',positive=True)
>>> (log(a*b).expand(log=True)).as_independent(b)
(log(a), log(b))
See also: .separatevars(), .expand(log=True),
.as_two_terms(), .as_coeff_add(), .as_coeff_mul()
"""
from sympy.utilities.iterables import sift
func = self.func
# sift out deps into symbolic and other and ignore
# all symbols but those that are in the free symbols
sym = set()
other = []
for d in deps:
if isinstance(d, C.Symbol): # Symbol.is_Symbol is True
sym.add(d)
else:
other.append(d)
def has(e):
"""return the standard has() if there are no literal symbols, else
check to see that symbol-deps are in the free symbols."""
has_other = e.has(*other)
if not sym:
return has_other
return has_other or e.has(*(e.free_symbols & sym))
if hint.get('as_Add', func is Add):
want = Add
else:
want = Mul
if (want is not func or
func is not Add and func is not Mul):
if has(self):
return (want.identity, self)
else:
return (self, want.identity)
else:
if func is Add:
args = list(self.args)
else:
args, nc = self.args_cnc()
d = sift(args, lambda x: has(x))
depend = d[True]
indep = d[False]
if func is Add: # all terms were treated as commutative
return (Add(*indep),
Add(*depend))
else: # handle noncommutative by stopping at first dependent term
for i, n in enumerate(nc):
if has(n):
depend.extend(nc[i:])
break
indep.append(n)
return Mul(*indep), Mul(*depend)
def as_real_imag(self, deep=True, **hints):
"""Performs complex expansion on 'self' and returns a tuple
containing the collected real and imaginary parts. This
method should not be confused with the re() and im() functions,
which do not perform complex expansion at evaluation.
However it is possible to expand both re() and im()
functions and get exactly the same results as with
a single call to this function.
>>> from sympy import symbols, I
>>> x, y = symbols('x,y', real=True)
>>> (x + y*I).as_real_imag()
(x, y)
>>> from sympy.abc import z, w
>>> (z + w*I).as_real_imag()
(re(z) - im(w), re(w) + im(z))
"""
if hints.get('ignore') == self:
return None
else:
return (C.re(self), C.im(self))
def as_powers_dict(self):
"""Return self as a dictionary of factors with each factor being
treated as a power. The keys are the bases of the factors and the
values, the corresponding exponents. The resulting dictionary should
be used with caution if the expression is a Mul and contains non-
commutative factors since the order that they appeared will be lost in
the dictionary."""
d = defaultdict(int)
d.update(dict([self.as_base_exp()]))
return d
def as_coefficients_dict(self):
"""Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
Examples
========
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
"""
c, m = self.as_coeff_Mul()
if not c.is_Rational:
c = S.One
m = self
d = defaultdict(int)
d.update({m: c})
return d
def as_base_exp(self):
# a -> b ** e
return self, S.One
def as_coeff_mul(self, *deps, **kwargs):
"""Return the tuple (c, args) where self is written as a Mul, ``m``.
c should be a Rational multiplied by any terms of the Mul that are
independent of deps.
args should be a tuple of all other terms of m; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is a Mul or not but
you want to treat self as a Mul or if you want to process the
individual arguments of the tail of self as a Mul.
- if you know self is a Mul and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail;
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_mul()
(3, ())
>>> (3*x*y).as_coeff_mul()
(3, (x, y))
>>> (3*x*y).as_coeff_mul(x)
(3*y, (x,))
>>> (3*y).as_coeff_mul(x)
(3*y, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.One, (self,)
def as_coeff_add(self, *deps):
"""Return the tuple (c, args) where self is written as an Add, ``a``.
c should be a Rational added to any terms of the Add that are
independent of deps.
args should be a tuple of all other terms of ``a``; args is empty
if self is a Number or if self is independent of deps (when given).
This should be used when you don't know if self is an Add or not but
you want to treat self as an Add or if you want to process the
individual arguments of the tail of self as an Add.
- if you know self is an Add and want only the head, use self.args[0];
- if you don't want to process the arguments of the tail but need the
tail then use self.as_two_terms() which gives the head and tail.
- if you want to split self into an independent and dependent parts
use ``self.as_independent(*deps)``
>>> from sympy import S
>>> from sympy.abc import x, y
>>> (S(3)).as_coeff_add()
(3, ())
>>> (3 + x).as_coeff_add()
(3, (x,))
>>> (3 + x + y).as_coeff_add(x)
(y + 3, (x,))
>>> (3 + y).as_coeff_add(x)
(y + 3, ())
"""
if deps:
if not self.has(*deps):
return self, tuple()
return S.Zero, (self,)
def primitive(self):
"""Return the positive Rational that can be extracted non-recursively
from every term of self (i.e., self is treated like an Add). This is
like the as_coeff_Mul() method but primitive always extracts a positive
Rational (never a negative or a Float).
Examples
========
>>> from sympy.abc import x
>>> (3*(x + 1)**2).primitive()
(3, (x + 1)**2)
>>> a = (6*x + 2); a.primitive()
(2, 3*x + 1)
>>> b = (x/2 + 3); b.primitive()
(1/2, x + 6)
>>> (a*b).primitive() == (1, a*b)
True
"""
if not self:
return S.One, S.Zero
c, r = self.as_coeff_Mul(rational=True)
if c.is_negative:
c, r = -c, -r
return c, r
def as_content_primitive(self, radical=False):
"""This method should recursively remove a Rational from all arguments
and return that (content) and the new self (primitive). The content
should always be positive and ``Mul(*foo.as_content_primitive()) == foo``.
The primitive need not be in canonical form and should try to preserve
the underlying structure if possible (i.e. expand_mul should not be
applied to self).
Examples
========
>>> from sympy import sqrt
>>> from sympy.abc import x, y, z
>>> eq = 2 + 2*x + 2*y*(3 + 3*y)
The as_content_primitive function is recursive and retains structure:
>>> eq.as_content_primitive()
(2, x + 3*y*(y + 1) + 1)
Integer powers will have Rationals extracted from the base:
>>> ((2 + 6*x)**2).as_content_primitive()
(4, (3*x + 1)**2)
>>> ((2 + 6*x)**(2*y)).as_content_primitive()
(1, (2*(3*x + 1))**(2*y))
Terms may end up joining once their as_content_primitives are added:
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(11, x*(y + 1))
>>> ((3*(x*(1 + y)) + 2*x*(3 + 3*y))).as_content_primitive()
(9, x*(y + 1))
>>> ((3*(z*(1 + y)) + 2.0*x*(3 + 3*y))).as_content_primitive()
(1, 6.0*x*(y + 1) + 3*z*(y + 1))
>>> ((5*(x*(1 + y)) + 2*x*(3 + 3*y))**2).as_content_primitive()
(121, x**2*(y + 1)**2)
>>> ((5*(x*(1 + y)) + 2.0*x*(3 + 3*y))**2).as_content_primitive()
(1, 121.0*x**2*(y + 1)**2)
Radical content can also be factored out of the primitive:
>>> (2*sqrt(2) + 4*sqrt(10)).as_content_primitive(radical=True)
(2, sqrt(2)*(1 + 2*sqrt(5)))
"""
return S.One, self
def as_numer_denom(self):
""" expression -> a/b -> a, b
This is just a stub; subclasses should override it in order to
return anything other than (self, S.One).
See Also
========
normal: return a/b instead of a, b
"""
return self, S.One
def normal(self):
n, d = self.as_numer_denom()
if d is S.One:
return n
return n/d
def extract_multiplicatively(self, c):
"""Return None if it's not possible to make self in the form
c * something in a nice way, i.e. preserving the properties
of arguments of self.
>>> from sympy import symbols, Rational
>>> x, y = symbols('x,y', real=True)
>>> ((x*y)**3).extract_multiplicatively(x**2 * y)
x*y**2
>>> ((x*y)**3).extract_multiplicatively(x**4 * y)
>>> (2*x).extract_multiplicatively(2)
x
>>> (2*x).extract_multiplicatively(3)
>>> (Rational(1,2)*x).extract_multiplicatively(3)
x/6
"""
c = sympify(c)
if c is S.One:
return self
elif c == self:
return S.One
if c.is_Add:
cc, pc = c.primitive()
if cc is not S.One:
c = Mul(cc, pc, evaluate=False)
if c.is_Mul:
a, b = c.as_two_terms()
x = self.extract_multiplicatively(a)
if x is not None:
return x.extract_multiplicatively(b)
quotient = self / c
if self.is_Number:
if self is S.Infinity:
if c.is_positive:
return S.Infinity
elif self is S.NegativeInfinity:
if c.is_negative:
return S.Infinity
elif c.is_positive:
return S.NegativeInfinity
elif self is S.ComplexInfinity:
if not c.is_zero:
return S.ComplexInfinity
elif self is S.NaN:
return S.NaN
elif self.is_Integer:
if not quotient.is_Integer:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Rational:
if not quotient.is_Rational:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_Float:
if not quotient.is_Float:
return None
elif self.is_positive and quotient.is_negative:
return None
else:
return quotient
elif self.is_NumberSymbol or self.is_Symbol or self is S.ImaginaryUnit:
if quotient.is_Mul and len(quotient.args) == 2:
if quotient.args[0].is_Integer and quotient.args[0].is_positive and quotient.args[1] == self:
return quotient
elif quotient.is_Integer and c.is_Number:
return quotient
elif self.is_Add:
cs, ps = self.primitive()
if cs is not S.One:
return Mul(cs, ps, evaluate=False).extract_multiplicatively(c)
newargs = []
for arg in self.args:
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
newargs.append(newarg)
else:
return None
return Add(*newargs)
elif self.is_Mul:
args = list(self.args)
for i, arg in enumerate(args):
newarg = arg.extract_multiplicatively(c)
if newarg is not None:
args[i] = newarg
return Mul(*args)
elif self.is_Pow:
if c.is_Pow and c.base == self.base:
new_exp = self.exp.extract_additively(c.exp)
if new_exp is not None:
return self.base ** (new_exp)
elif c == self.base:
new_exp = self.exp.extract_additively(1)
if new_exp is not None:
return self.base ** (new_exp)
def extract_additively(self, c):
"""Return self - c if it's possible to subtract c from self and
make all matching coefficients move towards zero, else return None.
Examples
========
>>> from sympy.abc import x, y
>>> e = 2*x + 3
>>> e.extract_additively(x + 1)
x + 2
>>> e.extract_additively(3*x)
>>> e.extract_additively(4)
>>> (y*(x + 1)).extract_additively(x + 1)
>>> ((x + 1)*(x + 2*y + 1) + 3).extract_additively(x + 1)
(x + 1)*(x + 2*y) + 3
Sometimes auto-expansion will return a less simplified result
than desired; gcd_terms might be used in such cases:
>>> from sympy import gcd_terms
>>> (4*x*(y + 1) + y).extract_additively(x)
4*x*(y + 1) + x*(4*y + 3) - x*(4*y + 4) + y
>>> gcd_terms(_)
x*(4*y + 3) + y
See Also
========
extract_multiplicatively
coeff
as_coefficient
"""
c = sympify(c)
if c is S.Zero:
return self
elif c == self:
return S.Zero
elif self is S.Zero:
return None
if self.is_Number:
if not c.is_Number:
return None
co = self
diff = co - c
# XXX should we match types? i.e should 3 - .1 succeed?
if (co > 0 and diff > 0 and diff < co or
co < 0 and diff < 0 and diff > co):
return diff
return None
if c.is_Number:
co, t = self.as_coeff_Add()
xa = co.extract_additively(c)
if xa is None:
return None
return xa + t
# handle the args[0].is_Number case separately
# since we will have trouble looking for the coeff of
# a number.
if c.is_Add and c.args[0].is_Number:
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
h, t = c.as_coeff_Add()
sh, st = self.as_coeff_Add()
xa = sh.extract_additively(h)
if xa is None:
return None
xa2 = st.extract_additively(t)
if xa2 is None:
return None
return xa + xa2
# whole term as a term factor
co = self.coeff(c)
xa0 = (co.extract_additively(1) or 0)*c
if xa0:
diff = self - co*c
return (xa0 + (diff.extract_additively(c) or diff)) or None
# term-wise
coeffs = []
for a in Add.make_args(c):
ac, at = a.as_coeff_Mul()
co = self.coeff(at)
if not co:
return None
coc, cot = co.as_coeff_Add()
xa = coc.extract_additively(ac)
if xa is None:
return None
self -= co*at
coeffs.append((cot + xa)*at)
coeffs.append(self)
return Add(*coeffs)
def could_extract_minus_sign(self):
"""Canonical way to choose an element in the set {e, -e} where
e is any expression. If the canonical element is e, we have
e.could_extract_minus_sign() == True, else
e.could_extract_minus_sign() == False.
For any expression, the set ``{e.could_extract_minus_sign(),
(-e).could_extract_minus_sign()}`` must be ``{True, False}``.
>>> from sympy.abc import x, y
>>> (x-y).could_extract_minus_sign() != (y-x).could_extract_minus_sign()
True
"""
negative_self = -self
self_has_minus = (self.extract_multiplicatively(-1) is not None)
negative_self_has_minus = (
(negative_self).extract_multiplicatively(-1) is not None)
if self_has_minus != negative_self_has_minus:
return self_has_minus
else:
if self.is_Add:
# We choose the one with fewer arguments carrying minus signs
all_args = len(self.args)
negative_args = len([False for arg in self.args if arg.could_extract_minus_sign()])
positive_args = all_args - negative_args
if positive_args > negative_args:
return False
elif positive_args < negative_args:
return True
elif self.is_Mul:
# We choose the one with an odd number of minus signs
num, den = self.as_numer_denom()
args = Mul.make_args(num) + Mul.make_args(den)
arg_signs = [arg.could_extract_minus_sign() for arg in args]
negative_args = list(filter(None, arg_signs))
return len(negative_args) % 2 == 1
# As a last resort, we choose the one with greater value of .sort_key()
return bool(self.sort_key() < negative_self.sort_key())
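# (illustrative) for x - y versus y - x neither extracts -1 cleanly and the
# counts of negative-looking args tie, so the sort_key comparison above is
# what makes exactly one of the pair report True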
def extract_branch_factor(self, allow_half=False):
"""
Try to write self as ``exp_polar(2*pi*I*n)*z`` in a nice way.
Return (z, n).
>>> from sympy import exp_polar, I, pi
>>> from sympy.abc import x, y
>>> exp_polar(I*pi).extract_branch_factor()
(exp_polar(I*pi), 0)
>>> exp_polar(2*I*pi).extract_branch_factor()
(1, 1)
>>> exp_polar(-pi*I).extract_branch_factor()
(exp_polar(I*pi), -1)
>>> exp_polar(3*pi*I + x).extract_branch_factor()
(exp_polar(x + I*pi), 1)
>>> (y*exp_polar(-5*pi*I)*exp_polar(3*pi*I + 2*pi*x)).extract_branch_factor()
(y*exp_polar(2*pi*x), -1)
>>> exp_polar(-I*pi/2).extract_branch_factor()
(exp_polar(-I*pi/2), 0)
If allow_half is True, also extract exp_polar(I*pi):
>>> exp_polar(I*pi).extract_branch_factor(allow_half=True)
(1, 1/2)
>>> exp_polar(2*I*pi).extract_branch_factor(allow_half=True)
(1, 1)
>>> exp_polar(3*I*pi).extract_branch_factor(allow_half=True)
(1, 3/2)
>>> exp_polar(-I*pi).extract_branch_factor(allow_half=True)
(1, -1/2)
"""
from sympy import exp_polar, pi, I, ceiling, Add
n = S(0)
res = S(1)
args = Mul.make_args(self)
exps = []
for arg in args:
if arg.func is exp_polar:
exps += [arg.exp]
else:
res *= arg
piimult = S(0)
extras = []
while exps:
exp = exps.pop()
if exp.is_Add:
exps += exp.args
continue
if exp.is_Mul:
coeff = exp.as_coefficient(pi*I)
if coeff is not None:
piimult += coeff
continue
extras += [exp]
if not piimult.free_symbols:
coeff = piimult
tail = ()
else:
coeff, tail = piimult.as_coeff_add(*piimult.free_symbols)
# round down to nearest multiple of 2
branchfact = ceiling(coeff/2 - S(1)/2)*2
n += branchfact/2
c = coeff - branchfact
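# (illustrative) coeff = 3 gives branchfact = ceiling(3/2 - 1/2)*2 = 2, so
# n gains 1 and c = 1 is the leftover odd multiple of pi*I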
if allow_half:
nc = c.extract_additively(1)
if nc is not None:
n += S(1)/2
c = nc
newexp = pi*I*Add(*((c, ) + tail)) + Add(*extras)
if newexp != 0:
res *= exp_polar(newexp)
return res, n
def _eval_is_polynomial(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_polynomial(self, *syms):
"""
Return True if self is a polynomial in syms and False otherwise.
This checks if self is an exact polynomial in syms. This function
returns False for expressions that are "polynomials" with symbolic
exponents. Thus, you should be able to apply polynomial algorithms to
expressions for which this returns True, and Poly(expr, \*syms) should
work if and only if expr.is_polynomial(\*syms) returns True. The
polynomial does not have to be in expanded form. If no symbols are
given, all free symbols in the expression will be used.
This is not part of the assumptions system. You cannot do
Symbol('z', polynomial=True).
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> ((x**2 + 1)**4).is_polynomial(x)
True
>>> ((x**2 + 1)**4).is_polynomial()
True
>>> (2**x + 1).is_polynomial(x)
False
>>> n = Symbol('n', nonnegative=True, integer=True)
>>> (x**n + 1).is_polynomial(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a polynomial to
become one.
>>> from sympy import sqrt, factor, cancel
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)
>>> a.is_polynomial(y)
False
>>> factor(a)
y + 1
>>> factor(a).is_polynomial(y)
True
>>> b = (y**2 + 2*y + 1)/(y + 1)
>>> b.is_polynomial(y)
False
>>> cancel(b)
y + 1
>>> cancel(b).is_polynomial(y)
True
See also .is_rational_function()
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant polynomial
return True
else:
return self._eval_is_polynomial(syms)
def _eval_is_rational_function(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_rational_function(self, *syms):
"""
Test whether function is a ratio of two polynomials in the given
symbols, syms. When syms is not given, all free symbols will be used.
The rational function does not have to be in expanded or in any kind of
canonical form.
This function returns False for expressions that are "rational
functions" with symbolic exponents. Thus, you should be able to call
.as_numer_denom() and apply polynomial algorithms to the result for
expressions for which this returns True.
This is not part of the assumptions system. You cannot do
Symbol('z', rational_function=True).
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.abc import x, y
>>> (x/y).is_rational_function()
True
>>> (x**2).is_rational_function()
True
>>> (x/sin(y)).is_rational_function(y)
False
>>> n = Symbol('n', integer=True)
>>> (x**n + 1).is_rational_function(x)
False
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be a rational function
to become one.
>>> from sympy import sqrt, factor
>>> y = Symbol('y', positive=True)
>>> a = sqrt(y**2 + 2*y + 1)/y
>>> a.is_rational_function(y)
False
>>> factor(a)
(y + 1)/y
>>> factor(a).is_rational_function(y)
True
See also is_algebraic_expr().
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant rational function
return True
else:
return self._eval_is_rational_function(syms)
def _eval_is_algebraic_expr(self, syms):
if self.free_symbols.intersection(syms) == set([]):
return True
return False
def is_algebraic_expr(self, *syms):
"""
This tests whether a given expression is algebraic or not, in the
given symbols, syms. When syms is not given, all free symbols
will be used. The expression does not have to be in expanded
or in any kind of canonical form.
This function returns False for expressions that are "algebraic
expressions" with symbolic exponents. This is a simple extension to the
is_rational_function, including rational exponentiation.
Examples
========
>>> from sympy import Symbol, sqrt
>>> x = Symbol('x', real=True)
>>> sqrt(1 + x).is_rational_function()
False
>>> sqrt(1 + x).is_algebraic_expr()
True
This function does not attempt any nontrivial simplifications that may
result in an expression that does not appear to be an algebraic
expression to become one.
>>> from sympy import exp, factor
>>> a = sqrt(exp(x)**2 + 2*exp(x) + 1)/(exp(x) + 1)
>>> a.is_algebraic_expr(x)
False
>>> factor(a).is_algebraic_expr()
True
See Also
========
is_rational_function()
References
==========
- http://en.wikipedia.org/wiki/Algebraic_expression
"""
if syms:
syms = set(map(sympify, syms))
else:
syms = self.free_symbols
if syms.intersection(self.free_symbols) == set([]):
# constant algebraic expression
return True
else:
return self._eval_is_algebraic_expr(syms)
###################################################################################
##################### SERIES, LEADING TERM, LIMIT, ORDER METHODS ##################
###################################################################################
def series(self, x=None, x0=0, n=6, dir="+", logx=None):
"""
Series expansion of "self" around ``x = x0`` yielding either terms of
the series one by one (the lazy series given when n=None), else
all the terms at once when n != None.
Returns the series expansion of "self" around the point ``x = x0``
with respect to ``x`` up to ``O((x - x0)**n, x, x0)`` (default n is 6).
If ``x=None`` and ``self`` is univariate, the univariate symbol will
be supplied, otherwise an error will be raised.
>>> from sympy import cos, exp
>>> from sympy.abc import x, y
>>> cos(x).series()
1 - x**2/2 + x**4/24 + O(x**6)
>>> cos(x).series(n=4)
1 - x**2/2 + O(x**4)
>>> cos(x).series(x, x0=1, n=2)
cos(1) - (x - 1)*sin(1) + O((x - 1)**2, (x, 1))
>>> e = cos(x + exp(y))
>>> e.series(y, n=2)
cos(x + 1) - y*sin(x + 1) + O(y**2)
>>> e.series(x, n=2)
cos(exp(y)) - x*sin(exp(y)) + O(x**2)
If ``n=None`` then a generator of the series terms will be returned.
>>> term=cos(x).series(n=None)
>>> [next(term) for i in range(2)]
[1, -x**2/2]
For ``dir=+`` (default) the series is calculated from the right and
for ``dir=-`` the series from the left. For smooth functions this
flag will not alter the results.
>>> abs(x).series(dir="+")
x
>>> abs(x).series(dir="-")
-x
"""
from sympy import collect
if x is None:
syms = self.atoms(C.Symbol)
if not syms:
return self
elif len(syms) > 1:
raise ValueError('x must be given for multivariate functions.')
x = syms.pop()
if not self.has(x):
if n is None:
return (s for s in [self])
else:
return self
if len(dir) != 1 or dir not in '+-':
raise ValueError("Dir must be '+' or '-'")
if x0 in [S.Infinity, S.NegativeInfinity]:
dir = {S.Infinity: '+', S.NegativeInfinity: '-'}[x0]
s = self.subs(x, 1/x).series(x, n=n, dir=dir)
if n is None:
return (si.subs(x, 1/x) for si in s)
return s.subs(x, 1/x)
# use rep to shift origin to x0 and change sign (if dir is negative)
# and undo the process with rep2
if x0 or dir == '-':
if dir == '-':
rep = -x + x0
rep2 = -x
rep2b = x0
else:
rep = x + x0
rep2 = x
rep2b = -x0
s = self.subs(x, rep).series(x, x0=0, n=n, dir='+', logx=logx)
if n is None: # lseries...
return (si.subs(x, rep2 + rep2b) for si in s)
return s.subs(x, rep2 + rep2b)
# from here on it's x0=0 and dir='+' handling
if x.is_positive is x.is_negative is None or x.is_Symbol is not True:
# replace x with an x that has a positive assumption
xpos = C.Dummy('x', positive=True, finite=True)
rv = self.subs(x, xpos).series(xpos, x0, n, dir, logx=logx)
if n is None:
return (s.subs(xpos, x) for s in rv)
else:
return rv.subs(xpos, x)
if n is not None: # nseries handling
s1 = self._eval_nseries(x, n=n, logx=logx)
o = s1.getO() or S.Zero
if o:
# make sure the requested order is returned
ngot = o.getn()
if ngot > n:
# leave o in its current form (e.g. with x*log(x)) so
# it eats terms properly, then replace it below
if n != 0:
s1 += o.subs(x, x**C.Rational(n, ngot))
else:
s1 += C.Order(1, x)
elif ngot < n:
# increase the requested number of terms to get the desired
# number; keep increasing (up to 9) until the received order
# is different from the original order, then predict how
# many additional terms are needed
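# (illustrative) if n=6 produced ngot=4 and n+1 produced order 5, the
# formula below predicts ndo = 6 + (6 - 4)*1/(5 - 4) = 8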
for more in range(1, 9):
s1 = self._eval_nseries(x, n=n + more, logx=logx)
newn = s1.getn()
if newn != ngot:
ndo = n + (n - ngot)*more/(newn - ngot)
s1 = self._eval_nseries(x, n=ndo, logx=logx)
while s1.getn() < n:
s1 = self._eval_nseries(x, n=ndo, logx=logx)
ndo += 1
break
else:
raise ValueError('Could not calculate %s terms for %s'
% (str(n), self))
s1 += C.Order(x**n, x)
o = s1.getO()
s1 = s1.removeO()
else:
o = C.Order(x**n, x)
if (s1 + o).removeO() == s1:
o = S.Zero
try:
return collect(s1, x) + o
except NotImplementedError:
return s1 + o
else: # lseries handling
def yield_lseries(s):
"""Return terms of lseries one at a time."""
for si in s:
if not si.is_Add:
yield si
continue
# yield terms 1 at a time if possible
# by increasing order until all the
# terms have been returned
yielded = 0
o = C.Order(si, x)*x
ndid = 0
ndo = len(si.args)
while 1:
do = (si - yielded + o).removeO()
o *= x
if not do or do.is_Order:
continue
if do.is_Add:
ndid += len(do.args)
else:
ndid += 1
yield do
if ndid == ndo:
break
yielded += do
return yield_lseries(self.removeO()._eval_lseries(x, logx=logx))
def taylor_term(self, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n-times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
x = sympify(x)
_x = C.Dummy('x')
return self.subs(x, _x).diff(_x, n).subs(_x, x).subs(x, 0) * x**n / C.factorial(n)
def lseries(self, x=None, x0=0, dir='+', logx=None):
"""
Wrapper for series yielding an iterator of the terms of the series.
Note: an infinite series will yield an infinite iterator. The following,
for example, will never terminate. It will just keep printing terms
of the sin(x) series::
for term in sin(x).lseries(x):
print term
The advantage of lseries() over nseries() is that many times you are
just interested in the next term in the series (i.e. the first term for
example), but you don't know how many you should ask for in nseries()
using the "n" parameter.
See also nseries().
"""
return self.series(x, x0, n=None, dir=dir, logx=logx)
def _eval_lseries(self, x, logx=None):
# default implementation of lseries is using nseries(), and adaptively
# increasing the "n". As you can see, it is not very efficient, because
# we are calculating the series over and over again. Subclasses should
# override this method and implement much more efficient yielding of
# terms.
n = 0
series = self._eval_nseries(x, n=n, logx=logx)
if not series.is_Order:
if series.is_Add:
yield series.removeO()
else:
yield series
return  # end the generator; raising StopIteration here becomes a RuntimeError under PEP 479
while series.is_Order:
n += 1
series = self._eval_nseries(x, n=n, logx=logx)
e = series.removeO()
yield e
while 1:
while 1:
n += 1
series = self._eval_nseries(x, n=n, logx=logx).removeO()
if e != series:
break
yield series - e
e = series
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None):
"""
Wrapper to _eval_nseries if assumptions allow, else to series.
If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is
called. This calculates "n" terms in the innermost expressions and
then builds up the final series just by "cross-multiplying" everything
out.
The optional ``logx`` parameter can be used to replace any log(x) in the
returned series with a symbolic value to avoid evaluating log(x) at 0. A
symbol to use in place of log(x) should be provided.
Advantage -- it's fast, because we don't have to determine how many
terms we need to calculate in advance.
Disadvantage -- you may end up with fewer terms than you may have
expected, but the O(x**n) term appended will always be correct and
so the result, though perhaps shorter, will also be correct.
If any of those assumptions is not met, this is treated like a
wrapper to series which will try harder to return the correct
number of terms.
See also lseries().
Examples
========
>>> from sympy import sin, log, Symbol
>>> from sympy.abc import x, y
>>> sin(x).nseries(x, 0, 6)
x - x**3/6 + x**5/120 + O(x**6)
>>> log(x+1).nseries(x, 0, 5)
x - x**2/2 + x**3/3 - x**4/4 + O(x**5)
Handling of the ``logx`` parameter --- in the following example the
expansion fails since ``sin`` does not have an asymptotic expansion
at -oo (the limit of log(x) as x approaches 0):
>>> e = sin(log(x))
>>> e.nseries(x, 0, 6)
Traceback (most recent call last):
...
PoleError: ...
...
>>> logx = Symbol('logx')
>>> e.nseries(x, 0, 6, logx=logx)
sin(logx)
In the following example, the expansion works but gives only an Order term
unless the ``logx`` parameter is used:
>>> e = x**y
>>> e.nseries(x, 0, 2)
O(log(x)**2)
>>> e.nseries(x, 0, 2, logx=logx)
exp(logx*y)
"""
if x and not x in self.free_symbols:
return self
if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):
return self.series(x, x0, n, dir)
else:
return self._eval_nseries(x, n=n, logx=logx)
def _eval_nseries(self, x, n, logx):
"""
Return terms of series for self up to O(x**n) at x=0
from the positive direction.
This is a method that should be overridden in subclasses. Users should
never call this method directly (use .nseries() instead), so you don't
have to write docstrings for _eval_nseries().
"""
from sympy.utilities.misc import filldedent
raise NotImplementedError(filldedent("""
The _eval_nseries method should be added to
%s to give terms up to O(x**n) at x=0
from the positive direction so it is available when
nseries calls it.""" % self.func)
)
def limit(self, x, xlim, dir='+'):
""" Compute limit x->xlim.
"""
from sympy.series.limits import limit
return limit(self, x, xlim, dir)
def compute_leading_term(self, x, logx=None):
"""
as_leading_term is only allowed for results of .series()
This is a wrapper to compute a series first.
"""
from sympy.series.gruntz import calculate_series
if self.removeO() == 0:
return self
if logx is None:
d = C.Dummy('logx')
s = calculate_series(self, x, d).subs(d, C.log(x))
else:
s = calculate_series(self, x, logx)
return s.as_leading_term(x)
@cacheit
def as_leading_term(self, *symbols):
"""
Returns the leading (nonzero) term of the series expansion of self.
The _eval_as_leading_term routines are used to do this, and they must
always return a non-zero value.
Examples
========
>>> from sympy.abc import x
>>> (1 + x + x**2).as_leading_term(x)
1
>>> (1/x**2 + x + x**2).as_leading_term(x)
x**(-2)
"""
from sympy import powsimp
if len(symbols) > 1:
c = self
for x in symbols:
c = c.as_leading_term(x)
return c
elif not symbols:
return self
x = sympify(symbols[0])
if not x.is_Symbol:
raise ValueError('expecting a Symbol but got %s' % x)
if x not in self.free_symbols:
return self
obj = self._eval_as_leading_term(x)
if obj is not None:
return powsimp(obj, deep=True, combine='exp')
raise NotImplementedError('as_leading_term(%s, %s)' % (self, x))
def _eval_as_leading_term(self, x):
return self
def as_coeff_exponent(self, x):
""" ``c*x**e -> c,e`` where x can be any symbolic expression.
"""
from sympy import collect
s = collect(self, x)
c, p = s.as_coeff_mul(x)
if len(p) == 1:
b, e = p[0].as_base_exp()
if b == x:
return c, e
return s, S.Zero
def leadterm(self, x):
"""
Returns the leading term a*x**b as a tuple (a, b).
Examples
========
>>> from sympy.abc import x
>>> (1+x+x**2).leadterm(x)
(1, 0)
>>> (1/x**2+x+x**2).leadterm(x)
(1, -2)
"""
l = self.as_leading_term(x)
d = C.Dummy('logx')
if l.has(C.log(x)):
l = l.subs(C.log(x), d)
c, e = l.as_coeff_exponent(x)
if x in c.free_symbols:
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
cannot compute leadterm(%s, %s). The coefficient
should have been free of x but got %s""" % (self, x, c)))
c = c.subs(d, C.log(x))
return c, e
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
return S.One, self
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return S.Zero, self
###################################################################################
##################### DERIVATIVE, INTEGRAL, FUNCTIONAL METHODS ####################
###################################################################################
def diff(self, *symbols, **assumptions):
new_symbols = list(map(sympify, symbols)) # e.g. x, 2, y, z
assumptions.setdefault("evaluate", True)
return Derivative(self, *new_symbols, **assumptions)
###########################################################################
###################### EXPRESSION EXPANSION METHODS #######################
###########################################################################
# Relevant subclasses should override _eval_expand_hint() methods. See
# the docstring of expand() for more info.
def _eval_expand_complex(self, **hints):
real, imag = self.as_real_imag(**hints)
return real + S.ImaginaryUnit*imag
@staticmethod
def _expand_hint(expr, hint, deep=True, **hints):
"""
Helper for ``expand()``. Recursively calls ``expr._eval_expand_hint()``.
Returns ``(expr, hit)``, where expr is the (possibly) expanded
``expr`` and ``hit`` is ``True`` if ``expr`` was truly expanded and
``False`` otherwise.
"""
hit = False
# XXX: Hack to support non-Basic args
# |
# V
if deep and getattr(expr, 'args', ()) and not expr.is_Atom:
sargs = []
for arg in expr.args:
arg, arghit = Expr._expand_hint(arg, hint, **hints)
hit |= arghit
sargs.append(arg)
if hit:
expr = expr.func(*sargs)
if hasattr(expr, hint):
newexpr = getattr(expr, hint)(**hints)
if newexpr != expr:
return (newexpr, True)
return (expr, hit)
@cacheit
def expand(self, deep=True, modulus=None, power_base=True, power_exp=True,
mul=True, log=True, multinomial=True, basic=True, **hints):
"""
Expand an expression using hints.
See the docstring of the expand() function in sympy.core.function for
more information.
"""
from sympy.simplify.simplify import fraction
hints.update(power_base=power_base, power_exp=power_exp, mul=mul,
log=log, multinomial=multinomial, basic=basic)
expr = self
if hints.pop('frac', False):
n, d = [a.expand(deep=deep, modulus=modulus, **hints)
for a in fraction(self)]
return n/d
elif hints.pop('denom', False):
n, d = fraction(self)
return n/d.expand(deep=deep, modulus=modulus, **hints)
elif hints.pop('numer', False):
n, d = fraction(self)
return n.expand(deep=deep, modulus=modulus, **hints)/d
# Although the hints are sorted here, an earlier hint may get applied
# at a given node in the expression tree before another because of how
# the hints are applied. e.g. expand(log(x*(y + z))) -> log(x*y +
# x*z) because while applying log at the top level, log and mul are
# applied at the deeper level in the tree so that when the log at the
# upper level gets applied, the mul has already been applied at the
# lower level.
# Additionally, because hints are only applied once, the expression
# may not be expanded all the way. For example, if mul is applied
# before multinomial, x*(x + 1)**2 won't be expanded all the way. For
# now, we just use a special case to make multinomial run before mul,
# so that at least polynomials will be expanded all the way. In the
# future, smarter heuristics should be applied.
# TODO: Smarter heuristics
def _expand_hint_key(hint):
"""Make multinomial come before mul"""
if hint == 'mul':
return 'mulz'
return hint
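# e.g. sorted(['power_base', 'mul', 'multinomial'], key=_expand_hint_key)
# gives ['multinomial', 'mul', 'power_base'] since 'multinomial' < 'mulz'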
for hint in sorted(hints.keys(), key=_expand_hint_key):
use_hint = hints[hint]
if use_hint:
hint = '_eval_expand_' + hint
expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints)
while True:
was = expr
if hints.get('multinomial', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_multinomial', deep=deep, **hints)
if hints.get('mul', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_mul', deep=deep, **hints)
if hints.get('log', False):
expr, _ = Expr._expand_hint(
expr, '_eval_expand_log', deep=deep, **hints)
if expr == was:
break
if modulus is not None:
modulus = sympify(modulus)
if not modulus.is_Integer or modulus <= 0:
raise ValueError(
"modulus must be a positive integer, got %s" % modulus)
terms = []
for term in Add.make_args(expr):
coeff, tail = term.as_coeff_Mul(rational=True)
coeff %= modulus
if coeff:
terms.append(coeff*tail)
expr = Add(*terms)
return expr
###########################################################################
################### GLOBAL ACTION VERB WRAPPER METHODS ####################
###########################################################################
def integrate(self, *args, **kwargs):
"""See the integrate function in sympy.integrals"""
from sympy.integrals import integrate
return integrate(self, *args, **kwargs)
def simplify(self, ratio=1.7, measure=None):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
from sympy.core.function import count_ops
measure = measure or count_ops
return simplify(self, ratio, measure)
def nsimplify(self, constants=[], tolerance=None, full=False):
"""See the nsimplify function in sympy.simplify"""
from sympy.simplify import nsimplify
return nsimplify(self, constants, tolerance, full)
def separate(self, deep=False, force=False):
"""See the separate function in sympy.simplify"""
from sympy.core.function import expand_power_base
return expand_power_base(self, deep=deep, force=force)
def collect(self, syms, func=None, evaluate=True, exact=False, distribute_order_term=True):
"""See the collect function in sympy.simplify"""
from sympy.simplify import collect
return collect(self, syms, func, evaluate, exact, distribute_order_term)
def together(self, *args, **kwargs):
"""See the together function in sympy.polys"""
from sympy.polys import together
return together(self, *args, **kwargs)
def apart(self, x=None, **args):
"""See the apart function in sympy.polys"""
from sympy.polys import apart
return apart(self, x, **args)
def ratsimp(self):
"""See the ratsimp function in sympy.simplify"""
from sympy.simplify import ratsimp
return ratsimp(self)
def trigsimp(self, **args):
"""See the trigsimp function in sympy.simplify"""
from sympy.simplify import trigsimp
return trigsimp(self, **args)
def radsimp(self):
"""See the radsimp function in sympy.simplify"""
from sympy.simplify import radsimp
return radsimp(self)
def powsimp(self, deep=False, combine='all'):
"""See the powsimp function in sympy.simplify"""
from sympy.simplify import powsimp
return powsimp(self, deep, combine)
def combsimp(self):
"""See the combsimp function in sympy.simplify"""
from sympy.simplify import combsimp
return combsimp(self)
def factor(self, *gens, **args):
"""See the factor() function in sympy.polys.polytools"""
from sympy.polys import factor
return factor(self, *gens, **args)
def refine(self, assumption=True):
"""See the refine function in sympy.assumptions"""
from sympy.assumptions import refine
return refine(self, assumption)
def cancel(self, *gens, **args):
"""See the cancel function in sympy.polys"""
from sympy.polys import cancel
return cancel(self, *gens, **args)
def invert(self, g):
"""See the invert function in sympy.polys"""
from sympy.polys import invert
return invert(self, g)
def round(self, p=0):
"""Return x rounded to the given decimal place.
If a complex number would result, apply round to the real
and imaginary components of the number.
Examples
========
>>> from sympy import pi, E, I, S, Add, Mul, Number
>>> S(10.5).round()
11.
>>> pi.round()
3.
>>> pi.round(2)
3.14
>>> (2*pi + E*I).round()
6. + 3.*I
The round method has a chopping effect:
>>> (2*pi + I/10).round()
6.
>>> (pi/10 + 2*I).round()
2.*I
>>> (pi/10 + E*I).round(2)
0.31 + 2.72*I
Notes
=====
Do not confuse the Python builtin function, round, with the
SymPy method of the same name. The former always returns a float
(or raises an error if applied to a complex value) while the
latter returns either a Number or a complex number:
>>> isinstance(round(S(123), -2), Number)
False
>>> isinstance(S(123).round(-2), Number)
True
>>> isinstance((3*I).round(), Mul)
True
>>> isinstance((1 + 3*I).round(), Add)
True
"""
x = self
if not x.is_number:
raise TypeError('%s is not a number' % type(x))
if x in (S.NaN, S.Infinity, S.NegativeInfinity, S.ComplexInfinity):
return x
if not x.is_real:
i, r = x.as_real_imag()
return i.round(p) + S.ImaginaryUnit*r.round(p)
if not x:
return x
p = int(p)
precs = [f._prec for f in x.atoms(C.Float)]
dps = prec_to_dps(max(precs)) if precs else None
mag_first_dig = _mag(x)
allow = digits_needed = mag_first_dig + p
if dps is not None and allow > dps:
allow = dps
mag = Pow(10, p) # magnitude needed to bring digit p to units place
xwas = x
x += 1/(2*mag) # add the half for rounding
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
if i10.is_negative:
x = xwas - 1/(2*mag) # should have gone the other way
i10 = 10*mag*x.n((dps if dps is not None else digits_needed) + 1)
rv = -(Integer(-i10)//10)
else:
rv = Integer(i10)//10
q = 1
if p > 0:
q = mag
elif p < 0:
rv /= mag
rv = Rational(rv, q)
if rv.is_Integer:
# use str or else it won't be a float
return C.Float(str(rv), digits_needed)
else:
if not allow and rv > self:
allow += 1
return C.Float(rv, allow)
class AtomicExpr(Atom, Expr):
"""
A parent class for object which are both atoms and Exprs.
For example: Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_number = False
is_Atom = True
__slots__ = []
def _eval_derivative(self, s):
if self == s:
return S.One
return S.Zero
def _eval_is_polynomial(self, syms):
return True
def _eval_is_rational_function(self, syms):
return True
def _eval_is_algebraic_expr(self, syms):
return True
def _eval_nseries(self, x, n, logx):
return self
def _mag(x):
"""Return integer ``i`` such that .1 <= x/10**i < 1
Examples
========
>>> from sympy.core.expr import _mag
>>> from sympy import Float
>>> _mag(Float(.1))
0
>>> _mag(Float(.01))
-1
>>> _mag(Float(1234))
4
"""
from math import log10, ceil, log
xpos = abs(x.n())
if not xpos:
return S.Zero
try:
mag_first_dig = int(ceil(log10(xpos)))
except (ValueError, OverflowError):
mag_first_dig = int(ceil(C.Float(mpf_log(xpos._mpf_, 53))/log(10)))
# check that we aren't off by 1
if (xpos/10**mag_first_dig) >= 1:
assert 1 <= (xpos/10**mag_first_dig) < 10
mag_first_dig += 1
return mag_first_dig
from .mul import Mul
from .add import Add
from .power import Pow
from .function import Derivative, Function
from .mod import Mod
from .exprtools import factor_terms
from .numbers import Integer, Rational
| 34.636163
| 109
| 0.524881
|
287301ccc71e2a8280fe9e14b1ccd360ce80414f
| 2,254
|
py
|
Python
|
rasa_core/visualize.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 1
|
2018-12-05T22:30:43.000Z
|
2018-12-05T22:30:43.000Z
|
rasa_core/visualize.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 6
|
2020-01-28T22:38:59.000Z
|
2022-02-10T00:11:15.000Z
|
rasa_core/visualize.py
|
DavidSted/rasa_core
|
2b072e564373ad680600947521805911f44f3732
|
[
"Apache-2.0"
] | 6
|
2018-09-19T20:53:13.000Z
|
2018-10-24T14:37:12.000Z
|
import argparse
import logging
import os
from rasa_core import utils, config, cli
from rasa_core.agent import Agent
logger = logging.getLogger(__name__)
def create_argument_parser():
"""Parse all the command line arguments for the visualisation script."""
parser = argparse.ArgumentParser(
description='Visualize the stories in a dialogue training file')
parser.add_argument(
'-o', '--output',
required=True,
type=str,
help="filename of the output path, e.g. 'graph.html")
parser.add_argument(
'-m', '--max_history',
default=2,
type=int,
help="max history to consider when merging "
"paths in the output graph")
parser.add_argument(
'-nlu', '--nlu_data',
default=None,
type=str,
help="path of the Rasa NLU training data, "
"used to insert example messages into the graph")
utils.add_logging_option_arguments(parser)
cli.arguments.add_config_arg(parser, nargs=1)
cli.arguments.add_domain_arg(parser)
cli.arguments.add_model_and_story_group(parser,
allow_pretrained_model=False)
return parser
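# A sketch of a typical invocation (the file names below are hypothetical, and
# the exact domain/story flags come from the cli.arguments helpers above, so
# they may differ between rasa_core versions):
#
#   python -m rasa_core.visualize -o graph.html \
#       -c config.yml -d domain.yml -s data/stories.md \
#       --nlu_data data/nlu.md --max_history 2
#
# The script then writes an HTML graph of the merged story paths to graph.html.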
if __name__ == '__main__':
arg_parser = create_argument_parser()
cmdline_arguments = arg_parser.parse_args()
utils.configure_colored_logging(cmdline_arguments.loglevel)
policies = config.load(cmdline_arguments.config[0])
agent = Agent(cmdline_arguments.domain, policies=policies)
# this is optional, only needed if the `/greet` type of
# messages in the stories should be replaced with actual
# messages (e.g. `hello`)
if cmdline_arguments.nlu_data is not None:
from rasa_nlu.training_data import load_data
nlu_data = load_data(cmdline_arguments.nlu_data)
else:
nlu_data = None
stories = cli.stories_from_cli_args(cmdline_arguments)
logger.info("Starting to visualize stories...")
agent.visualize(stories, cmdline_arguments.output,
cmdline_arguments.max_history,
nlu_training_data=nlu_data)
logger.info("Finished graph creation. Saved into file://{}".format(
os.path.abspath(cmdline_arguments.output)))
| 30.876712
| 76
| 0.672582
|
354a5b06363de9e8dcf44e2c8f7978c32b121579
| 17,765
|
py
|
Python
|
Examples/FanFicFare-master/fanficfare/adapters/base_efiction_adapter.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
Examples/FanFicFare-master/fanficfare/adapters/base_efiction_adapter.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
Examples/FanFicFare-master/fanficfare/adapters/base_efiction_adapter.py
|
TomNorrie/ePubify
|
89c89bd22cafdea787f3131ca9cdc8336209ed6c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 Fanficdownloader team, 2017 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
# import time
# import urllib
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
import bs4 as bs
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
"""
This is a generic adapter for eFiction based archives (see
http://fanlore.org/wiki/List_of_eFiction_Archives for a list).
Most of them share common traits:
* No HTTPS
* 'www.' is optional
* Default story template is 'viewstory.php' with arguments
* 'sid' the storyId
* 'chapter' for chapters (will be thrown away anyway by
stripURLParameters in base_adapter
Use Printable version which is easier to parse and has everything in one
page and cache between extractChapterUrlsAndMetadata and getChapterText
"""
# PHP constants
_RUSERSONLY = 'Registered Users Only'
_NOSUCHACCOUNT = "There is no such account on our website"
_WRONGPASSWORD = "That password doesn't match the one in our database"
_USERACCOUNT = 'Member Account'
# Regular expressions
_REGEX_WARNING_PARAM = re.compile("warning=(?P<warningId>\d+)")
_REGEX_CHAPTER_B = re.compile("^(?P<chapterId>\d+)\.")
_REGEX_CHAPTER_PARAM = re.compile("chapter=(?P<chapterId>\d+)$")
_REGEX_CHAPTER_FRAGMENT = re.compile("^#(?P<chapterId>\d+)$")
_REGEX_DOESNT_START_WITH_HTTP = re.compile("^(?!http)")
class BaseEfictionAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.story.setMetadata('siteabbrev',self.getSiteAbbrev())
self.set_decode(self.getEncoding())
storyId = re.compile(self.getSiteURLPattern()).match(self.url).group('storyId')
self.story.setMetadata('storyId', storyId)
self._setURL(self.getViewStoryUrl(storyId))
self.triedLoggingIn = False
self.triedAcceptWarnings = False
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
@classmethod
def getConfigSections(cls):
"Only needs to be overriden if has additional ini sections."
return ['base_efiction',cls.getConfigSection()]
@classmethod
def getAcceptDomains(cls):
return [cls.getSiteDomain(),'www.' + cls.getSiteDomain()]
@classmethod
def getSiteExampleURLs(cls):
return cls.getViewStoryUrl('1234') + ' ' + cls.getViewStoryUrl('1234') + '&chapter=2'
@classmethod
def getSiteURLPattern(self):
return r"https?://(www\.)?%s%s/%s\?sid=(?P<storyId>\d+)" % (self.getSiteDomain(), self.getPathToArchive(), self.getViewStoryPhpName())
@classmethod
def getSiteURLFragment(self):
return self.getSiteDomain()+self.getPathToArchive()
@classmethod
def getEncoding(cls):
"""
Return an array of character encodings to try to decode the HTML with
"""
return ["Windows-1252", "utf8"]
@classmethod
def getPathToArchive(cls):
"""
Get the path segment of the archive, default '/'.
In many cases, it's '/archive' or '/fanfiction'
"""
return ""
@classmethod
def getViewStoryPhpName(cls):
"""
Get the name of the story PHP script, by default 'viewstory.php'
"""
return "viewstory.php"
@classmethod
def getViewUserPhpName(cls):
"""
Get the name of the user PHP script, by default 'viewuser.php'
"""
return "viewuser.php"
@classmethod
def getUserPhpName(cls):
"""
Get the name of the user PHP script, by default 'viewuser.php'
"""
return "user.php"
@classmethod
def getDateFormat(self):
"""
Describe the date format of this site in terms of strftime
See http://docs.python.org/library/datetime.html#strftime-strptime-behavior
"""
return "%d %b %Y"
@classmethod
def getUrlForPhp(self, php):
return "http://%s%s/%s" % (self.getSiteDomain(), self.getPathToArchive(), php)
@classmethod
def getViewStoryUrl(self, storyId):
"""
Get the URL to a user page on this site.
"""
return "%s?sid=%s" % (self.getUrlForPhp(self.getViewStoryPhpName()), storyId)
@classmethod
def getViewUserUrl(self, userId):
"""
Get the URL to a user page on this site.
"""
return "%s?uid=%s" % (self.getUrlForPhp(self.getViewUserPhpName()), userId)
@classmethod
def getLoginUrl(self):
"""
Get the URL to the login page on this site.
"""
return "%s?action=login" % self.getUrlForPhp(self.getUserPhpName())
@classmethod
def getMessageRegisteredUsersOnly(self):
"""
Constant _RUSERSONLY defined in languages/en.php
"""
return _RUSERSONLY
@classmethod
def getMessageThereIsNoSuchAccount(self):
"""
Constant _NOSUCHACCOUNT defined in languages/en.php
"""
return _NOSUCHACCOUNT
@classmethod
def getMessageWrongPassword(self):
"""
Constant _WRONGPASSWORD defined in languages/en.php
"""
return _WRONGPASSWORD
@classmethod
def getMessageMemberAccount(self):
"""
Constant _USERACCOUNT defined in languages/en.php
"""
return _USERACCOUNT
## Login seems to be reasonably standard across eFiction sites.
@classmethod
def needToLoginCheck(self, html):
"""
Return whether the HTML contains either of _RUSERSONLY, _NOSUCHACCOUNT or _WRONGPASSWORD
"""
        return self.getMessageRegisteredUsersOnly() in html \
                or self.getMessageThereIsNoSuchAccount() in html \
                or self.getMessageWrongPassword() in html
def _fetch_to_soup(self, url):
"""
Fetch a HTML document, fix it and parse it to BeautifulSoup.
Replaces old characters, broken meta-tags, non-self-closing hr/br.
Makes image links absolute so they can be downloaded
"""
try:
html = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
# Some site use old, old-school Comments <!- comment -> (single dash)
html = re.sub("<!-.+?->", "", html)
# There is a problem with meta tags on some sites where spaces aren't
# properly encoded
html = re.sub("<meta[^<>]+>(.*</meta>)?", "", html)
# fix non-closing hr/br
html = html.replace("<hr>", "<hr/>")
html = html.replace("<br>", "<br/>")
soup = self.make_soup(html)
## fix all local image 'src' to absolute
for img in soup.findAll("img", {"src": _REGEX_DOESNT_START_WITH_HTTP}):
# TODO handle '../../' and so on
if img['src'].startswith('/'):
img['src'] = img['src'][1:]
img['src'] = "http://%s%s/%s" % (self.getSiteDomain(), self.getPathToArchive(), img['src'])
return soup
def performLogin(self, url):
params = {}
if self.password:
params['penname'] = self.username
params['password'] = self.password
else:
params['penname'] = self.getConfig("username")
params['password'] = self.getConfig("password")
params['cookiecheck'] = '1'
params['submit'] = 'Submit'
logger.debug("Will now login to URL (%s) as (%s)" % (self.getLoginUrl(), params['penname']))
d = self._fetchUrl(self.getLoginUrl(), params)
if self.getMessageMemberAccount() not in d : #Member Account
logger.info("Failed to login to URL <%s> as '%s'" % (self.getLoginUrl(), params['penname']))
raise exceptions.FailedToLogin(url, params['penname'])
return False
else:
return True
def handleMetadataPairHTML(self, key, valueHTML):
"""
Handles a key-value pair of story metadata.
Returns straight away if the value is 'None' (that's a string)
Allows for handling of HTML values before calling
handleMetadataPair() to handle string values.
"""
if valueHTML == 'None':
return
elif key == 'Summary':
## will be de-HTML'd inside setDescription if keep_summary_html:false
self.setDescription(self.url, valueHTML)
else:
## strip trailing line breaks
valueStr = re.sub("<br/>", "", valueHTML)
valueStr = stripHTML(valueStr)
self.handleMetadataPair(key,valueStr)
def handleMetadataPair(self, key, value):
"""
Handles a key-value pair of story metadata.
Returns straight away if the value is 'None' (that's a string)
Can be overridden by subclasses::
def handleMetadataPair(self, key, value):
if key == 'MyCustomKey':
self.story.setMetadata('somekye', value)
else:
super(NameOfMyAdapter, self).handleMetadata(key, value)
"""
if value == 'None':
return
elif 'Genre' in key:
for val in re.split("\s*,\s*", value):
self.story.addToList('genre', val)
elif 'Warning' in key:
for val in re.split("\s*,\s*", value):
self.story.addToList('warnings', val)
elif 'Characters' in key:
for val in re.split("\s*,\s*", value):
self.story.addToList('characters', val)
elif 'Categories' in key:
for val in re.split("\s*,\s*", value):
self.story.addToList('category', val)
elif 'Challenges' in key:
for val in re.split("\s*,\s*", value):
# TODO this should be an official field I guess
self.story.addToList('challenge', val)
elif key == 'Chapters':
self.story.setMetadata('numChapters', int(value))
elif key == 'Rating' or key == 'Rated':
self.story.setMetadata('rating', value)
elif key == 'Word count':
self.story.setMetadata('numWords', value)
elif key == 'Completed':
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
elif key == 'Read':
# TODO this should be an official field I guess
self.story.setMetadata('readings', value)
elif key == 'Published':
self.story.setMetadata('datePublished', makeDate(value, self.getDateFormat()))
elif key == 'Updated':
self.story.setMetadata('dateUpdated', makeDate(value, self.getDateFormat()))
elif key == 'Pairing':
for val in re.split("\s*,\s*", value):
self.story.addToList('ships', val)
elif key == 'Series':
## TODO is not a link in the printable view, so no seriesURL possible
self.story.setMetadata('series', value)
else:
# Any other metadata found, convert label to lower case
# w/o spaces and use as key. Still needs to be in
# extra_valid_entries to be used.
autokey = key.replace(' ','').lower()
for val in re.split("\s*,\s*", value):
self.story.addToList(autokey, val)
logger.debug("Auto metadata: entry:%s %s_label:%s value:%s" % (autokey, autokey, key, value))
def extractChapterUrlsAndMetadata(self):
printUrl = self.url + '&action=printable&textsize=0&chapter='
if self.getConfig('bulk_load'):
printUrl += 'all'
else:
printUrl += '1'
soup = self._fetch_to_soup(printUrl)
## Handle warnings and login checks
errorDiv = soup.find("div", "errortext")
if errorDiv is None:
errorDiv = soup.find("div", "errormsg") # sometimes different class.
while errorDiv is not None:
if self.getMessageRegisteredUsersOnly() in errorDiv.prettify():
if not self.triedLoggingIn:
self.performLogin(self.url)
soup = self._fetch_to_soup(printUrl)
errorDiv = soup.find("div", "errortext")
self.triedLoggingIn = True
else:
raise exceptions.FailedToLogin(self.url, unicode(errorDiv))
elif "This story has not been validated" in stripHTML(errorDiv):
raise exceptions.AccessDenied(self.getSiteDomain() +" says: "+stripHTML(errorDiv))
else:
warningLink = errorDiv.find("a")
if warningLink is not None and ( \
'ageconsent' in warningLink['href'] \
or 'warning' in warningLink['href']):
if not self.triedAcceptWarnings:
if not (self.is_adult or self.getConfig("is_adult")):
raise exceptions.AdultCheckRequired(self.url)
                        # XXX Using this method, we're independent of getHighestWarningLevel
printUrl += "&ageconsent=ok&warning=%s" % (_REGEX_WARNING_PARAM.search(warningLink['href']).group(1))
# printUrl += "&ageconsent=ok&warning=%s" % self.getHighestWarningLevel()
soup = self._fetch_to_soup(printUrl)
errorDiv = soup.find("div", "errortext")
self.triedAcceptWarnings = True
else:
raise exceptions.FailedToDownload("Error with URL: %s (%s)" % (self.url,stripHTML(errorDiv)))
else:
raise exceptions.FailedToDownload("Error with URL: %s (%s)" % (self.url,stripHTML(errorDiv)))
# title and author
pagetitleDiv = soup.find("div", {"id": "pagetitle"})
if pagetitleDiv.find('a') is None:
raise exceptions.FailedToDownload("Couldn't find title and author")
self.story.setMetadata('title', pagetitleDiv.find("a").string)
authorLink = pagetitleDiv.findAll("a")[1]
self.story.setMetadata('author', authorLink.string)
self.story.setMetadata('authorId', re.search("\d+", authorLink['href']).group(0))
self.story.setMetadata('authorUrl', self.getViewUserUrl(self.story.getMetadata('authorId')))
## Parse the infobox
labelSpans = soup.find("div", "infobox").find("div", "content").findAll("span", "label")
for labelSpan in labelSpans:
valueStr = ""
nextEl = labelSpan.nextSibling
while nextEl is not None and not (\
type(nextEl) is bs.Tag \
and nextEl.name == "span" \
and 'label' in nextEl.get('class',[]) \
):
## must string copy nextEl or nextEl will change trees
if (type(nextEl) is bs.Tag):
valueStr += nextEl.prettify()
else:
valueStr += unicode(nextEl)
nextEl = nextEl.nextSibling
key = labelSpan.string.strip()
## strip trailing colons
key = re.sub("\s*:\s*$", "", key)
## strip whitespace
key = key.strip()
self.handleMetadataPairHTML(key, valueStr)
## Retrieving the story notes
sn = soup.find('div', {'class':'noteinfo'})
if sn:
self.story.setMetadata('storynotes', stripHTML(sn))
## Chapter URLs
# If we didn't bulk-load the whole chapter we now need to load
# the non-printable HTML version of the landing page (i.e. the story
# URL to get the Chapter titles
if not self.getConfig('bulk_load'):
soup = self._fetch_to_soup(self.url + '&index=1')
chapterLinks = []
for b in soup.find_all("b"):
m = _REGEX_CHAPTER_B.search(stripHTML(b))
if m:
chapterId = m.group('chapterId')
chapterLink = b.findNext("a")
chapterLink['href'] = "%s&chapter=%s" % (self.url, chapterId)
if chapterLink.string !='Back to index':
self.chapterUrls.append((chapterLink.string, chapterLink['href']))
## Store reference to soup for getChapterText
self.html = soup
def getChapterText(self, url):
if self.getConfig('bulk_load'):
logger.debug('Cached chapter text from <%s>' % url)
anchor = _REGEX_CHAPTER_PARAM.search(url).group(1)
chapterDiv = self.html.find("a", {"name": anchor}).parent.findNext("div", "chapter")
else:
logger.debug('Download chapter text from <%s>' % url)
soup = self._fetch_to_soup(url + '&action=printable')
chapterDiv = soup.find("div", "chapter")
return self.utf8FromSoup(self.url, chapterDiv)
def getClass():
return BaseEfictionAdapter
| 38.122318
| 142
| 0.590149
|
f9a95880fd2da2a30d55148539cb1ef04bfc78b0
| 5,457
|
py
|
Python
|
framework/auth/core.py
|
shun-nakazawa/RDM-osf.io
|
cdf0101426e3a3637f76bd7f477897947f163366
|
[
"Apache-2.0"
] | null | null | null |
framework/auth/core.py
|
shun-nakazawa/RDM-osf.io
|
cdf0101426e3a3637f76bd7f477897947f163366
|
[
"Apache-2.0"
] | 8
|
2018-11-09T05:57:09.000Z
|
2019-07-25T10:27:55.000Z
|
framework/auth/core.py
|
shun-nakazawa/RDM-osf.io
|
cdf0101426e3a3637f76bd7f477897947f163366
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime as dt
import logging
from django.utils import timezone
from django.db.models import Q
from django.db.models import Subquery
from django.core.validators import URLValidator
from flask import request
from framework.sessions import session
from osf.exceptions import ValidationValueError, ValidationError
from osf.utils.requests import check_select_for_update
from website import security, settings
name_formatters = {
'long': lambda user: user.fullname,
'surname': lambda user: user.family_name if user.family_name else user.fullname,
'initials': lambda user: u'{surname}, {initial}.'.format(
surname=user.family_name,
initial=user.given_name_initial,
),
}
logger = logging.getLogger(__name__)
def generate_verification_key(verification_type=None):
"""
Generate a one-time verification key with an optional expiration time.
The type of the verification key determines the expiration time defined in `website.settings.EXPIRATION_TIME_DICT`.
:param verification_type: None, verify, confirm or claim
:return: a string or a dictionary
"""
token = security.random_string(30)
# v1 with only the token
if not verification_type:
return token
# v2 with a token and the expiration time
expires = timezone.now() + dt.timedelta(minutes=settings.EXPIRATION_TIME_DICT[verification_type])
return {
'token': token,
'expires': expires,
}
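# Illustrative return values (a sketch, not actual output):
#   generate_verification_key()          -> '<30-character random string>'
#   generate_verification_key('confirm') -> {'token': '<30-character random string>',
#                                             'expires': now + EXPIRATION_TIME_DICT['confirm'] minutes}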
def validate_year(item):
if item:
try:
int(item)
except ValueError:
raise ValidationValueError('Please enter a valid year.')
else:
if len(item) != 4:
raise ValidationValueError('Please enter a valid year.')
validate_url = URLValidator()
def validate_profile_websites(profile_websites):
for value in profile_websites or []:
try:
validate_url(value)
except ValidationError:
# Reraise with a better message
raise ValidationError('Invalid personal URL.')
def validate_social(value):
validate_profile_websites(value.get('profileWebsites'))
def get_current_user_id():
return session._get_current_object() and session.data.get('auth_user_id')
# TODO - rename to _get_current_user_from_session /HRYBACKI
def _get_current_user():
from osf.models import OSFUser
current_user_id = get_current_user_id()
if current_user_id:
return OSFUser.load(current_user_id, select_for_update=check_select_for_update(request))
else:
return None
# TODO: This should be a class method of User?
def get_user(email=None, password=None, token=None, external_id_provider=None, external_id=None, eppn=None):
"""
Get an instance of `User` matching the provided params.
1. email
2. email and password
    3. token
4. external_id_provider and external_id
:param token: the token in verification key
:param email: user's email
:param password: user's password
:param external_id_provider: the external identity provider
:param external_id: the external id
:rtype User or None
"""
from osf.models import OSFUser, Email
    if not any([email, password, token, external_id_provider, external_id, eppn]):
return None
if password and not email:
raise AssertionError('If a password is provided, an email must also be provided.')
qs = OSFUser.objects.filter()
if email:
email = email.strip().lower()
qs = qs.filter(Q(Q(username=email) | Q(id=Subquery(Email.objects.filter(address=email).values('user_id')))))
if password:
password = password.strip()
try:
user = qs.get()
except Exception as err:
logger.error(err)
user = None
if user and not user.check_password(password):
return False
return user
if eppn:
qs = qs.filter(eppn=eppn)
if token:
qs = qs.filter(verification_key=token)
if external_id_provider and external_id:
qs = qs.filter(**{'external_identity__{}__{}'.format(external_id_provider, external_id): 'VERIFIED'})
try:
user = qs.get()
return user
except Exception as err:
logger.error(err)
return None
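# Illustrative usage of get_user (the email/token values are made up):
#   get_user(email='alice@example.com')                      # lookup by email only
#   get_user(email='alice@example.com', password='secret')   # returns False if the password is wrong
#   get_user(token='abc123')                                  # lookup by verification key
# Note that the password branch returns False rather than None on a failed
# check, so callers can distinguish "no such user" from "wrong password".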
class Auth(object):
def __init__(self, user=None, api_node=None,
private_key=None):
self.user = user
self.api_node = api_node
self.private_key = private_key
def __repr__(self):
return ('<Auth(user="{self.user}", '
'private_key={self.private_key})>').format(self=self)
@property
def logged_in(self):
return self.user is not None
@property
def private_link(self):
if not self.private_key:
return None
# Avoid circular import
from osf.models import PrivateLink
try:
private_link = PrivateLink.objects.get(key=self.private_key)
if private_link.is_deleted:
return None
except PrivateLink.DoesNotExist:
return None
return private_link
@classmethod
def from_kwargs(cls, request_args, kwargs):
user = request_args.get('user') or kwargs.get('user') or _get_current_user()
private_key = request_args.get('view_only')
return cls(
user=user,
private_key=private_key,
)
| 28.873016
| 119
| 0.665934
|
2682cc0d57503e566e54dfd1368ed46ad7ce1325
| 1,492
|
py
|
Python
|
setup.py
|
johnkozina/ansible-pan
|
335e05daa2f955194ca77328b5bf754b5e3d6751
|
[
"Apache-2.0"
] | 1
|
2019-04-19T23:08:27.000Z
|
2019-04-19T23:08:27.000Z
|
setup.py
|
johnkozina/ansible-pan
|
335e05daa2f955194ca77328b5bf754b5e3d6751
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
johnkozina/ansible-pan
|
335e05daa2f955194ca77328b5bf754b5e3d6751
|
[
"Apache-2.0"
] | 2
|
2019-01-31T02:51:08.000Z
|
2020-09-03T15:45:52.000Z
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ansible-pan',
version='1.0.1',
packages=['library'],
# The project's main homepage.
url='https://github.com/PaloAltoNetworks/ansible-pan',
license='Apache V2.0',
author='@ivanbojer',
author_email='ijb@networkrift.com',
    description='Set of Ansible modules for Palo Alto Networks device configuration',
long_description=long_description,
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
        'Programming Language :: Python :: 2',
],
install_requires=[
'pan-python>=0.10.0',
'pandevice>=0.4.0'
],
)
| 33.909091
| 85
| 0.63807
|
f0c4c29a6065c6231f7ff7c380d5d992ab5e0d69
| 5,058
|
py
|
Python
|
frappe/config/settings.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/config/settings.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
frappe/config/settings.py
|
ektai/frappe3
|
44aa948b4d5a0d729eacfb3dabdc9c8894ae1799
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.moduleview import add_setup_section
def get_data():
data = [
{
"label": _("Core"),
"items": [
{
"type": "doctype",
"name": "System Settings",
"label": _("System Settings"),
"description": _("Language, Date and Time settings"),
"hide_count": True
},
{
"type": "doctype",
"name": "Error Log",
"description": _("Log of error on automated events (scheduler).")
},
{
"type": "doctype",
"name": "Error Snapshot",
"description": _("Log of error during requests.")
},
{
"type": "doctype",
"name": "Domain Settings",
"label": _("Domain Settings"),
"description": _("Enable / Disable Domains"),
"hide_count": True
},
]
},
{
"label": _("Data"),
"items": [
{
"type": "doctype",
"name": "Data Import",
"label": _("Import Data"),
"icon": "octicon octicon-cloud-upload",
"description": _("Import Data from CSV / Excel files.")
},
{
"type": "doctype",
"name": "Data Export",
"label": _("Export Data"),
"icon": "octicon octicon-cloud-upload",
"description": _("Export Data in CSV / Excel format.")
},
{
"type": "doctype",
"name": "Naming Series",
"description": _("Set numbering series for transactions."),
"hide_count": True
},
{
"type": "doctype",
"name": "Rename Tool",
"label": _("Bulk Rename"),
"description": _("Rename many items by uploading a .csv file."),
"hide_count": True
},
{
"type": "doctype",
"name": "Bulk Update",
"label": _("Bulk Update"),
"description": _("Update many values at one time."),
"hide_count": True
},
{
"type": "page",
"name": "backups",
"label": _("Download Backups"),
"description": _("List of backups available for download"),
},
{
"type": "doctype",
"name": "Deleted Document",
"label": _("Deleted Documents"),
"description": _("Restore or permanently delete a document.")
},
]
},
{
"label": _("Email / Notifications"),
"items": [
{
"type": "doctype",
"name": "Email Account",
"description": _("Add / Manage Email Accounts.")
},
{
"type": "doctype",
"name": "Email Domain",
"description": _("Add / Manage Email Domains.")
},
{
"type": "doctype",
"name": "Notification",
"description": _("Setup Notifications based on various criteria.")
},
{
"type": "doctype",
"name": "Email Template",
"description": _("Email Templates for common queries.")
},
{
"type": "doctype",
"name": "Auto Email Report",
"description": _("Setup Reports to be emailed at regular intervals"),
},
{
"type": "doctype",
"name": "Newsletter",
"description": _("Create and manage newsletter")
},
{
"type": "doctype",
"route": "Form/Notification Settings/{}".format(frappe.session.user),
"name": "Notification Settings",
"description": _("Configure notifications for mentions, assignments, energy points and more.")
}
]
},
{
"label": _("Printing"),
"items": [
{
"type": "page",
"label": _("Print Format Builder"),
"name": "print-format-builder",
"description": _("Drag and Drop tool to build and customize Print Formats.")
},
{
"type": "doctype",
"name": "Print Settings",
"description": _("Set default format, page size, print style etc.")
},
{
"type": "doctype",
"name": "Print Format",
"description": _("Customized HTML Templates for printing transactions.")
},
{
"type": "doctype",
"name": "Print Style",
"description": _("Stylesheets for Print Formats")
},
]
},
{
"label": _("Workflow"),
"items": [
{
"type": "doctype",
"name": "Workflow",
"description": _("Define workflows for forms.")
},
{
"type": "doctype",
"name": "Workflow State",
"description": _("States for workflow (e.g. Draft, Approved, Cancelled).")
},
{
"type": "doctype",
"name": "Workflow Action",
"description": _("Actions for workflow (e.g. Approve, Cancel).")
},
{
"type": "doctype",
"name": "Assignment Rule",
"description": _("Set up rules for user assignments.")
}
]
},
{
"label": _("Automation"),
"items": [
{
"type": "doctype",
"name": "Assignment Rule",
"description": _("Set up rules for user assignments.")
},
{
"type": "doctype",
"name": "Milestone",
"description": _("Tracks milestones on the lifecycle of a document if it undergoes multiple stages.")
},
{
"type": "doctype",
"name": "Auto Repeat",
"description": _("Automatically generates recurring documents.")
},
]
},
]
add_setup_section(data, "frappe", "website", _("Website"), "fas fa-globe")
return data
| 24.916256
| 106
| 0.546263
|
9dad61fe5b338e1c2087628999147f8ac573ea96
| 6,729
|
py
|
Python
|
etc/tf_tutorial/learning_tf/21.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | null | null | null |
etc/tf_tutorial/learning_tf/21.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | 7
|
2019-12-16T22:10:01.000Z
|
2022-02-10T00:27:35.000Z
|
etc/tf_tutorial/learning_tf/21.py
|
zhangbo2008/facenet
|
4dfabcb5cf14f99622dbe5f9f12f0539821c169c
|
[
"MIT"
] | null | null | null |
""" Generative Adversarial Networks (GAN).
Using generative adversarial networks (GAN) to generate digit images from a
noise distribution.
References:
- Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
processing systems, 2672-2680.
- Understanding the difficulty of training deep feedforward neural networks.
X Glorot, Y Bengio. Aistats 9, 249-256
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
- [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
- [Xavier Glorot Init](www.cs.cmu.edu/~bhiksha/courses/deeplearning/Fall.../AISTATS2010_Glorot.pdf).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./tmp/data/", one_hot=True)
# Training Params
num_steps = 1000
batch_size = 128
learning_rate = 0.0002
# Network Params
image_dim = 784 # 28*28 pixels
gen_hidden_dim = 256
disc_hidden_dim = 256
noise_dim = 100 # Noise data points
# A custom initialization (see Xavier Glorot init)
def glorot_init(shape):
return tf.random_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))
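# The stddev above works out to sqrt(2 / fan_in); the full Glorot/Xavier normal
# scheme uses sqrt(2 / (fan_in + fan_out)), so this is a simplified variant.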
# Store layers weight & bias
weights = {
'gen_hidden1': tf.Variable(glorot_init([noise_dim, gen_hidden_dim])),
'gen_out': tf.Variable(glorot_init([gen_hidden_dim, image_dim])),
'disc_hidden1': tf.Variable(glorot_init([image_dim, disc_hidden_dim])),
'disc_out': tf.Variable(glorot_init([disc_hidden_dim, 1])),
}
biases = {
'gen_hidden1': tf.Variable(tf.zeros([gen_hidden_dim])),
'gen_out': tf.Variable(tf.zeros([image_dim])),
'disc_hidden1': tf.Variable(tf.zeros([disc_hidden_dim])),
'disc_out': tf.Variable(tf.zeros([1])),
}
# Generator
def generator(x):
hidden_layer = tf.matmul(x, weights['gen_hidden1'])
hidden_layer = tf.add(hidden_layer, biases['gen_hidden1'])
hidden_layer = tf.nn.relu(hidden_layer)
out_layer = tf.matmul(hidden_layer, weights['gen_out'])
out_layer = tf.add(out_layer, biases['gen_out'])
out_layer = tf.nn.sigmoid(out_layer)
    '''
    The output tensor has shape [None, image_dim]; image_dim is 784,
    i.e. the flattened 28*28 image.
    '''
return out_layer
# Discriminator
def discriminator(x):
hidden_layer = tf.matmul(x, weights['disc_hidden1'])
hidden_layer = tf.add(hidden_layer, biases['disc_hidden1'])
hidden_layer = tf.nn.relu(hidden_layer)
out_layer = tf.matmul(hidden_layer, weights['disc_out'])
out_layer = tf.add(out_layer, biases['disc_out'])
out_layer = tf.nn.sigmoid(out_layer)
return out_layer
# Build Networks
# Network Inputs
'''
Fake input (gen_input): random noise fed to the generator.
'''
gen_input = tf.placeholder(tf.float32, shape=[None, noise_dim], name='input_noise')
# Build Generator Network
gen_sample = generator(gen_input)
'''
gen_sample has shape [None, image_dim]; image_dim is 784, the flattened 28*28 image.
'''
'''
Real input (disc_input): actual MNIST images fed to the discriminator.
'''
disc_input = tf.placeholder(tf.float32, shape=[None, image_dim], name='disc_input')
# Build 2 Discriminator Networks (one from noise input, one from generated samples)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample)
'''
disc_real and disc_fake are single real numbers in (0, 1): the discriminator's
scores for the real and the generated images respectively.
'''
# Build Loss
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
'''
Here we are training the discriminator as a classifier.
Why is the loss computed with log? The discriminator ends in a sigmoid, so
maximum likelihood naturally gives a log loss.
From monotonicity: as disc_fake -> 0, gen_loss -> infinity;
                   as disc_real -> 1, disc_loss -> 0.
Why do we need two loss functions? With only the first there is no way to
score disc_real; with only the second, disc_real and disc_fake could not be
told apart (which one should approach 1 and which should approach 0), so
both losses are required.
'''
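'''
Quick numeric sanity check (illustrative values only):
with disc_real = 0.9 and disc_fake = 0.1,
    gen_loss  = -log(0.1)               ~ 2.30   (generator is losing)
    disc_loss = -(log(0.9) + log(0.9))  ~ 0.21   (discriminator is winning)
and if the generator improves so that disc_fake = 0.9,
    gen_loss  = -log(0.9)               ~ 0.11
    while the -log(1 - 0.9) term pushes disc_loss up to ~ 2.41.
'''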
# Build Optimizers
optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate)
optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Training Variables for each optimizer
# By default in TensorFlow, all variables are updated by each optimizer, so we
# need to precise for each one of them the specific variables to update.
# Generator Network Variables
'''
The next few lines collect the trainable variables for each optimizer.
'''
gen_vars = [weights['gen_hidden1'], weights['gen_out'],
biases['gen_hidden1'], biases['gen_out']]
# Discriminator Network Variables
disc_vars = [weights['disc_hidden1'], weights['disc_out'],
biases['disc_hidden1'], biases['disc_out']]
# Create training operations
train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
for i in range(1, num_steps+1):
# Prepare Data
# Get the next batch of MNIST data (only images are needed, not labels)
batch_x, _ = mnist.train.next_batch(batch_size)
        # print(batch_x.shape)  # would show (128, 784): each image is flattened to a 1*784 vector
# Generate noise to feed to the generator
        '''
        Uniform noise in [-1, 1] fed to the generator.
        '''
z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])
# Train
feed_dict = {disc_input: batch_x, gen_input: z}
        '''
        Run both training ops (the optimizers) in the session.
        '''
_, _, gl, dl = sess.run([train_gen, train_disc, gen_loss, disc_loss],
feed_dict=feed_dict)
if i % 1000 == 0 or i == 1:
print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))
print('33333333333333333')
# Generate images from noise, using the generator network.
f, a = plt.subplots(4, 10, figsize=(10, 4))
print('11111111111111')
for i in range(10):
# Noise input.
z = np.random.uniform(-1., 1., size=[4, noise_dim])
g = sess.run([gen_sample], feed_dict={gen_input: z})
print('222222222')
g = np.reshape(g, newshape=(4, 28, 28, 1))
print('4444444444')
# Reverse colours for better display
g = -1 * (g - 1)
for j in range(4):
# Generate image from noise. Extend to 3 channels for matplot figure.
img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
newshape=(28, 28, 3))
a[j][i].imshow(img)
print('5555555555')
f.show()
print('55555555551')
plt.draw()
print('55555555552')
plt.show()
    # plt.waitforbuttonpress()  # does not run inside Spyder, use the command line; mainly useful for animations in interactive (ion) plotting mode
| 33.477612
| 104
| 0.685689
|
1a3f4e05d0e7d1c43ddc01b0a5a35c6e116c78fa
| 24,656
|
py
|
Python
|
conkycolors/scripts/conkyRhythmbox.py
|
CarryJzzZ/pithy-conky-colors
|
573aaa262f86ec510927ed81bc3beb25e2195a9e
|
[
"MIT"
] | 4
|
2018-08-22T05:28:32.000Z
|
2018-08-23T06:44:17.000Z
|
conkycolors/scripts/conkyRhythmbox.py
|
CarryJzzZ/pithy-conky-colors
|
573aaa262f86ec510927ed81bc3beb25e2195a9e
|
[
"MIT"
] | null | null | null |
conkycolors/scripts/conkyRhythmbox.py
|
CarryJzzZ/pithy-conky-colors
|
573aaa262f86ec510927ed81bc3beb25e2195a9e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
###############################################################################
# conkyRhythmbox.py is a simple python script to gather
# details of from Rhythmbox for use in conky.
#
# Author: Kaivalagi
# Created: 23/09/2008
from datetime import datetime
from optparse import OptionParser
import sys
import traceback
import codecs
import os
import shutil
import urllib
try:
import dbus
DBUS_AVAIL = True
except ImportError:
# Dummy D-Bus library
class _Connection:
get_object = lambda *a: object()
class _Interface:
__init__ = lambda *a: None
ListNames = lambda *a: []
class Dummy: pass
dbus = Dummy()
dbus.Interface = _Interface
dbus.service = Dummy()
dbus.service.method = lambda *a: lambda f: f
dbus.service.Object = object
dbus.SessionBus = _Connection
DBUS_AVAIL = False
class CommandLineParser:
parser = None
def __init__(self):
self.parser = OptionParser()
self.parser.add_option("-t", "--template", dest="template", type="string", metavar="FILE", help=u"define a template file to generate output in one call. A displayable item in the file is in the form [--datatype=TI]. The following are possible options within each item: --datatype,--ratingchar. Note that the short forms of the options are not currently supported! None of these options are applicable at command line when using templates.")
self.parser.add_option("-d", "--datatype", dest="datatype", default="TI", type="string", metavar="DATATYPE", help=u"[default: %default] The data type options are: ST (status), CA (coverart), TI (title), AL (album), AR (artist), GE (genre), YR (year), TN (track number), FN (file name), BR (bitrate k/s), LE (length), PP (current position in percent), PT (current position in time), VO (volume), RT (rating). Not applicable at command line when using templates.")
self.parser.add_option("-c", "--coverartpath", dest="coverartpath", default="/tmp/cover", type="string", metavar="PATH", help=u"[default: %default] The file where coverart gets copied to if found when using the --datatype=CA option. Note that if set to an empty string i.e. \"\" the original file path is provided for the coverart path.")
self.parser.add_option("-r", "--ratingchar", dest="ratingchar", default="*", type="string", metavar="CHAR", help=u"[default: %default] The output character for the ratings scale. Command line option overridden if used in templates.")
self.parser.add_option("-s", "--statustext", dest="statustext", default="Playing,Paused,Stopped", type="string", metavar="TEXT", help=u"[default: %default] The text must be comma delimited in the form 'A,B,C'. Command line option overridden if used in templates.")
self.parser.add_option("-n", "--nounknownoutput", dest="nounknownoutput", default=False, action="store_true", help=u"Turn off unknown output such as \"Unknown\" for title and \"0:00\" for length. Command line option overridden if used in templates.")
self.parser.add_option("-S", "--secondsoutput", dest="secondsoutput", default=False, action="store_true", help=u"Force all position and length output to be in seconds only.")
self.parser.add_option("-m", "--maxlength", dest="maxlength", default="0", type="int", metavar="LENGTH", help=u"[default: %default] Define the maximum length of any datatypes output, if truncated the output ends in \"...\"")
self.parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help=u"Request verbose output, not a good idea when running through conky!")
self.parser.add_option("-V", "--version", dest="version", default=False, action="store_true", help=u"Displays the version of the script.")
self.parser.add_option("--errorlogfile", dest="errorlogfile", type="string", metavar="FILE", help=u"If a filepath is set, the script appends errors to the filepath.")
self.parser.add_option("--infologfile", dest="infologfile", type="string", metavar="FILE", help=u"If a filepath is set, the script appends info to the filepath.")
def parse_args(self):
(options, args) = self.parser.parse_args()
return (options, args)
def print_help(self):
return self.parser.print_help()
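# An illustrative template file for the --template option (layout is free-form,
# this is only a sketch):
#
#   Now [--datatype=ST]: [--datatype=AR] - [--datatype=TI] ([--datatype=PT]/[--datatype=LE])
#   Rating: [--datatype=RT --ratingchar=*]
#
# Each bracketed item is expanded by getTemplateItemOutput() further below; a
# literal '[' can be emitted by escaping it as '\['.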
class MusicData:
def __init__(self,status,coverart,title,album,length,artist,tracknumber,genre,year,filename,bitrate,current_position_percent,current_position,rating,volume):
self.status = status
self.coverart = coverart
self.title = title
self.album = album
self.length = length
self.artist = artist
self.tracknumber = tracknumber
self.genre = genre
self.year = year
self.filename = filename
self.bitrate = bitrate
self.current_position_percent = current_position_percent
self.current_position = current_position
self.rating = rating
self.volume = volume
class RhythmboxInfo:
error = u""
musicData = None
def __init__(self, options):
self.options = options
def testDBus(self, bus, interface):
obj = bus.get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
dbus_iface = dbus.Interface(obj, 'org.freedesktop.DBus')
avail = dbus_iface.ListNames()
return interface in avail
def getOutputData(self, datatype, ratingchar, statustext, nounknownoutput, maxlength):
output = u""
if nounknownoutput == True:
unknown_time = ""
unknown_number = ""
unknown_string = ""
else:
unknown_time = "0:00"
unknown_number = "0"
unknown_string = "Unknown"
try:
bus = dbus.SessionBus()
if self.musicData == None:
if self.testDBus(bus, 'org.gnome.Rhythmbox'):
self.logInfo("Calling dbus interface for music data")
try:
self.logInfo("Setting up dbus interface")
# setup dbus hooks
remote_object_shell = bus.get_object('org.gnome.Rhythmbox', '/org/gnome/Rhythmbox/Shell')
iface_shell = dbus.Interface(remote_object_shell, 'org.gnome.Rhythmbox.Shell')
remote_object_player = bus.get_object('org.gnome.Rhythmbox', '/org/gnome/Rhythmbox/Player')
iface_player = dbus.Interface(remote_object_player, 'org.gnome.Rhythmbox.Player')
self.logInfo("Calling dbus interface for music data")
# prepare song properties for data retrieval
volume = str(int(100*iface_player.getVolume()))
uri = iface_player.getPlayingUri()
if len(uri) == 0:
status = self.getStatusText("stopped", statustext)
self.musicData = MusicData(status,None,None,None,None,None,None,None,None,None,None,None,None,None,volume)
else:
playing = iface_player.getPlaying()
if playing == True:
status = self.getStatusText("playing", statustext)
else:
status = self.getStatusText("paused", statustext)
props = iface_shell.getSongProperties(uri)
#print props
# grab the data into variables
location = props["location"]
# handle a file or stream differently for filename
if location.find("file://") != -1:
filename = location[location.rfind("/")+1:]
elif len(location) > 0:
filename = location
else:
filename = ""
# try to get all the normal stuff...the props return an empty string if nothing is available
title = props["title"]
# if stream song title then use that too (internet radio)
if "rb:stream-song-title" in props:
title = props["rb:stream-song-title"] + " (" + title + ")"
album = props["album"]
artist = props["artist"]
year = str(props["year"])
tracknumber = str(props["track-number"])
bitrate = str(props["bitrate"])+"k/s"
if year == "0": year = "?"
if tracknumber == "0": tracknumber = "?"
# get coverart url or file link
coverart = ""
if "rb:coverArt-uri" in props:
coverart = props["rb:coverArt-uri"]
if coverart.find("http://") != -1:
coverart = ""
else:
coverart = urllib.unquote(coverart).replace("file://","")
# common details
genre = props["genre"]
length_seconds = int(props["duration"])
current_seconds = int(iface_player.getElapsed())
if length_seconds > 0:
current_position_percent = str(int((float(current_seconds) / float(props["duration"]))*100))
else:
length_seconds = 0
current_position_percent = "0"
if self.options.secondsoutput == True:
length = str(length_seconds)
current_position = str(current_seconds)
else:
length = str(length_seconds/60).rjust(1,"0")+":"+str(length_seconds%60).rjust(2,"0")
current_position = str(int(current_seconds/60)).rjust(1,"0")+":"+str(int(current_seconds%60)).rjust(2,"0")
rating = str(int(props["rating"]))
self.musicData = MusicData(status,coverart,title,album,length,artist,tracknumber,genre,year,filename,bitrate,current_position_percent,current_position,rating,volume)
except Exception, e:
self.logError("Issue calling the dbus service:"+e.__str__())
if self.musicData != None:
self.logInfo("Preparing output for datatype:"+datatype)
if datatype == "ST": #status
if self.musicData.status == None or len(self.musicData.status) == 0:
output = None
else:
output = self.musicData.status
elif datatype == "CA": #coverart
if self.musicData.coverart == None or len(self.musicData.coverart) == 0:
output = None
else:
self.logInfo("Copying coverart from %s to %s"%(self.musicData.coverart, self.options.coverartpath))
shutil.copy(self.musicData.coverart, self.options.coverartpath)
self.musicData.coverart = self.options.coverartpath
output = self.musicData.coverart
elif datatype == "TI": #title
if self.musicData.title == None or len(self.musicData.title) == 0:
output = None
else:
output = self.musicData.title
elif datatype == "AL": #album
if self.musicData.album == None or len(self.musicData.album) == 0:
output = None
else:
output = self.musicData.album
elif datatype == "AR": #artist
if self.musicData.artist == None or len(self.musicData.artist) == 0:
output = None
else:
output = self.musicData.artist
elif datatype == "TN": #tracknumber
if self.musicData.tracknumber == None or len(self.musicData.tracknumber) == 0:
output = None
else:
output = self.musicData.tracknumber
elif datatype == "GE": #genre
                if self.musicData.genre == None or len(self.musicData.genre) == 0:
output = None
else:
output = self.musicData.genre
elif datatype == "YR": #year
if self.musicData.year == None or len(self.musicData.year) == 0:
output = None
else:
output = self.musicData.year
elif datatype == "FN": #filename
if self.musicData.filename == None or len(self.musicData.filename) == 0:
output = None
else:
output = self.musicData.filename
elif datatype == "BR": #bitrate
if self.musicData.bitrate == None or len(self.musicData.bitrate) == 0:
output = None
else:
output = self.musicData.bitrate
elif datatype == "LE": # length
if self.musicData.length == None or len(self.musicData.length) == 0:
output = None
else:
output = self.musicData.length
elif datatype == "PP": #current position in percent
if self.musicData.current_position_percent == None or len(self.musicData.current_position_percent) == 0:
output = None
else:
output = self.musicData.current_position_percent
elif datatype == "PT": #current position in time
if self.musicData.current_position == None or len(self.musicData.current_position) == 0:
output = None
else:
output = self.musicData.current_position
elif datatype == "VO": #volume
if self.musicData.volume == None or len(self.musicData.volume) == 0:
output = None
else:
output = self.musicData.volume
elif datatype == "RT": #rating
if self.musicData.rating == None or self.isNumeric(self.musicData.rating) == False:
output = None
else:
rating = int(self.musicData.rating)
if rating > 0:
output = u"".ljust(rating,ratingchar)
elif rating == 0:
output = u""
else:
output = None
else:
self.logError("Unknown datatype provided: " + datatype)
return u""
if output == None or self.musicData == None:
if datatype in ["LE","PT"]:
if self.options.secondsoutput == True:
output = unknown_number
else:
output = unknown_time
elif datatype in ["PP","VO","YR","TN"]:
output = unknown_number
elif datatype == "CA":
output = ""
else:
output = unknown_string
if maxlength > 0 and len(output) > maxlength:
output = output[:maxlength-3]+"..."
return output
except SystemExit:
self.logError("System Exit!")
return u""
except Exception, e:
traceback.print_exc()
self.logError("Unknown error when calling getOutputData:" + e.__str__())
return u""
def getStatusText(self, status, statustext):
if status != None:
statustextparts = statustext.split(",")
if status == "playing":
return statustextparts[0]
elif status == "paused":
return statustextparts[1]
elif status == "stopped":
return statustextparts[2]
else:
return status
def getTemplateItemOutput(self, template_text):
# keys to template data
DATATYPE_KEY = "datatype"
RATINGCHAR_KEY = "ratingchar"
STATUSTEXT_KEY = "statustext"
NOUNKNOWNOUTPUT_KEY = "nounknownoutput"
MAXLENGTH_KEY = "maxlength"
datatype = None
ratingchar = self.options.ratingchar #default to command line option
statustext = self.options.statustext #default to command line option
nounknownoutput = self.options.nounknownoutput #default to command line option
maxlength = self.options.maxlength #default to command line option
for option in template_text.split('--'):
if len(option) == 0 or option.isspace():
continue
# not using split here...it can't assign both key and value in one call, this should be faster
x = option.find('=')
if (x != -1):
key = option[:x].strip()
value = option[x + 1:].strip()
if value == "":
value = None
else:
key = option.strip()
value = None
try:
if key == DATATYPE_KEY:
datatype = value
elif key == RATINGCHAR_KEY:
ratingchar = value
elif key == STATUSTEXT_KEY:
statustext = value
elif key == NOUNKNOWNOUTPUT_KEY:
nounknownoutput = True
elif key == MAXLENGTH_KEY:
maxlength = int(value)
else:
self.logError("Unknown template option: " + option)
except (TypeError, ValueError):
self.logError("Cannot convert option argument to number: " + option)
return u""
if datatype != None:
return self.getOutputData(datatype, ratingchar, statustext, nounknownoutput, maxlength)
else:
self.logError("Template item does not have datatype defined")
return u""
def getOutputFromTemplate(self, template):
output = u""
end = False
a = 0
# a and b are indexes in the template string
# moving from left to right the string is processed
# b is index of the opening bracket and a of the closing bracket
# everything between b and a is a template that needs to be parsed
while not end:
b = template.find('[', a)
if b == -1:
b = len(template)
end = True
# if there is something between a and b, append it straight to output
if b > a:
output += template[a : b]
# check for the escape char (if we are not at the end)
if template[b - 1] == '\\' and not end:
# if its there, replace it by the bracket
output = output[:-1] + '['
# skip the bracket in the input string and continue from the beginning
a = b + 1
continue
if end:
break
a = template.find(']', b)
if a == -1:
self.logError("Missing terminal bracket (]) for a template item")
return u""
# if there is some template text...
if a > b + 1:
output += self.getTemplateItemOutput(template[b + 1 : a])
a = a + 1
return output
def writeOutput(self):
if self.options.template != None:
#load the file
try:
fileinput = codecs.open(os.path.expanduser(self.options.template), encoding='utf-8')
template = fileinput.read()
fileinput.close()
except Exception, e:
self.logError("Error loading template file: " + e.__str__())
else:
output = self.getOutputFromTemplate(template)
else:
output = self.getOutputData(self.options.datatype, self.options.ratingchar, self.options.statustext, self.options.nounknownoutput, self.options.maxlength)
print output.encode("utf-8")
def isNumeric(self,value):
try:
temp = int(value)
return True
except:
return False
def logInfo(self, text):
if self.options.verbose == True:
print >> sys.stdout, "INFO: " + text
if self.options.infologfile != None:
datetimestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fileoutput = open(self.options.infologfile, "ab")
fileoutput.write(datetimestamp+" INFO: "+text+"\n")
fileoutput.close()
def logError(self, text):
print >> sys.stderr, "ERROR: " + text
if self.options.errorlogfile != None:
datetimestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fileoutput = open(self.options.errorlogfile, "ab")
fileoutput.write(datetimestamp+" ERROR: "+text+"\n")
fileoutput.close()
def main():
parser = CommandLineParser()
(options, args) = parser.parse_args()
if options.version == True:
print >> sys.stdout,"conkyRhythmbox v.2.17"
else:
if options.verbose == True:
print >> sys.stdout, "*** INITIAL OPTIONS:"
print >> sys.stdout, " datatype:", options.datatype
print >> sys.stdout, " template:", options.template
print >> sys.stdout, " ratingchar:", options.ratingchar
print >> sys.stdout, " nounknownoutput:", options.nounknownoutput
print >> sys.stdout, " secondsoutput:", options.secondsoutput
print >> sys.stdout, " maxlength:", options.maxlength
print >> sys.stdout, " verbose:", options.verbose
print >> sys.stdout, " errorlogfile:",options.errorlogfile
print >> sys.stdout, " infologfile:",options.infologfile
rhythmboxinfo = RhythmboxInfo(options)
rhythmboxinfo.writeOutput()
if __name__ == '__main__':
main()
sys.exit()
| 46.96381
| 470
| 0.495539
|
5e22663fd5063c30fa9d38192f940bb174319f4c
| 888
|
py
|
Python
|
tests/cloud_operations/dns_fine_grained_iam/test_plan.py
|
00inboxtest/cloud-foundation-fabric
|
ee25965c895aa4bc3f62090a08b797d45b82d178
|
[
"Apache-2.0"
] | null | null | null |
tests/cloud_operations/dns_fine_grained_iam/test_plan.py
|
00inboxtest/cloud-foundation-fabric
|
ee25965c895aa4bc3f62090a08b797d45b82d178
|
[
"Apache-2.0"
] | null | null | null |
tests/cloud_operations/dns_fine_grained_iam/test_plan.py
|
00inboxtest/cloud-foundation-fabric
|
ee25965c895aa4bc3f62090a08b797d45b82d178
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_resources(e2e_plan_runner):
"Test that plan works and the numbers of resources is as expected."
modules, resources = e2e_plan_runner(FIXTURES_DIR)
assert len(modules) == 8
assert len(resources) == 25
| 31.714286
| 74
| 0.757883
|
2f710c6cb25795b9ebe2abc7ba6739c8a0fb5a46
| 629
|
py
|
Python
|
main/migrations/0002_auto_20210817_1406.py
|
Mneimoh/gestionimmo
|
b6c631b198537b04e07eaa59b48b2b24f9d2ee0d
|
[
"MIT"
] | null | null | null |
main/migrations/0002_auto_20210817_1406.py
|
Mneimoh/gestionimmo
|
b6c631b198537b04e07eaa59b48b2b24f9d2ee0d
|
[
"MIT"
] | null | null | null |
main/migrations/0002_auto_20210817_1406.py
|
Mneimoh/gestionimmo
|
b6c631b198537b04e07eaa59b48b2b24f9d2ee0d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-08-17 21:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='dossier',
            name='Paiement',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='main.paiement'),
        ),
        migrations.AddField(
            model_name='paiement',
            name='uid',
            field=models.IntegerField(null=True),
        ),
    ]
| 25.16
| 124
| 0.600954
|
7a542a1b120079a9e3ceb28de56c671c182ef69d
| 5,134
|
py
|
Python
|
venv/lib/python3.8/site-packages/docs/conf.py
|
OscarJHernandez/qc_portfolio_optimization
|
30f0e27689ad6bf37fb87880c2813a5b9858608d
|
[
"Apache-2.0"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
venv/lib/python3.8/site-packages/docs/conf.py
|
OscarJHernandez/qc_mentorship_project
|
30f0e27689ad6bf37fb87880c2813a5b9858608d
|
[
"Apache-2.0"
] | 4
|
2020-11-27T09:34:13.000Z
|
2021-04-30T21:13:41.000Z
|
venv/lib/python3.8/site-packages/docs/conf.py
|
OscarJHernandez/qc_mentorship_project
|
30f0e27689ad6bf37fb87880c2813a5b9858608d
|
[
"Apache-2.0"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
"""
Sphinx documentation builder
"""
import os
# Set env flag so that we can doc functions that may otherwise not be loaded
# see for example interactive visualizations in qiskit.visualization.
os.environ['QISKIT_DOCS'] = 'TRUE'
# -- Project information -----------------------------------------------------
project = 'Qiskit Ignis'
copyright = '2019, Qiskit Development Team' # pylint: disable=redefined-builtin
author = 'Qiskit Development Team'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.5.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.extlinks',
    'sphinx_tabs.tabs',
    'jupyter_sphinx',
    'sphinx_autodoc_typehints',
    'reno.sphinxext',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
]
html_static_path = ['_static']
templates_path = ['_templates']
html_css_files = [
    'style.css',
]
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
    'inherited-members': None,
}
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True
# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
    'table': 'Table %s'
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = False
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
modindex_common_prefix = ['qiskit.']
# -- Configuration for extlinks extension ------------------------------------
# Refer to https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # use the theme in subdir 'theme'
html_logo = 'images/logo.png'
#html_sidebars = {'**': ['globaltoc.html']}
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
    'logo_only': True,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': True,
    'style_nav_header_background': '#212121',
}
autoclass_content = 'both'
| 33.122581
| 80
| 0.652513
|
81b09ea05170b1ac8018627908439318d68c4eab
| 1,643
|
py
|
Python
|
tests/unit/test_extract_email.py
|
georgettica/URLExtract
|
638c0e2d4d8fec077b13b0eefb2c96ffaee112be
|
[
"MIT"
] | 184
|
2017-03-05T22:17:14.000Z
|
2022-03-02T22:45:54.000Z
|
tests/unit/test_extract_email.py
|
georgettica/URLExtract
|
638c0e2d4d8fec077b13b0eefb2c96ffaee112be
|
[
"MIT"
] | 93
|
2017-03-20T21:58:55.000Z
|
2022-03-16T20:09:37.000Z
|
tests/unit/test_extract_email.py
|
georgettica/URLExtract
|
638c0e2d4d8fec077b13b0eefb2c96ffaee112be
|
[
"MIT"
] | 55
|
2017-03-19T23:35:36.000Z
|
2022-03-14T02:58:38.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. Licence MIT
.. codeauthor:: Jan Lipovský <janlipovsky@gmail.com>, janlipovsky.cz
"""
import pytest
@pytest.mark.parametrize(
    "text, expected",
    [
        (
            "URI with User info in Authority ftp://jan@example.com:123/test",
            ["ftp://jan@example.com:123/test"],
        ),
        ("<email@address.net>", []),
        ("Do not extract emails by default jan@example.com", []),
    ],
)
def test_extract_email_disabled(urlextract, text, expected):
    """
    Testing find_urls *NOT* returning email addresses from text

    :param fixture urlextract: fixture holding URLExtract object
    :param str text: text in which we should find links
    :param list(str) expected: list of URLs that has to be found in text
    """
    assert expected == urlextract.find_urls(text)


@pytest.mark.parametrize(
    "text, expected",
    [
        ("Do not extract emails by default jan@example.com", ["jan@example.com"]),
        ("<email@address.net>", ["email@address.net"]),
        ("Given URIs are not mail jan@example.com/asdasd jan@example.com:1234", []),
        ("Given URIs are not mail jan@example.com?not jan@example.com#not", []),
    ],
)
def test_extract_email_enabled(urlextract, text, expected):
    """
    Testing find_urls returning all email addresses from text

    :param fixture urlextract: fixture holding URLExtract object
    :param str text: text in which we should find links
    :param list(str) expected: list of URLs that has to be found in text
    """
    urlextract.extract_email = True
    assert expected == urlextract.find_urls(text)
| 32.215686
| 84
| 0.654291
|
d0bcf6e2325bf58e84217551512dbc19038223d2
| 1,163
|
py
|
Python
|
GeneralPython/PyDataStructure/multiValuedDict.py
|
prashantas/MyOwnR
|
8db4f288b30840161c6422bde7c7a7770f85c09d
|
[
"BSD-2-Clause"
] | null | null | null |
GeneralPython/PyDataStructure/multiValuedDict.py
|
prashantas/MyOwnR
|
8db4f288b30840161c6422bde7c7a7770f85c09d
|
[
"BSD-2-Clause"
] | null | null | null |
GeneralPython/PyDataStructure/multiValuedDict.py
|
prashantas/MyOwnR
|
8db4f288b30840161c6422bde7c7a7770f85c09d
|
[
"BSD-2-Clause"
] | null | null | null |
#http://code.activestate.com/recipes/52219-associating-multiple-values-with-each-key-in-a-dic/
#https://www.oreilly.com/library/view/python-cookbook/0596001673/ch01s06.html
def repeatedValue():
    # this method allows duplicate values for the same key
    d = dict()
    # To add a key->value pair, do this:
    # d.setdefault(key, []).append(value)
    d.setdefault('a', []).append('apple')
    d.setdefault('b', []).append('ball')
    d.setdefault('c', []).append('cat')
    d.setdefault('a', []).append('aeroplane')
    d.setdefault('a', []).append('anar')
    d.setdefault('b', []).append('balloon')
    # aval = d['a']
    print(d)
    d['a'].remove('apple')
    print(d)


def nonRepeatedValue():
    example = {}
    example.setdefault('a', set()).add('apple')
    example.setdefault('b', set()).add('ball')
    example.setdefault('a', set()).add('ant')
    # example.setdefault('a', set()).add('apple')
    # example.setdefault('c', {})['cat'] = 1
    # example.setdefault('a', {})['ant'] = 1
    # example.setdefault('a', {})['apple'] = 1
    print(example)
    d = example['a']
    print(d)


if __name__ == '__main__':
    # repeatedValue()
    nonRepeatedValue()
| 27.046512
| 94
| 0.607051
|