hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31b4d4041b346080b3c99bf0b972817cb29cc91f | 1,056 | py | Python | setup.py | MLResearchAtOSRAM/cause2e | 9420e88802172b893d4029b741dfd3e5e718880b | [
"MIT"
] | 33 | 2021-05-18T13:03:54.000Z | 2022-02-17T16:50:48.000Z | setup.py | MLResearchAtOSRAM/cause2e | 9420e88802172b893d4029b741dfd3e5e718880b | [
"MIT"
] | 11 | 2021-09-17T07:27:38.000Z | 2022-03-29T07:04:33.000Z | setup.py | MLResearchAtOSRAM/cause2e | 9420e88802172b893d4029b741dfd3e5e718880b | [
"MIT"
] | 1 | 2021-11-15T12:22:51.000Z | 2021-11-15T12:22:51.000Z | import setuptools
# Read the project README so PyPI renders it as the long description.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="cause2e",
    version="0.2.0",
    author="Daniel Gruenbaum",
    author_email="daniel.gruenbaum@ams-osram.com",
    description="A package for end-to-end causal analysis",
    license="MIT",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/MLResearchAtOSRAM/cause2e",
    # Auto-discover every package under the project root.
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows"
    ],
    python_requires='>=3.7',
    # Runtime dependencies; versions are unpinned.
    install_requires=[
        "dowhy",
        "ipython",
        "jinja2",
        "pillow",
        "pyarrow",
        "pycausal",
        "seaborn"
    ]
)
| 28.540541 | 60 | 0.585227 | import setuptools
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="cause2e",
version="0.2.0",
author="Daniel Gruenbaum",
author_email="daniel.gruenbaum@ams-osram.com",
description="A package for end-to-end causal analysis",
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MLResearchAtOSRAM/cause2e",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows"
],
python_requires='>=3.7',
install_requires=[
"dowhy",
"ipython",
"jinja2",
"pillow",
"pyarrow",
"pycausal",
"seaborn"
]
)
| 0 | 0 | 0 |
d48c2ef6acdd3722de0456719e1c6ed9281ed527 | 132 | py | Python | src/deep_dialog/models/nlg/convert.py | gzpbbd/DDQ | 44f4d2bf27c4299d349339de7bc85d1b9b640c50 | [
"MIT"
] | 141 | 2018-05-23T02:20:36.000Z | 2022-03-20T21:49:03.000Z | D3Q/src/deep_dialog/models/nlg/convert.py | yanglongfei908/D3Q | eb9cb05ffc3c22fcd4972371a987dbacab3e4ff6 | [
"MIT"
] | 10 | 2018-05-25T07:08:16.000Z | 2021-05-23T08:36:20.000Z | D3Q/src/deep_dialog/models/nlg/convert.py | yanglongfei908/D3Q | eb9cb05ffc3c22fcd4972371a987dbacab3e4ff6 | [
"MIT"
] | 44 | 2018-07-17T10:14:07.000Z | 2021-09-11T07:19:43.000Z | import cPickle
# One-off conversion script: load a training-time pickled NLG model and
# re-dump it as the runtime model file used by the dialog system.
# Fix: use context managers so both file handles are closed deterministically.
# The original never closed either handle; in particular the 'wb' output
# handle was left to the interpreter to flush on exit.
# NOTE(review): read mode is kept as the default ('r') to match the original
# Python 2 cPickle behavior — confirm before switching to 'rb'.
with open('lstm_tanh_relu_[1468202263.38]_2_0.610.p') as model_file:
    model = cPickle.load(model_file)
with open('model.bin.nlg', 'wb') as out_file:
    cPickle.dump(model, out_file)
model=cPickle.load(open('lstm_tanh_relu_[1468202263.38]_2_0.610.p'))
cPickle.dump(model,open('model.bin.nlg','wb')) | 0 | 0 | 0 |
cf22a43cb28da76dd9e14ea02ae3c02acf643632 | 70 | py | Python | controller/controller/__init__.py | FilippoRanza/rr-scheduler | 8fc06f9d0ffe514ab6a94fd8a330f1cfd45b56c3 | [
"MIT"
] | 1 | 2022-01-13T13:59:28.000Z | 2022-01-13T13:59:28.000Z | controller/controller/__init__.py | FilippoRanza/rr-scheduler | 8fc06f9d0ffe514ab6a94fd8a330f1cfd45b56c3 | [
"MIT"
] | null | null | null | controller/controller/__init__.py | FilippoRanza/rr-scheduler | 8fc06f9d0ffe514ab6a94fd8a330f1cfd45b56c3 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
from . import get_best
from . import math_helper
| 14 | 25 | 0.742857 | #! /usr/bin/python3
from . import get_best
from . import math_helper
| 0 | 0 | 0 |
7c8df316045b0d9c8310c4d65c2bec2cf734c1d1 | 2,809 | py | Python | torch_geometric/datasets/twitch.py | rietesh/pytorch_geometric | 2ccebcdbcc763943282822e08214dca0cfc81243 | [
"MIT"
] | null | null | null | torch_geometric/datasets/twitch.py | rietesh/pytorch_geometric | 2ccebcdbcc763943282822e08214dca0cfc81243 | [
"MIT"
] | null | null | null | torch_geometric/datasets/twitch.py | rietesh/pytorch_geometric | 2ccebcdbcc763943282822e08214dca0cfc81243 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Callable, Optional
import numpy as np
import torch
from torch_geometric.data import Data, InMemoryDataset, download_url
class Twitch(InMemoryDataset):
r"""The Twitch Gamer networks introduced in the
`"Multi-scale Attributed Node Embedding"
<https://arxiv.org/abs/1909.13021>`_ paper.
Nodes represent gamers on Twitch and edges are followerships between them.
Node features represent embeddings of games played by the Twitch users.
The task is to predict whether a user streams mature content.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"DE"`, :obj:`"EN"`,
:obj:`"ES"`, :obj:`"FR"`, :obj:`"PT"`, :obj:`"RU"`).
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
"""
url = 'https://graphmining.ai/datasets/ptg/twitch'
@property
@property
@property
@property
| 36.960526 | 78 | 0.645069 | from pathlib import Path
from typing import Callable, Optional
import numpy as np
import torch
from torch_geometric.data import Data, InMemoryDataset, download_url
class Twitch(InMemoryDataset):
    r"""The Twitch Gamer networks introduced in the
    `"Multi-scale Attributed Node Embedding"
    <https://arxiv.org/abs/1909.13021>`_ paper.
    Nodes represent gamers on Twitch and edges are followerships between them.
    Node features represent embeddings of games played by the Twitch users.
    The task is to predict whether a user streams mature content.

    Args:
        root (string): Root directory where the dataset should be saved.
        name (string): The name of the dataset (:obj:`"DE"`, :obj:`"EN"`,
            :obj:`"ES"`, :obj:`"FR"`, :obj:`"PT"`, :obj:`"RU"`).
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
    """

    # Base URL of the hosted .npz archives, one per country network.
    url = 'https://graphmining.ai/datasets/ptg/twitch'

    def __init__(self, root: str, name: str,
                 transform: Optional[Callable] = None,
                 pre_transform: Optional[Callable] = None):
        self.name = name
        # Only the six published country networks are valid.
        assert self.name in ['DE', 'EN', 'ES', 'FR', 'PT', 'RU']
        # The base class triggers download()/process() as needed.
        super().__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])

    @property
    def raw_dir(self) -> str:
        # NOTE(review): returns a pathlib.Path despite the `str` annotation;
        # downstream os.path usage accepts both — confirm intended.
        return Path.joinpath(Path(self.root), self.name, 'raw')

    @property
    def processed_dir(self) -> str:
        # NOTE(review): same Path-vs-str annotation mismatch as raw_dir.
        return Path.joinpath(Path(self.root), self.name, 'processed')

    @property
    def raw_file_names(self) -> str:
        return f'{self.name}.npz'

    @property
    def processed_file_names(self) -> str:
        return 'data.pt'

    def download(self):
        # Fetch e.g. <url>/DE.npz into the raw directory.
        download_url(f'{self.url}/{self.name}.npz', self.raw_dir)

    def process(self):
        # Second positional arg is numpy's mmap_mode='r' (read-only mapping).
        data = np.load(self.raw_paths[0], 'r', allow_pickle=True)
        x = torch.from_numpy(data['features']).to(torch.float)
        y = torch.from_numpy(data['target']).to(torch.long)
        # Stored as (num_edges, 2); transpose to PyG's (2, num_edges) layout.
        edge_index = torch.from_numpy(data['edges']).to(torch.long)
        edge_index = edge_index.t().contiguous()
        data = Data(x=x, y=y, edge_index=edge_index)
        if self.pre_transform is not None:
            data = self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])
| 1,162 | 0 | 185 |
5925eb09d90602cd2178221ab4c247113649ec3c | 918 | py | Python | core/type.py | kainstan/stealer | 5e363a70558454093c4ad3f0065366f2b99ef4f4 | [
"MIT"
] | null | null | null | core/type.py | kainstan/stealer | 5e363a70558454093c4ad3f0065366f2b99ef4f4 | [
"MIT"
] | null | null | null | core/type.py | kainstan/stealer | 5e363a70558454093c4ad3f0065366f2b99ef4f4 | [
"MIT"
] | null | null | null | import json
from enum import Enum, unique
@unique
video_mapper = {item.value: item for item in Video.__members__.values() if item.enable}
video_mapper_json = []
for item in Video.__members__.values():
if not item.enable:
continue
video_mapper_json.append({
'label': item.label,
'value': item.value,
})
video_mapper_json = json.dumps(video_mapper_json, ensure_ascii=False)
| 24.810811 | 87 | 0.620915 | import json
from enum import Enum, unique
@unique
class Video(Enum):
    """Supported short-video platforms.

    Each member is declared as a 3-tuple: (display label, platform key,
    enabled flag). Only the platform key becomes the enum value, which is
    what ``@unique`` checks for duplicates.
    """

    AUTO = '自动适配', 'auto', True
    DOUYIN = '抖音', 'douyin', True
    TIKTOK = 'TikTok', 'tiktok', True
    KUAISHOU = '快手', 'kuaishou', True
    HUOSHAN = '火山小视频', 'huoshan', True
    # NOTE(review): the label below starts with a space and the member is
    # disabled — confirm both are intentional.
    XIGUA = ' 西瓜视频', 'xigua', False
    PIPIXIA = '皮皮虾', 'pipixia', True

    def __new__(cls, *value):
        # Unpack (label, value, enable): store the extras as attributes and
        # make value[1] the canonical enum value.
        obj = object.__new__(cls)
        obj.label = value[0]
        obj._value_ = value[1]
        obj.enable = value[2]
        return obj

    def __int__(self):
        # NOTE(review): _value_ is a non-numeric string for every member, so
        # int(self._value_) raises ValueError — confirm this is ever called.
        return int(self._value_)
# Lookup table from platform key to its enabled Video member.
video_mapper = {
    member.value: member
    for member in Video.__members__.values()
    if member.enable
}

# JSON array of enabled platforms, e.g. [{"label": ..., "value": ...}, ...],
# serialized with non-ASCII labels kept readable.
video_mapper_json = json.dumps(
    [
        {'label': member.label, 'value': member.value}
        for member in Video.__members__.values()
        if member.enable
    ],
    ensure_ascii=False,
)
| 177 | 345 | 22 |
8d066504a7fa81e53bf316edc3ef322b6e28ad1e | 4,813 | py | Python | lib/googlecloudsdk/command_lib/asset/flags.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/asset/flags.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/command_lib/asset/flags.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for commands in cloudasset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
def AddContentTypeArgs(parser, required):
"""--content-type argument for asset export and get-history."""
if required:
help_text = (
'Asset content type. Choices are `resource`, `iam-policy`. '
'Specifying `resource` will export resource metadata, and specifying '
'`iam-policy` will export IAM policy set on assets.')
else:
help_text = (
'Asset content type. If specified, only content matching the '
'specified type will be returned. Otherwise, no content but the '
'asset name will be returned. Choices are `resource`, '
'`iam-policy`. Specifying `resource` will export resource '
'metadata, and specifying `iam-policy` will export IAM policy set '
'on assets.')
parser.add_argument(
'--content-type',
required=required,
choices=['resource', 'iam-policy'],
help=help_text)
| 36.462121 | 80 | 0.67006 | # -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags for commands in cloudasset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
def AddOrganizationArgs(parser):
  """Adds the --organization flag to the given argument parser."""
  help_text = 'The ID of the organization which is the root asset.'
  parser.add_argument(
      '--organization', metavar='ORGANIZATION_ID', help=help_text)
def AddSnapshotTimeArgs(parser):
  """Adds the --snapshot-time flag to the given argument parser."""
  help_text = (
      'Timestamp to take a snapshot on assets. This could only be a '
      'current or past time. If not specified, the current time will be '
      'used. Due to delays in resource data collection and indexing, '
      'there is a volatile window during which running the same query at '
      'different time may return different results. '
      'See $ gcloud topic datetimes for information on time formats.')
  parser.add_argument(
      '--snapshot-time', type=arg_parsers.Datetime.Parse, help=help_text)
def AddAssetTypesArgs(parser):
  """Adds the --asset-types list flag to the given argument parser."""
  help_text = (
      'A list of asset types (i.e., "google.compute.Disk") to take a '
      'snapshot. If specified and non-empty, only assets matching the '
      'specified types will be returned.')
  parser.add_argument(
      '--asset-types',
      metavar='ASSET_TYPES',
      type=arg_parsers.ArgList(),
      default=[],
      help=help_text)
def AddContentTypeArgs(parser, required):
  """Adds the --content-type flag used by asset export and get-history."""
  # The optional variant additionally documents the "no content" default;
  # the required variant documents only the two export choices.
  if not required:
    help_text = (
        'Asset content type. If specified, only content matching the '
        'specified type will be returned. Otherwise, no content but the '
        'asset name will be returned. Choices are `resource`, '
        '`iam-policy`. Specifying `resource` will export resource '
        'metadata, and specifying `iam-policy` will export IAM policy set '
        'on assets.')
  else:
    help_text = (
        'Asset content type. Choices are `resource`, `iam-policy`. '
        'Specifying `resource` will export resource metadata, and specifying '
        '`iam-policy` will export IAM policy set on assets.')
  parser.add_argument(
      '--content-type',
      required=required,
      choices=['resource', 'iam-policy'],
      help=help_text)
def AddOutputPathArgs(parser):
  """Adds the required --output-path flag to the given argument parser."""
  # Validate up front that the destination is a GCS URI.
  path_validator = arg_parsers.RegexpValidator(
      r'^gs://.*',
      '--output-path must be a Google Cloud Storage URI starting with '
      '"gs://". For example, "gs://bucket_name/object_name"')
  parser.add_argument(
      '--output-path',
      required=True,
      type=path_validator,
      help='Google Cloud Storage URI where the results will go. '
      'URI must start with "gs://". For example, "gs://bucket_name/object_name"'
  )
def AddAssetNamesArgs(parser):
  """Adds the required --asset-names list flag to the given argument parser."""
  help_text = (
      'A list of full names of the assets to get the history for. See '
      'https://cloud.google.com/apis/design/resource_names#full_resource_name '
      'for name format.')
  parser.add_argument(
      '--asset-names',
      metavar='ASSET_NAMES',
      required=True,
      type=arg_parsers.ArgList(),
      help=help_text)
def AddStartTimeArgs(parser):
  """Adds the required --start-time flag to the given argument parser."""
  help_text = (
      'Start time of the time window (inclusive) for the asset history. '
      'Must be later than 2018-10-02T00:00:00Z. '
      'See $ gcloud topic datetimes for information on time formats.')
  parser.add_argument(
      '--start-time',
      required=True,
      type=arg_parsers.Datetime.Parse,
      help=help_text)
def AddEndTimeArgs(parser):
  """Adds the optional --end-time flag to the given argument parser."""
  help_text = (
      'End time of the time window (exclusive) for the asset history. '
      'Defaults to current time if not specified. '
      'See $ gcloud topic datetimes for information on time formats.')
  parser.add_argument(
      '--end-time',
      required=False,
      type=arg_parsers.Datetime.Parse,
      help=help_text)
def AddOperationArgs(parser):
  """Adds the positional OPERATION_NAME argument to the given parser."""
  # Accept only ExportAssets operation names under a project or organization.
  name_validator = arg_parsers.RegexpValidator(
      r'^(projects|organizations)/[^/]+/operations/ExportAssets/[^/]+',
      'Operation name must be "projects/<project_id>/operations/'
      'ExportAssets/<operation_id>" or "organizations/<organization_id>/'
      'operations/ExportAssets/<operation_id>"')
  parser.add_argument(
      'id',
      metavar='OPERATION_NAME',
      help='Name of the operation to describe.',
      type=name_validator)
| 2,907 | 0 | 184 |
393a6e5e32f57b74dc75ac6425a9355ea05c3fba | 1,597 | py | Python | graph4ipy/jgfio.py | agapow/graph4ipy | 447a7361d5e78304460f3a46971cb62ab26d548f | [
"MIT"
] | null | null | null | graph4ipy/jgfio.py | agapow/graph4ipy | 447a7361d5e78304460f3a46971cb62ab26d548f | [
"MIT"
] | null | null | null | graph4ipy/jgfio.py | agapow/graph4ipy | 447a7361d5e78304460f3a46971cb62ab26d548f | [
"MIT"
] | null | null | null | """
Reading and writing JGF format graphs.
"""
### IMPORTS
import json
### CONSTANTS & DEFINES
### CODE ###
# XXX: maybe look at a custom decoder/loader?
| 27.534483 | 89 | 0.608641 | """
Reading and writing JGF format graphs.
"""
### IMPORTS
import json
### CONSTANTS & DEFINES
### CODE ###
class JgfReader (object):
# XXX: maybe look at a custom decoder/loader?
def parse (self, str_or_file):
# NOTE: try to decode multiple objects: MultiGraph, SingleGraph or Graph
# XXX: do we need specialised decoders for each?
if hasattr (str_or_file, 'read'):
buf = str_or_file.read()
else:
buf = str_or_file
json_obj = json.loads (buf)
# what am I looking at?
assert type (json_obj) == dict, \
"expected top level JSON object to be a dict, actually a '%s'" % type (json_obj)
json_keys = json_obj.keys()
if 'graphs' in json_keys:
return self.parse_multigraph (json_obj)
if 'graphs' in json_keys:
return self.parse_multigraph (json_obj)
if 'graphs' in json_keys:
return self.parse_multigraph (json_obj)
def parse_multigraph (self, json_obj):
graphs = [self.parse_graph (g) for g in json_obj['graphs']]
return MultiGraph (
graphs=graphs,
mgraph_type=json_obj.get ('type', None),
label==json_obj.get ('label', None),
**json_obj.get ('metadata', {})
)
def parse_singlegraph (self, json_obj):
graph = [self.parse_graph (g) for g in json_obj['graph']]
return MultiGraph (
graph=graph,
graph_type=json_obj.get ('type', None),
label==json_obj.get ('label', None),
**json_obj.get ('metadata', {})
)
def parse_graph (self, json_obj):
pass
| 1,304 | 4 | 127 |
3ff5e8b4281d311e8e44b442abc96ec7cc202046 | 7,765 | py | Python | proteus/tests/LS_with_edgeBased_EV/VOF/test_vof.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | proteus/tests/LS_with_edgeBased_EV/VOF/test_vof.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | proteus/tests/LS_with_edgeBased_EV/VOF/test_vof.py | dloney/proteus | 615cdf57f765b2e99bac904bb6eb71e39e58ab56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Test module for VOF with EV
"""
from __future__ import absolute_import
from builtins import object
from proteus.iproteus import *
from proteus import Comm
comm = Comm.get()
Profiling.logLevel=2
Profiling.verbose=True
import numpy as np
import tables
from . import thelper_vof
from . import thelper_vof_p
from . import thelper_vof_n
| 38.825 | 80 | 0.523889 | #!/usr/bin/env python
"""
Test module for VOF with EV
"""
from __future__ import absolute_import
from builtins import object
from proteus.iproteus import *
from proteus import Comm
comm = Comm.get()
Profiling.logLevel=2
Profiling.verbose=True
import numpy as np
import tables
from . import thelper_vof
from . import thelper_vof_p
from . import thelper_vof_n
class TestVOF(object):
    """Regression tests for VOF with several stabilization schemes.

    Each test configures one stabilization scheme on the shared ``thelper_vof``
    context, runs the numerical solution, and compares the saved ``u_t2`` field
    against an archived reference h5 file. The previously six copy-pasted
    run-and-compare bodies are factored into :meth:`_run_case`.
    """

    @classmethod
    def setup_class(cls):
        pass

    @classmethod
    def teardown_class(cls):
        pass

    def setup_method(self, method):
        """Initialize the test problem. """
        # NOTE(review): bare `reload` is a Python 2 builtin; under Python 3
        # this relies on it being injected by a star-import — confirm.
        reload(thelper_vof)
        self.pList = [thelper_vof_p]
        self.nList = [thelper_vof_n]
        self.sList = [default_s]
        self.so = default_so
        self.so.tnList = self.nList[0].tnList
        self._scriptdir = os.path.dirname(__file__)
        self.sim_names = []
        self.aux_names = []

    def teardown_method(self, method):
        pass

    def _run_case(self, suffix):
        """Reload the problem modules, solve, and compare ``u_t2`` against the
        archived reference file ``vof_level_3_<suffix>.h5``.

        Must be called after the stabilization options have been set on
        ``thelper_vof.ct`` so the reloads pick them up.
        """
        reload(thelper_vof_p)
        reload(thelper_vof_n)
        self.so.name = self.pList[0].name + "_" + suffix
        # NUMERICAL SOLUTION #
        ns = proteus.NumericalSolution.NS_base(self.so,
                                               self.pList,
                                               self.nList,
                                               self.sList,
                                               opts)
        self.sim_names.append(ns.modelList[0].name)
        ns.calculateSolution('vof')
        # COMPARE VS SAVED FILES #
        expected_path = 'comparison_files/vof_level_3_%s.h5' % suffix
        expected = tables.open_file(os.path.join(self._scriptdir,
                                                 expected_path))
        actual = tables.open_file('vof_level_3_%s.h5' % suffix, 'r')
        assert np.allclose(expected.root.u_t2,
                           actual.root.u_t2,
                           atol=1e-10)
        expected.close()
        actual.close()

    def test_supg(self):
        thelper_vof.ct.STABILIZATION_TYPE = 0  # SUPG
        thelper_vof.ct.FCT = False
        self._run_case("SUPG")

    def test_TaylorGalerkin(self):
        thelper_vof.ct.STABILIZATION_TYPE = 1  # Taylor Galerkin
        thelper_vof.ct.FCT = False
        self._run_case("TaylorGalerkin")

    def test_EV1(self):
        # Entropy viscosity with polynomial entropy.
        thelper_vof.ct.STABILIZATION_TYPE = 2  # EV
        thelper_vof.ct.ENTROPY_TYPE = 1  # polynomial
        thelper_vof.ct.cE = 1.0
        thelper_vof.ct.FCT = True
        self._run_case("EV1")

    def test_EV2(self):
        thelper_vof.ct.STABILIZATION_TYPE = 2  # EV
        # NOTE(review): the original comment said "logarithmic" but the value
        # (1) matches EV1's polynomial setting — confirm intended ENTROPY_TYPE.
        thelper_vof.ct.ENTROPY_TYPE = 1
        thelper_vof.ct.cE = 0.1
        thelper_vof.ct.FCT = True
        self._run_case("EV2")

    def test_SmoothnessBased(self):
        thelper_vof.ct.STABILIZATION_TYPE = 3  # Smoothness based
        thelper_vof.ct.FCT = True
        self._run_case("SmoothnessBased")

    def test_stab4(self):
        thelper_vof.ct.STABILIZATION_TYPE = 4  # Proposed by D.Kuzmin
        thelper_vof.ct.FCT = True
        self._run_case("stab4")
| 6,707 | 676 | 23 |
600f817de7371c0681cdf874cff69364d4981fec | 1,637 | py | Python | k2/python/k2/fsa_properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | 1 | 2021-03-03T03:30:40.000Z | 2021-03-03T03:30:40.000Z | k2/python/k2/fsa_properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | null | null | null | k2/python/k2/fsa_properties.py | Jarvan-Wang/k2 | 7f164ecb804d15006fd30e8564d80e0fa212f011 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
# Xiaomi Corporation (authors: Haowen Qiu)
#
# See ../../../LICENSE for clarification regarding multiple authors
import torch # noqa
import _k2
# The FSA properties are a bit-field; these constants can be used
# with '&' to determine the properties.
VALID = 0x01  # Valid from a formatting perspective
NONEMPTY = 0x02  # Nonempty as in, has at least one arc.
# FSA is top-sorted, but possibly with self-loops, dest_state >= src_state.
# (Bug fix: a stray trailing comma previously made this a 1-tuple `(0x04,)`,
# which breaks bitwise tests such as `p & TOPSORTED` with a TypeError.)
TOPSORTED = 0x04
TOPSORTED_AND_ACYCLIC = 0x08  # Fsa is topsorted, dest_state > src_state
# Fsa is arc-sorted: arcs leaving a state are sorted by label first and then
# on `dest_state`, see operator< in struct Arc in /k2/csrc/fsa.h (Note:
# labels are treated as uint32 for purpose of sorting!)
ARC_SORTED = 0x10
# Arcs leaving a given state are *strictly* sorted by label, i.e. no
# duplicates with the same label.
ARC_SORTED_AND_DETERMINISTIC = 0x20
EPSILON_FREE = 0x40  # Label zero (epsilon) is not present..
# True if there are no obvious signs of states not being accessible or
# co-accessible, i.e. states with no arcs entering them
ACCESSIBLE = 0x80
# True if there are no obvious signs of states not being co-accessible,
# i.e. states with no arcs leaving them
COACCESSIBLE = 0x0100
ALL = 0x01FF  # Union of all property bits above.
def to_str(p: int) -> str:
    '''Convert properties to a string for debug purpose.

    Args:
      p:
        An integer returned by :func:`get_properties`.

    Returns:
      A string representation of the input properties.
    '''
    # Formatting is delegated to the C++ extension module.
    return _k2.fsa_properties_as_str(p)
| 34.829787 | 80 | 0.709224 | # Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
# Xiaomi Corporation (authors: Haowen Qiu)
#
# See ../../../LICENSE for clarification regarding multiple authors
import torch # noqa
import _k2
# The FSA properties are a bit-field; these constants can be used
# with '&' to determine the properties.
VALID = 0x01 # Valid from a formatting perspective
NONEMPTY = 0x02 # Nonempty as in, has at least one arc.
TOPSORTED = 0x04, # FSA is top-sorted, but possibly with
# self-loops, dest_state >= src_state
TOPSORTED_AND_ACYCLIC = 0x08 # Fsa is topsorted, dest_state > src_state
ARC_SORTED = 0x10 # Fsa is arc-sorted: arcs leaving a state are are sorted by
# label first and then on `dest_state`, see operator< in
# struct Arc in /k2/csrc/fsa.h (Note: labels are treated as
# uint32 for purpose of sorting!)
ARC_SORTED_AND_DETERMINISTIC = 0x20 # Arcs leaving a given state are *strictly*
# sorted by label, i.e. no duplicates with
# the same label.
EPSILON_FREE = 0x40 # Label zero (epsilon) is not present..
ACCESSIBLE = 0x80 # True if there are no obvious signs
# of states not being accessible or
# co-accessible, i.e. states with no
# arcs entering them
COACCESSIBLE = 0x0100 # True if there are no obvious signs of
# states not being co-accessible, i.e.
# i.e. states with no arcs leaving them
ALL = 0x01FF
def to_str(p: int) -> str:
'''Convert properties to a string for debug purpose.
Args:
p:
An integer returned by :func:`get_properties`.
Returns:
A string representation of the input properties.
'''
return _k2.fsa_properties_as_str(p)
| 0 | 0 | 0 |
1d4cbd29ad7f4886c5b362bdeccbf0638428eb2a | 1,212 | py | Python | configs.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | 5 | 2022-03-23T21:27:42.000Z | 2022-03-24T19:57:27.000Z | configs.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | null | null | null | configs.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | 1 | 2022-03-23T21:27:44.000Z | 2022-03-23T21:27:44.000Z | # Database info
MONGODB_DB_NAME = "NXS"
MONGODB_MODELS_COLLECTION_NAME = "Models"
MONGODB_PIPELINES_COLLECTION_NAME = "Pipelines"
MONGODB_W4_MODEL_PROFILES_COLLECTION_NAME = "W4Profiles"
# Storage info
STORAGE_MODEL_PATH = "models"
STORAGE_PREPROC_PATH = "preprocessing"
STORAGE_POSTPROC_PATH = "postprocessing"
STORAGE_TRANSFORM_PATH = "transforming"
STORAGE_PREDEFINED_PREPROC_PATH = "w4preprocessing"
STORAGE_PREDEFINED_POSTPROC_PATH = "w4postprocessing"
STORAGE_PREDEFINED_TRANSFORM_PATH = "w4transforming"
STORAGE_PREDEFINED_EXTRAS_PATH = "w4extras"
# QUEUE INFO
| 29.560976 | 57 | 0.806931 | # Database info
MONGODB_DB_NAME = "NXS"
MONGODB_MODELS_COLLECTION_NAME = "Models"
MONGODB_PIPELINES_COLLECTION_NAME = "Pipelines"
MONGODB_W4_MODEL_PROFILES_COLLECTION_NAME = "W4Profiles"
# Storage info
STORAGE_MODEL_PATH = "models"
STORAGE_PREPROC_PATH = "preprocessing"
STORAGE_POSTPROC_PATH = "postprocessing"
STORAGE_TRANSFORM_PATH = "transforming"
STORAGE_PREDEFINED_PREPROC_PATH = "w4preprocessing"
STORAGE_PREDEFINED_POSTPROC_PATH = "w4postprocessing"
STORAGE_PREDEFINED_TRANSFORM_PATH = "w4transforming"
STORAGE_PREDEFINED_EXTRAS_PATH = "w4extras"
# QUEUE INFO
class GLOBAL_QUEUE_NAMES:
    """Well-known queue name constants shared across NXS components."""

    SCHEDULER = "nxs_scheduler"
    SCHEDULER_LOGS = "nxs_scheduler_logs"
    WORKLOAD_MANAGER = "nxs_workload_manager"
    BACKEND_LOGS = "nxs_backend_logs"
    BACKEND_MONITOR_LOGS = "nxs_backend_monitor_logs"
class NXS_CONFIG:
    """Global NXS configuration key constants.

    NOTE(review): the value looks like an environment-variable name —
    confirm against where it is read.
    """

    LOG_LEVEL = "NXS_LOG_LEVEL"
class NXS_BACKEND_CONFIG:
    """String-key constants used by NXS backend components."""

    ORIGINAL_REQUEST = "ORIGINAL_REQUEST"
    USER_METADATA = "USER_METADATA"
    FORWARD_INPUTS = "FORWARD_INPUTS"
class BACKEND_INTERNAL_CONFIG:
    """Internal per-task string-key constants for the backend."""

    TASK_SKIP_COMPUTE = "task_skip_compute"
    TASK_SKIP_COMPUTE_RESULT = "task_skip_compute_result"
    TASK_STATUS = "task_status"
    # NOTE(review): "errror" is misspelled, but this is a runtime key —
    # changing the value would break any consumer using the old spelling,
    # so it must be fixed in coordination with all readers/writers.
    TASK_ERROR_MSGS = "task_errror_msgs"
| 0 | 548 | 91 |
3df2e92f7e304451ae09047afa971ea7d8e328b5 | 1,226 | py | Python | chromium/tools/telemetry/telemetry/core/network_controller.py | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | 27 | 2016-04-27T01:02:03.000Z | 2021-12-13T08:53:19.000Z | chromium/tools/telemetry/telemetry/core/network_controller.py | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | 2 | 2017-03-09T09:00:50.000Z | 2017-09-21T15:48:20.000Z | chromium/tools/telemetry/telemetry/core/network_controller.py | wedataintelligence/vivaldi-source | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | [
"BSD-3-Clause"
] | 17 | 2016-04-27T02:06:39.000Z | 2019-12-18T08:07:00.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class NetworkController(object):
"""Control network settings and servers to simulate the Web.
Network changes include forwarding device ports to host platform ports.
Web Page Replay is used to record and replay HTTP/HTTPS responses.
"""
def SetReplayArgs(self,
archive_path,
wpr_mode,
netsim,
extra_wpr_args,
make_javascript_deterministic=False):
"""Save the arguments needed for replay."""
self._network_controller_backend.SetReplayArgs(
archive_path, wpr_mode, netsim, extra_wpr_args,
make_javascript_deterministic)
def UpdateReplayForExistingBrowser(self):
"""Restart replay if needed for an existing browser.
TODO(slamm): Drop this method when the browser_backend dependencies are
moved to the platform. https://crbug.com/423962
"""
self._network_controller_backend.UpdateReplay()
| 36.058824 | 75 | 0.709625 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class NetworkController(object):
"""Control network settings and servers to simulate the Web.
Network changes include forwarding device ports to host platform ports.
Web Page Replay is used to record and replay HTTP/HTTPS responses.
"""
def __init__(self, network_controller_backend):
self._network_controller_backend = network_controller_backend
def SetReplayArgs(self,
archive_path,
wpr_mode,
netsim,
extra_wpr_args,
make_javascript_deterministic=False):
"""Save the arguments needed for replay."""
self._network_controller_backend.SetReplayArgs(
archive_path, wpr_mode, netsim, extra_wpr_args,
make_javascript_deterministic)
def UpdateReplayForExistingBrowser(self):
"""Restart replay if needed for an existing browser.
TODO(slamm): Drop this method when the browser_backend dependencies are
moved to the platform. https://crbug.com/423962
"""
self._network_controller_backend.UpdateReplay()
| 92 | 0 | 25 |
55281a41f46df9fd13977f6cbe8b7bf74aadca4f | 4,995 | py | Python | weatherbot.py | kraused53/Project-Hermes | db1c0f759e9e59e4b926c7927726c5f458ef7db0 | [
"OML"
] | null | null | null | weatherbot.py | kraused53/Project-Hermes | db1c0f759e9e59e4b926c7927726c5f458ef7db0 | [
"OML"
] | null | null | null | weatherbot.py | kraused53/Project-Hermes | db1c0f759e9e59e4b926c7927726c5f458ef7db0 | [
"OML"
] | null | null | null | import OPEN_WEATHER_KEYS
from requests import get, exceptions
from datetime import datetime
# ----------------------------------------------------------------------------
"""
Use the datetime library to convert an integer unix timestamp and a unix
timezone offset to calculate string formated time and date.
Inputs:
dt -> Int
unix time-code
tz -> Int
unix time-code timexone offset
AM_PM -> Bool
True: Convert to 12 hour clock
Flase: Convert to 24 hour clock
Output:
Returns given time data as a formated string
"""
# ----------------------------------------------------------------------------
"""
Use the requests library to make an API call to Open Weather. If the
request is successful, return the requestd data as a JSON data set.
If the request fails, return None data type. The None response is to
be handled by the caller of the function
"""
# ----------------------------------------------------------------------------
"""
When 'weather-bot.py' is run as a program, this is where the program
starts. If another python file is currently the active project,
this section is ignored.
This is where I will test the weather-bot before it is added to the main
Hermes Project.
"""
if __name__ == '__main__':
weather_json = get_weather_json(OPEN_WEATHER_KEYS.lat, OPEN_WEATHER_KEYS.lon)
if weather_json is not None:
if 'current' in weather_json:
print('Current Weather Forecast:')
# Format available items
# Format time data
if 'dt' in weather_json['current']:
print('\tCurrent Time:\t'+convert_time(weather_json['current']['dt'], weather_json['timezone_offset'], True)[11:])
if 'sunrise' in weather_json['current']:
print('\tSunrise:\t'+convert_time(weather_json['current']['sunrise'], weather_json['timezone_offset'], True)[11:])
if 'sunset' in weather_json['current']:
print('\tSunset:\t\t'+convert_time(weather_json['current']['sunset'], weather_json['timezone_offset'], True)[11:])
# Add line between time and temp data
print(' ')
# Format temperature data
if 'temp' in weather_json['current']:
print('\tCurrent Temp:\t'+str(weather_json['current']['temp'])+' F')
if 'feels_like' in weather_json['current']:
print('\tFeels Like:\t'+str(weather_json['current']['feels_like'])+' F')
if 'dew_point' in weather_json['current']:
print('\tDew Point:\t'+str(weather_json['current']['dew_point'])+' F')
if 'pressure' in weather_json['current']:
print('\tPressure:\t'+str(weather_json['current']['pressure'])+' hPa')
# Add line between temp and sky data
print(' ')
# Format Sky Data
if 'uvi' in weather_json['current']:
print('\tUV Index:\t'+str(weather_json['current']['uvi'])+' ')
if 'clouds' in weather_json['current']:
print('\tCloud Cover:\t'+str(weather_json['current']['clouds'])+' %')
if 'humidity' in weather_json['current']:
print('\tHumidity:\t'+str(weather_json['current']['humidity'])+' %')
if 'visibility' in weather_json['current']:
print('\tVisibility:\t'+str(weather_json['current']['visibility'])+' meters')
if 'weather' in weather_json['current']:
if 'icon' in weather_json['current']['weather'][0]:
icon_url = 'http://openweathermap.org/img/wn/' + \
weather_json['current']['weather'][0]['icon'] + \
'@2x.png'
print(icon_url) | 40.609756 | 130 | 0.545946 | import OPEN_WEATHER_KEYS
from requests import get, exceptions
from datetime import datetime
# ----------------------------------------------------------------------------
"""
Use the datetime library to convert an integer unix timestamp and a unix
timezone offset to calculate string formated time and date.
Inputs:
dt -> Int
unix time-code
tz -> Int
unix time-code timexone offset
AM_PM -> Bool
True: Convert to 12 hour clock
Flase: Convert to 24 hour clock
Output:
Returns given time data as a formated string
"""
def convert_time(dt, tz, AM_PM):
if not isinstance(dt, int):
dt = int(dt)
if not isinstance(tz, int):
tz = int(tz)
if AM_PM:
t = datetime.utcfromtimestamp(dt+tz).strftime('%Y-%m-%d %I:%M:%S %p')
else:
t = datetime.utcfromtimestamp(dt+tz).strftime('%Y-%m-%d %H:%M:%S')
return t
# ----------------------------------------------------------------------------
"""
Use the requests library to make an API call to Open Weather. If the
request is successful, return the requestd data as a JSON data set.
If the request fails, return None data type. The None response is to
be handled by the caller of the function
"""
def get_weather_json(lat = '40.7128', lon = '-74.0030', exclusions = ''):
API_URL = 'https://api.openweathermap.org/data/2.5/onecall?' +\
'lat=' + str(lat) + '&lon=' + str(lon) +\
'&exclude=' + exclusions +\
'&units=imperial' +\
'&appid=' + OPEN_WEATHER_KEYS.OPEN_WEATHER_API_KEY
# print(API_URL)
try:
response = get(API_URL)
except exceptions.RequestException as e: # This is the correct syntax
raise SystemExit(e)
# Check to make sure response.get() worked
if response is not None:
# Check for valid response
if response.status_code == 200:
return response.json()
else:
return None
# ----------------------------------------------------------------------------
"""
When 'weather-bot.py' is run as a program, this is where the program
starts. If another python file is currently the active project,
this section is ignored.
This is where I will test the weather-bot before it is added to the main
Hermes Project.
"""
if __name__ == '__main__':
weather_json = get_weather_json(OPEN_WEATHER_KEYS.lat, OPEN_WEATHER_KEYS.lon)
if weather_json is not None:
if 'current' in weather_json:
print('Current Weather Forecast:')
# Format available items
# Format time data
if 'dt' in weather_json['current']:
print('\tCurrent Time:\t'+convert_time(weather_json['current']['dt'], weather_json['timezone_offset'], True)[11:])
if 'sunrise' in weather_json['current']:
print('\tSunrise:\t'+convert_time(weather_json['current']['sunrise'], weather_json['timezone_offset'], True)[11:])
if 'sunset' in weather_json['current']:
print('\tSunset:\t\t'+convert_time(weather_json['current']['sunset'], weather_json['timezone_offset'], True)[11:])
# Add line between time and temp data
print(' ')
# Format temperature data
if 'temp' in weather_json['current']:
print('\tCurrent Temp:\t'+str(weather_json['current']['temp'])+' F')
if 'feels_like' in weather_json['current']:
print('\tFeels Like:\t'+str(weather_json['current']['feels_like'])+' F')
if 'dew_point' in weather_json['current']:
print('\tDew Point:\t'+str(weather_json['current']['dew_point'])+' F')
if 'pressure' in weather_json['current']:
print('\tPressure:\t'+str(weather_json['current']['pressure'])+' hPa')
# Add line between temp and sky data
print(' ')
# Format Sky Data
if 'uvi' in weather_json['current']:
print('\tUV Index:\t'+str(weather_json['current']['uvi'])+' ')
if 'clouds' in weather_json['current']:
print('\tCloud Cover:\t'+str(weather_json['current']['clouds'])+' %')
if 'humidity' in weather_json['current']:
print('\tHumidity:\t'+str(weather_json['current']['humidity'])+' %')
if 'visibility' in weather_json['current']:
print('\tVisibility:\t'+str(weather_json['current']['visibility'])+' meters')
if 'weather' in weather_json['current']:
if 'icon' in weather_json['current']['weather'][0]:
icon_url = 'http://openweathermap.org/img/wn/' + \
weather_json['current']['weather'][0]['icon'] + \
'@2x.png'
print(icon_url) | 1,004 | 0 | 44 |
bb635bcee607470ded8929e519323a7c0c4e2554 | 19,001 | py | Python | Evolife/Ecology/Alliances.py | antoorofino/Emergence_in_complex_systems_EVOLIFE | c36d3883326ea91b8d890666bf2b37b599141945 | [
"MIT"
] | null | null | null | Evolife/Ecology/Alliances.py | antoorofino/Emergence_in_complex_systems_EVOLIFE | c36d3883326ea91b8d890666bf2b37b599141945 | [
"MIT"
] | null | null | null | Evolife/Ecology/Alliances.py | antoorofino/Emergence_in_complex_systems_EVOLIFE | c36d3883326ea91b8d890666bf2b37b599141945 | [
"MIT"
] | null | null | null | #!/usr/bin/env python##############################################################################
#!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
##############################################################################
# Alliances #
##############################################################################
""" EVOLIFE: Module Alliances:
Individuals inherit this class
which determines who is friend with whom
"""
import sys
if __name__ == '__main__': sys.path.append('../..') # for tests
from Evolife.Tools.Tools import error
class club:
""" class club: list of individuals associated with their performance.
The performance is used to decide who gets acquainted with whom.
"""
# def members(self): return self.__members
def minimal(self):
" returns the minimal performance among members "
if self.size(): return min([T[1] for T in self])
return -1
def maximal(self):
" returns the maximal performance among members "
if self.size(): return max([T[1] for T in self])
return -1
def best(self):
" returns the member with the best performance "
# if self.size(): return self.ordered()[0]
# if self.size(): return max([T for T in self.__members], key=lambda x: x[1])[0]
if self.size(): return max(self, key=lambda x: x[1])[0]
return None
def worst(self):
" returns the member with the worst performance "
if self.size(): return self.ordered()[-1]
return None
def accepts(self, performance, conservative=True):
" signals that the new individual can be accepted into the club "
if self.size() >= self.sizeMax:
if conservative and performance <= self.minimal():
return -1 # equality: priority given to former members
elif performance < self.minimal(): return -1
# returning the rank that the candidate would be assigned
# return sorted([performance] + self.performances(),reverse=True).index(performance)
rank = self.size() - sorted([performance] + self.performances()).index(performance)
if rank <= self.sizeMax: return rank
error('Alliances', 'accept')
def exits(self, oldMember):
" a member goes out from the club "
for (M,Perf) in self.__members[:]: # safe to copy the list as it is changed within the loop
if M == oldMember:
self.__members.remove((oldMember,Perf))
return True
print('exiled: %s' % str(oldMember))
error('Alliances: non-member attempting to quit a club')
return False
def weakening(self, Factor = 0.9): # temporary value
" all performances are reduced (represents temporal erosion) "
for (M,Perf) in self.__members[:]: # safe to copy the list as it is changed within the loop
self.__members.remove((M, Perf))
self.__members.append((M, Perf * Factor))
class Friend:
""" class Friend: defines an individual's acqaintances
"""
#################################
# asymmetrical links #
#################################
def affiliable(self, F_perf, conservative=True):
" Checks whether affiliation is possible "
return self.friends.accepts(F_perf, conservative=conservative) >= 0
def follow(self, F, F_perf, conservative=True, Quit=None):
""" the individual wants to be F's disciple due to F's performance
"""
# print self.ID, "wants to follows", (F.ID, F_perf)
if self.affiliable(F_perf, conservative=conservative):
# the new friend is good enough
RF = self.friends.enters(F, F_perf, conservative=conservative) # returns ejected old friend
if RF is not None:
# print('redundant friend of %s: %s' % (self, RF))
# print('self: %s' % self, ">>> self's friends: %s " % map(str, Friend.social_signature(self)))
if Quit is None: Quit = self.quit_
Quit(RF) # some redundant friend is disowned
return True
else: return False
# R = Friend in self.friends.names()
# if R: print self.ID, 'is already following', Friend.ID
def quit_(self, Friend=None):
""" the individual no longer follows its friend
"""
if Friend is None: Friend = self.friends.worst()
if Friend is not None:
# print(self, 'quits ', Friend)
self.friends.exits(Friend)
def checkNetwork(self, membershipFunction=None):
" updates links by forgetting friends that are gone "
for F in self:
if not membershipFunction(F): self.quit_(F)
def detach(self):
""" The individual quits all its friends """
for F in self: self.quit_(F)
#################################
# symmetrical links #
#################################
def get_friend(self, Offer, Partner, PartnerOffer):
" Checks mutual acceptance before establishing friendship "
if self.acquaintable(Offer, Partner, PartnerOffer):
if not self.follow(Partner, PartnerOffer, Quit=self.end_friendship):
error("Friend: self changed mind")
if not Partner.follow(self, Offer, Quit=Partner.end_friendship):
error("Friend: Partner changed mind")
return True
return False
def acquainted(self, Partner):
" same as get_friend/3 with no performance "
return self.get_friend(0, Partner, 0)
def end_friendship(self, Partner):
" Partners remove each other from their address book "
# print('\nsplitting up', self.ID, Partner.ID)
self.quit_(Partner)
Partner.quit_(self)
def forgetAll(self):
""" The individual quits its friends """
for F in self: self.end_friendship(F)
class Follower(Friend):
""" Augmented version of Friends for asymmetrical links - replaces 'Alliances'.
'Follower' in addition knows about who is following self
"""
def F_affiliable(self, perf, Guru, G_perf, conservative=True):
" Checks whether affiliation is possible "
A = self.affiliable(G_perf, conservative=conservative) # Guru is acceptable and ...
if self.followers is not None:
A &= Guru.followers.affiliable(perf, conservative=conservative) # ...self acceptable to Guru
return A
def F_follow(self, perf, G, G_perf, conservative=True):
""" the individual wants to be G's disciple because of some of G's performance
G may evaluate the individual's performance too
"""
# print '.',
if self.F_affiliable(perf, G, G_perf, conservative=conservative):
# ------ the new guru is good enough and the individual is good enough for the guru
# print('%s (%s) is about to follow %s (%s)' % (self, list(map(str, self.social_signature())), G, list(map(str, G.social_signature()))))
if not self.follow(G, G_perf, conservative=conservative, Quit=self.G_quit_):
error("Alliances", "inconsistent guru")
if G.followers is not None:
if not G.followers.follow(self, perf, conservative=conservative, Quit=G.F_quit_):
error('Alliances', "inconsistent self")
# self.consistency()
# G.consistency()
return True
else: return False
def G_quit_(self, Guru):
""" the individual no longer follows its guru
"""
# self.consistency()
# Guru.consistency()
self.quit_(Guru)
if Guru.followers is not None: Guru.followers.quit_(self)
def F_quit_(self, Follower):
""" the individual does not want its disciple any longer
"""
if self.followers is not None:
self.followers.quit_(Follower)
Follower.quit_(self)
else: error('Alliances', 'No Follower whatsoever')
def get_friend(self, Offer, Partner, PartnerOffer):
" Checks mutual acceptance before establishing friendship "
if self.acquaintable(Offer, Partner, PartnerOffer):
if not self.F_follow(Offer, Partner, PartnerOffer):
error("Friend: self changed mind")
if not Partner.F_follow(PartnerOffer, self, Offer):
error("Friend: Partner changed mind")
return True
return False
def end_friendship(self, Partner):
" Partners remove each other from their address book "
# print('\nsplitting up', self.ID, Partner.ID)
# print(self.consistency(), Partner.consistency())
self.G_quit_(Partner)
Partner.G_quit_(self)
def detach(self):
""" The individual quits its guru and its followers
"""
for G in self.names(): self.G_quit_(G) # G is erased from self's guru list
if self.names() != []: error("Alliances: recalcitrant guru")
if self.followers is not None:
for F in self.followers.names(): self.F_quit_(F) # self is erased from F's guru list
if self.followers.names() != []: error("Alliances: sticky followers")
# # # # class Alliances(object):
# # # # """ class Alliances: each agent stores both its gurus and its followers
# # # # (This is an old class, kept for compatibility (and not tested) """
# # # # def __init__(self, MaxGurus, MaxFollowers):
# # # # self.gurus = Friend(MaxGurus)
# # # # self.followers = Friend(MaxFollowers)
# # # # #################################
# # # # # hierarchical links #
# # # # #################################
# # # # def affiliable(self, perf, Guru, G_perf, conservative=True):
# # # # " Checks whether affiliation is possible "
# # # # return self.gurus.affiliable(G_perf, conservative=conservative) \
# # # # and Guru.followers.affiliable(perf, conservative=conservative)
# # # # def follow(self, perf, G, G_perf, conservative=True):
# # # # """ the individual wants to be G's disciple because of some of G's performance
# # # # G may evaluate the individual's performance too
# # # # """
# # # # if self.affiliable(perf, G, G_perf, conservative=conservative):
# # # # # the new guru is good enough and the individual is good enough for the guru
# # # # self.gurus.follow(G, G_perf, conservative=conservative, Quit=self.quit_)
# # # # G.followers.follow(self, perf, conservative=conservative, Quit=G.quit_)
# # # # return True
# # # # else: return False
# # # # def quit_(self, Guru):
# # # # """ the individual no longer follows its guru
# # # # """
# # # # Guru.followers.quit_(self)
# # # # self.gurus.quit_(Guru)
# # # # def best_friend(self): return self.gurus.best_friend()
# # # # def friends(self, ordered=True): return self.gurus.Friends(ordered=ordered)
# # # # def nbFriends(self): return self.gurus.nbFriends()
# # # # def nbFollowers(self): return self.followers.nbFriends()
# # # # def lessening_friendship(self, Factor=0.9):
# # # # self.gurus.lessening_friendship(Factor)
# # # # def forgetAll(self):
# # # # self.gurus.forgetAll()
# # # # self.followers.forgetAll()
# # # # #################################
# # # # # symmetrical links #
# # # # #################################
# # # # def acquaintable(self, Partner, Deal):
# # # # return self.affiliable(Deal, Partner, Deal) and Partner.affiliable(Deal, self, Deal)
# # # # def get_friend(self, Offer, Partner, Return=None):
# # # # " Checks mutual acceptance before establishing friendship "
# # # # if Return is None: Return = Offer
# # # # if self.affiliable(Offer, Partner, Return) and Partner.affiliable(Return, self, Offer):
# # # # self.follow(Offer, Partner, Return)
# # # # Partner.follow(Return, self, Offer)
# # # # return True
# # # # return False
# # # # def best_friend_symmetry(self):
# # # # " Checks whether self is its best friend's friend "
# # # # BF = self.best_friend()
# # # # if BF: return self == BF.best_friend()
# # # # return False
# # # # def restore_symmetry(self):
# # # # " Makes sure that self is its friends' friend - Useful for symmmtrical relations "
# # # # for F in self.gurus.names()[:]: # need to copy the list, as it is modified within the loop
# # # # #print 'checking symmetry for %d' % F.ID, F.gurus.names()
# # # # if self not in F.gurus.names():
# # # # print('%s quits %s ***** because absent from %s' % (self.ID, F.ID, str(F.gurus.names())))
# # # # self.quit_(F) # no hard feelings
# # # # #################################
# # # # # link processing #
# # # # #################################
# # # # def detach(self):
# # # # """ The individual quits its guru and its followers
# # # # """
# # # # for G in self.gurus.names(): self.quit_(G)
# # # # for F in self.followers.names(): F.quit_(self)
# # # # if self.gurus.names() != []: error("Alliances: recalcitrant guru")
# # # # if self.followers.names() != []: error("Alliances: sticky followers")
# # # # def consistency(self):
# # # # if self.gurus.size() > self.gurus.sizeMax():
# # # # error("Alliances", "too many gurus: %d" % self.gurus.size())
# # # # if self.followers.size() > self.followers.sizeMax():
# # # # error("Alliances", "too many followers: %d" % self.followers.size())
# # # # for F in self.followers.names():
# # # # if self not in F.gurus.names():
# # # # error("Alliances: non following followers")
# # # # if self == F: error("Alliances: Narcissism")
# # # # ## print self.ID, ' is in ', F.ID, "'s guru list: ", [G.ID for G in F.gurus.names()]
# # # # for G in self.gurus.names():
# # # # if self not in G.followers.names():
# # # # # print 'self: ',str(self), "self's gurus: ",Alliances.social_signature(self)
# # # # # print 'guru: ',str(G), 'its followers: ',[str(F) for F in G.followers.names()]
# # # # error("Alliances: unaware guru")
# # # # if self == G: error("Alliances: narcissism")
# # # # ## print self.ID, ' is in ', G.ID, "'s follower list: ", [F.ID for F in G.followers.names()]
# # # # ## print '\t', self.ID, ' OK'
# # # # if self.gurus.size() > 0:
# # # # if not self.gurus.friends.present((self.gurus.best(), self.gurus.friends.maximal())):
# # # # error("Alliances: best guru is ghost")
# # # # def social_signature(self):
# # # # ## return [F.ID for F in self.gurus.names()]
# # # # return self.gurus.Friends()
# # # # def signature(self): return self.social_signature()
###############################
# Local Test #
###############################
if __name__ == "__main__":
print(__doc__ + '\n')
print(Friend.__doc__ + '\n\n')
raw_input('[Return]')
__author__ = 'Dessalles'
| 38.541582 | 139 | 0.625651 | #!/usr/bin/env python##############################################################################
#!/usr/bin/env python3
##############################################################################
# EVOLIFE http://evolife.telecom-paris.fr Jean-Louis Dessalles #
# Telecom Paris 2021 www.dessalles.fr #
# -------------------------------------------------------------------------- #
# License: Creative Commons BY-NC-SA #
##############################################################################
##############################################################################
# Alliances #
##############################################################################
""" EVOLIFE: Module Alliances:
Individuals inherit this class
which determines who is friend with whom
"""
import sys
if __name__ == '__main__': sys.path.append('../..') # for tests
from Evolife.Tools.Tools import error
class club:
""" class club: list of individuals associated with their performance.
The performance is used to decide who gets acquainted with whom.
"""
def __init__(self, sizeMax = 0):
self.sizeMax = sizeMax
if sizeMax == 0:
self.sizeMax = sys.maxsize
self.reset()
def reset(self):
self.__members = [] # list of couples (individual,performance)
# def members(self): return self.__members
def names(self): return [T[0] for T in self]
def performances(self): return [T[1] for T in self]
def present(self, MemberPerf): return MemberPerf in self
def ordered(self, ordered=True):
if ordered:
return [T[0] for T in sorted(self.__members, key = lambda x: x[1], reverse=True)]
return [T[0] for T in self]
def rank(self, Member):
try: return self.ordered().index(Member)
except ValueError: return -1
def performance(self, Member):
try: return self.__members[self.names().index(Member)][1]
except ValueError: error('Alliances', 'Searching for non-member')
def size(self): return len(self.__members)
def minimal(self):
" returns the minimal performance among members "
if self.size(): return min([T[1] for T in self])
return -1
def maximal(self):
" returns the maximal performance among members "
if self.size(): return max([T[1] for T in self])
return -1
def best(self):
" returns the member with the best performance "
# if self.size(): return self.ordered()[0]
# if self.size(): return max([T for T in self.__members], key=lambda x: x[1])[0]
if self.size(): return max(self, key=lambda x: x[1])[0]
return None
def worst(self):
" returns the member with the worst performance "
if self.size(): return self.ordered()[-1]
return None
def accepts(self, performance, conservative=True):
" signals that the new individual can be accepted into the club "
if self.size() >= self.sizeMax:
if conservative and performance <= self.minimal():
return -1 # equality: priority given to former members
elif performance < self.minimal(): return -1
# returning the rank that the candidate would be assigned
# return sorted([performance] + self.performances(),reverse=True).index(performance)
rank = self.size() - sorted([performance] + self.performances()).index(performance)
if rank <= self.sizeMax: return rank
error('Alliances', 'accept')
def enters(self, newMember, performance, conservative=True):
if self.accepts(performance, conservative=conservative) >= 0:
# First, check whether newMember is not already a member
if newMember in self.names():
self.exits(newMember) # to prepare the come-back
if self.size() >= self.sizeMax:
worst = self.worst() # the redundant individual will be ejected
else: worst = None
self.__members.append((newMember, performance))
return worst
error("Alliances: unchecked admittance")
return None
def exits(self, oldMember):
" a member goes out from the club "
for (M,Perf) in self.__members[:]: # safe to copy the list as it is changed within the loop
if M == oldMember:
self.__members.remove((oldMember,Perf))
return True
print('exiled: %s' % str(oldMember))
error('Alliances: non-member attempting to quit a club')
return False
def weakening(self, Factor = 0.9): # temporary value
" all performances are reduced (represents temporal erosion) "
for (M,Perf) in self.__members[:]: # safe to copy the list as it is changed within the loop
self.__members.remove((M, Perf))
self.__members.append((M, Perf * Factor))
def __iter__(self): return iter(self.__members)
def __len__(self): return len(self.__members)
def __str__(self):
# return "[" + '-'.join([T.ID for T in self.ordered()]) + "]"
return "[" + '-'.join([str(T) for T in self.names()]) + "]"
class Friend:
""" class Friend: defines an individual's acqaintances
"""
def __init__(self, MaxFriends=1):
self.friends = club(MaxFriends)
#################################
# asymmetrical links #
#################################
def accepts(self, F_perf): return self.friends.accepts(F_perf)
def affiliable(self, F_perf, conservative=True):
" Checks whether affiliation is possible "
return self.friends.accepts(F_perf, conservative=conservative) >= 0
def follow(self, F, F_perf, conservative=True, Quit=None):
""" the individual wants to be F's disciple due to F's performance
"""
# print self.ID, "wants to follows", (F.ID, F_perf)
if self.affiliable(F_perf, conservative=conservative):
# the new friend is good enough
RF = self.friends.enters(F, F_perf, conservative=conservative) # returns ejected old friend
if RF is not None:
# print('redundant friend of %s: %s' % (self, RF))
# print('self: %s' % self, ">>> self's friends: %s " % map(str, Friend.social_signature(self)))
if Quit is None: Quit = self.quit_
Quit(RF) # some redundant friend is disowned
return True
else: return False
def follows(self, Friend): return Friend in self.names()
# R = Friend in self.friends.names()
# if R: print self.ID, 'is already following', Friend.ID
def quit_(self, Friend=None):
""" the individual no longer follows its friend
"""
if Friend is None: Friend = self.friends.worst()
if Friend is not None:
# print(self, 'quits ', Friend)
self.friends.exits(Friend)
def best_friend(self): return self.friends.best()
def Max(self): return max(0, self.friends.maximal())
def Friends(self, ordered=True): return self.friends.ordered(ordered=ordered)
def names(self): return self.friends.ordered(ordered=False)
def rank(self, Friend): return self.friends.rank(Friend)
def nbFriends(self): return self.friends.size()
def size(self): return self.friends.size()
def sizeMax(self): return self.friends.sizeMax
def lessening_friendship(self, Factor=0.9):
self.friends.weakening(Factor)
def checkNetwork(self, membershipFunction=None):
" updates links by forgetting friends that are gone "
for F in self:
if not membershipFunction(F): self.quit_(F)
def detach(self):
""" The individual quits all its friends """
for F in self: self.quit_(F)
#################################
# symmetrical links #
#################################
def acquaintable(self, Offer, Partner, PartnerOffer):
return self.affiliable(PartnerOffer) and Partner.affiliable(Offer)
def get_friend(self, Offer, Partner, PartnerOffer):
" Checks mutual acceptance before establishing friendship "
if self.acquaintable(Offer, Partner, PartnerOffer):
if not self.follow(Partner, PartnerOffer, Quit=self.end_friendship):
error("Friend: self changed mind")
if not Partner.follow(self, Offer, Quit=Partner.end_friendship):
error("Friend: Partner changed mind")
return True
return False
def acquainted(self, Partner):
" same as get_friend/3 with no performance "
return self.get_friend(0, Partner, 0)
def end_friendship(self, Partner):
" Partners remove each other from their address book "
# print('\nsplitting up', self.ID, Partner.ID)
self.quit_(Partner)
Partner.quit_(self)
def forgetAll(self):
""" The individual quits its friends """
for F in self: self.end_friendship(F)
def __iter__(self): return iter(self.friends.names())
def __len__(self): return len(self.friends)
def social_signature(self):
# return [F.ID for F in self.friends.names()]
return self.friends.ordered()
def signature(self): return self.social_signature()
def __str__(self):
return str(self.friends)
class Follower(Friend):
    """ Augmented version of Friends for asymmetrical links - replaces 'Alliances'.
        'Follower' in addition knows about who is following self
    """
    def __init__(self, MaxGurus, MaxFollowers=0):
        # Inherited Friend structure holds the gurus self follows.
        Friend.__init__(self, MaxGurus)
        if MaxFollowers:
            self.followers = Friend(MaxFollowers) # 'Friend' used as a mirror class to keep track of followers
        else: self.followers = None
    def F_affiliable(self, perf, Guru, G_perf, conservative=True):
        " Checks whether affiliation is possible "
        A = self.affiliable(G_perf, conservative=conservative) # Guru is acceptable and ...
        if self.followers is not None:
            A &= Guru.followers.affiliable(perf, conservative=conservative) # ...self acceptable to Guru
        return A
    def F_follow(self, perf, G, G_perf, conservative=True):
        """ the individual wants to be G's disciple because of some of G's performance
            G may evaluate the individual's performance too
        """
        # print '.',
        if self.F_affiliable(perf, G, G_perf, conservative=conservative):
            # ------ the new guru is good enough and the individual is good enough for the guru
            # print('%s (%s) is about to follow %s (%s)' % (self, list(map(str, self.social_signature())), G, list(map(str, G.social_signature()))))
            # Both sides are updated; failure of either update is a logic error.
            if not self.follow(G, G_perf, conservative=conservative, Quit=self.G_quit_):
                error("Alliances", "inconsistent guru")
            if G.followers is not None:
                if not G.followers.follow(self, perf, conservative=conservative, Quit=G.F_quit_):
                    error('Alliances', "inconsistent self")
            # self.consistency()
            # G.consistency()
            return True
        else: return False
    def G_quit_(self, Guru):
        """ the individual no longer follows its guru
        """
        # self.consistency()
        # Guru.consistency()
        # Remove the link on both ends to keep the mirror structures in sync.
        self.quit_(Guru)
        if Guru.followers is not None: Guru.followers.quit_(self)
    def F_quit_(self, Follower):
        """ the individual does not want its disciple any longer
        """
        if self.followers is not None:
            self.followers.quit_(Follower)
            Follower.quit_(self)
        else: error('Alliances', 'No Follower whatsoever')
    def get_friend(self, Offer, Partner, PartnerOffer):
        " Checks mutual acceptance before establishing friendship "
        # Overrides Friend.get_friend to use the asymmetry-aware F_follow.
        if self.acquaintable(Offer, Partner, PartnerOffer):
            if not self.F_follow(Offer, Partner, PartnerOffer):
                error("Friend: self changed mind")
            if not Partner.F_follow(PartnerOffer, self, Offer):
                error("Friend: Partner changed mind")
            return True
        return False
    def end_friendship(self, Partner):
        " Partners remove each other from their address book "
        # print('\nsplitting up', self.ID, Partner.ID)
        # print(self.consistency(), Partner.consistency())
        self.G_quit_(Partner)
        Partner.G_quit_(self)
    def nbFollowers(self):
        """ Number of individuals currently following self """
        return self.followers.nbFriends()
    def follower_rank(self, Friend):
        """ Rank of Friend among self's followers, or -1 if followers are not tracked """
        if self.followers: return self.followers.rank(Friend)
        return -1
    def forgetAll(self):
        # With followers tracked, a full detach is required to keep both sides consistent.
        if self.followers is None: Friend.forgetAll(self)
        else: self.detach()
    def detach(self):
        """ The individual quits its guru and its followers
        """
        for G in self.names(): self.G_quit_(G) # G is erased from self's guru list
        if self.names() != []: error("Alliances: recalcitrant guru")
        if self.followers is not None:
            for F in self.followers.names(): self.F_quit_(F) # self is erased from F's guru list
            if self.followers.names() != []: error("Alliances: sticky followers")
    def consistency(self):
        """ Sanity check: every follower/guru link must be mirrored on the other side """
        # if self.size() > self.sizeMax():
            # error("Alliances", "too many gurus: %d" % self.friends.size())
        # if self.followers.size() > self.followers.sizeMax():
            # error("Alliances", "too many followers: %d" % self.followers.friends.size())
        for F in self.followers:
            if self not in F:
                print('self: %s' % self)
                print("self's followers: %s" % list(map(str, self.followers.names())))
                print('follower: %s' % F)
                print('its gurus: %s' % list(map(str, F.friends.names())))
                error("Alliances: non following followers")
            if self == F: error("Alliances: Narcissism")
            ## print self.ID, ' is in ', F.ID, "'s guru list: ", [G.ID for G in F.gurus.names()]
        for G in self:
            if self not in G.followers:
                print('\n\nself: %s' % self)
                print("self's gurus: %s" % list(map(str, self.friends.names())))
                print('guru: %s' % G)
                print('its followers: %s' % list(map(str, G.followers.names())))
                error("Alliances: unaware guru")
            if self == G: error("Alliances: narcissism")
            ## print self.ID, ' is in ', G.ID, "'s follower list: ", [F.ID for F in G.followers.names()]
        ## print '\t', self.ID, ' OK'
        if self.friends.size() > 0:
            if not self.friends.present((self.friends.best(), self.friends.maximal())):
                error("Alliances: best guru is ghost")
        return ('%s consistent' % self.ID)
# # # # class Alliances(object):
# # # # """ class Alliances: each agent stores both its gurus and its followers
# # # # (This is an old class, kept for compatibility (and not tested) """
# # # # def __init__(self, MaxGurus, MaxFollowers):
# # # # self.gurus = Friend(MaxGurus)
# # # # self.followers = Friend(MaxFollowers)
# # # # #################################
# # # # # hierarchical links #
# # # # #################################
# # # # def affiliable(self, perf, Guru, G_perf, conservative=True):
# # # # " Checks whether affiliation is possible "
# # # # return self.gurus.affiliable(G_perf, conservative=conservative) \
# # # # and Guru.followers.affiliable(perf, conservative=conservative)
# # # # def follow(self, perf, G, G_perf, conservative=True):
# # # # """ the individual wants to be G's disciple because of some of G's performance
# # # # G may evaluate the individual's performance too
# # # # """
# # # # if self.affiliable(perf, G, G_perf, conservative=conservative):
# # # # # the new guru is good enough and the individual is good enough for the guru
# # # # self.gurus.follow(G, G_perf, conservative=conservative, Quit=self.quit_)
# # # # G.followers.follow(self, perf, conservative=conservative, Quit=G.quit_)
# # # # return True
# # # # else: return False
# # # # def quit_(self, Guru):
# # # # """ the individual no longer follows its guru
# # # # """
# # # # Guru.followers.quit_(self)
# # # # self.gurus.quit_(Guru)
# # # # def best_friend(self): return self.gurus.best_friend()
# # # # def friends(self, ordered=True): return self.gurus.Friends(ordered=ordered)
# # # # def nbFriends(self): return self.gurus.nbFriends()
# # # # def nbFollowers(self): return self.followers.nbFriends()
# # # # def lessening_friendship(self, Factor=0.9):
# # # # self.gurus.lessening_friendship(Factor)
# # # # def forgetAll(self):
# # # # self.gurus.forgetAll()
# # # # self.followers.forgetAll()
# # # # #################################
# # # # # symmetrical links #
# # # # #################################
# # # # def acquaintable(self, Partner, Deal):
# # # # return self.affiliable(Deal, Partner, Deal) and Partner.affiliable(Deal, self, Deal)
# # # # def get_friend(self, Offer, Partner, Return=None):
# # # # " Checks mutual acceptance before establishing friendship "
# # # # if Return is None: Return = Offer
# # # # if self.affiliable(Offer, Partner, Return) and Partner.affiliable(Return, self, Offer):
# # # # self.follow(Offer, Partner, Return)
# # # # Partner.follow(Return, self, Offer)
# # # # return True
# # # # return False
# # # # def best_friend_symmetry(self):
# # # # " Checks whether self is its best friend's friend "
# # # # BF = self.best_friend()
# # # # if BF: return self == BF.best_friend()
# # # # return False
# # # # def restore_symmetry(self):
# # # # " Makes sure that self is its friends' friend - Useful for symmmtrical relations "
# # # # for F in self.gurus.names()[:]: # need to copy the list, as it is modified within the loop
# # # # #print 'checking symmetry for %d' % F.ID, F.gurus.names()
# # # # if self not in F.gurus.names():
# # # # print('%s quits %s ***** because absent from %s' % (self.ID, F.ID, str(F.gurus.names())))
# # # # self.quit_(F) # no hard feelings
# # # # #################################
# # # # # link processing #
# # # # #################################
# # # # def detach(self):
# # # # """ The individual quits its guru and its followers
# # # # """
# # # # for G in self.gurus.names(): self.quit_(G)
# # # # for F in self.followers.names(): F.quit_(self)
# # # # if self.gurus.names() != []: error("Alliances: recalcitrant guru")
# # # # if self.followers.names() != []: error("Alliances: sticky followers")
# # # # def consistency(self):
# # # # if self.gurus.size() > self.gurus.sizeMax():
# # # # error("Alliances", "too many gurus: %d" % self.gurus.size())
# # # # if self.followers.size() > self.followers.sizeMax():
# # # # error("Alliances", "too many followers: %d" % self.followers.size())
# # # # for F in self.followers.names():
# # # # if self not in F.gurus.names():
# # # # error("Alliances: non following followers")
# # # # if self == F: error("Alliances: Narcissism")
# # # # ## print self.ID, ' is in ', F.ID, "'s guru list: ", [G.ID for G in F.gurus.names()]
# # # # for G in self.gurus.names():
# # # # if self not in G.followers.names():
# # # # # print 'self: ',str(self), "self's gurus: ",Alliances.social_signature(self)
# # # # # print 'guru: ',str(G), 'its followers: ',[str(F) for F in G.followers.names()]
# # # # error("Alliances: unaware guru")
# # # # if self == G: error("Alliances: narcissism")
# # # # ## print self.ID, ' is in ', G.ID, "'s follower list: ", [F.ID for F in G.followers.names()]
# # # # ## print '\t', self.ID, ' OK'
# # # # if self.gurus.size() > 0:
# # # # if not self.gurus.friends.present((self.gurus.best(), self.gurus.friends.maximal())):
# # # # error("Alliances: best guru is ghost")
# # # # def social_signature(self):
# # # # ## return [F.ID for F in self.gurus.names()]
# # # # return self.gurus.Friends()
# # # # def signature(self): return self.social_signature()
###############################
# Local Test                  #
###############################
if __name__ == "__main__":
    # Smoke test: print the module and class documentation, then wait.
    print(__doc__ + '\n')
    print(Friend.__doc__ + '\n\n')
    try:
        raw_input('[Return]')  # Python 2
    except NameError:
        input('[Return]')  # Python 3: raw_input was renamed to input
__author__ = 'Dessalles'
| 3,770 | 0 | 895 |
db8dc16caffaac1205f497668a199d8909d61214 | 1,460 | py | Python | min_mp3.py | amikey/audio_scripts | 3c6adc3c4e2a338590bb69e2a13c954bfd8cec46 | [
"MIT"
] | 6 | 2016-05-29T23:20:17.000Z | 2019-03-10T18:18:05.000Z | min_mp3.py | amikey/audio_scripts | 3c6adc3c4e2a338590bb69e2a13c954bfd8cec46 | [
"MIT"
] | null | null | null | min_mp3.py | amikey/audio_scripts | 3c6adc3c4e2a338590bb69e2a13c954bfd8cec46 | [
"MIT"
] | null | null | null | #!//Users/tkirke/anaconda/bin/python
# -*- coding: utf-8 -*-
import re
import sys,os
import codecs
from math import sqrt,log
from scipy.io.wavfile import read,write
from scipy import signal
import numpy
import matplotlib
import pylab
from lame import *
# Remove chunks more -27 db down from peak to remove audio 'gaps'
# optional plot envelope
mp = re.compile('\.mp3')
files = []
show_plot = False
if (len(sys.argv) > 1):
files.append(sys.argv[1])
if (len(sys.argv) > 2): show_plot = True
else:
files = os.listdir('.')
debug = False
PB = open('mp3_levels.txt','w')
count = 0
for fil in files:
if (mp.search(fil)):
audio_in = decode_mp3(fil)
samples = len(audio_in)
seg = 1024
intvl = samples/seg
k = 0
minsig = 0
for i in xrange(intvl):
sum = 0.0
for j in xrange(seg):
s = float(audio_in[k])
sum += (s*s)
k = k+1
rms = sqrt(sum/seg)/16384.0
if (rms > 0): rms_db = 20.0*log(rms)/log(10.0)
if (rms_db < minsig):
minsig = rms_db
db10 = '%02d' % int(-minsig)
if (minsig > -20):
s = "Minimum level is -"+db10+" dB in "+str(seg)+" sample segments over "+str(0.1*int(samples/4410))+" seconds for "+fil
PB.write(s+"\n")
cmd = 'mv \"'+fil+"\" ./levels/"
os.system(cmd)
print s
PB.close()
| 23.934426 | 132 | 0.541096 | #!//Users/tkirke/anaconda/bin/python
# -*- coding: utf-8 -*-
import re
import sys,os
import codecs
from math import sqrt,log
from scipy.io.wavfile import read,write
from scipy import signal
import numpy
import matplotlib
import pylab
from lame import *
# Remove chunks more -27 db down from peak to remove audio 'gaps'
# optional plot envelope
mp = re.compile('\.mp3')
files = []
show_plot = False
if (len(sys.argv) > 1):
files.append(sys.argv[1])
if (len(sys.argv) > 2): show_plot = True
else:
files = os.listdir('.')
debug = False
PB = open('mp3_levels.txt','w')
count = 0
for fil in files:
if (mp.search(fil)):
audio_in = decode_mp3(fil)
samples = len(audio_in)
seg = 1024
intvl = samples/seg
k = 0
minsig = 0
for i in xrange(intvl):
sum = 0.0
for j in xrange(seg):
s = float(audio_in[k])
sum += (s*s)
k = k+1
rms = sqrt(sum/seg)/16384.0
if (rms > 0): rms_db = 20.0*log(rms)/log(10.0)
if (rms_db < minsig):
minsig = rms_db
db10 = '%02d' % int(-minsig)
if (minsig > -20):
s = "Minimum level is -"+db10+" dB in "+str(seg)+" sample segments over "+str(0.1*int(samples/4410))+" seconds for "+fil
PB.write(s+"\n")
cmd = 'mv \"'+fil+"\" ./levels/"
os.system(cmd)
print s
PB.close()
| 0 | 0 | 0 |
3c6755baeede12a1db47d023b18dd9493e78a17c | 2,620 | py | Python | application/bills/bills.py | akelshareif/fiscally | ca44ca00537d2b9ef1bca8a3a67b66427394dc72 | [
"MIT"
] | 1 | 2020-09-18T04:18:58.000Z | 2020-09-18T04:18:58.000Z | application/bills/bills.py | akelshareif/fiscally | ca44ca00537d2b9ef1bca8a3a67b66427394dc72 | [
"MIT"
] | null | null | null | application/bills/bills.py | akelshareif/fiscally | ca44ca00537d2b9ef1bca8a3a67b66427394dc72 | [
"MIT"
] | null | null | null | """ Bills routes """
from flask import Blueprint, render_template, redirect, request, url_for, flash
from flask_login import login_required, current_user
from application import db
from .bill_forms import BillForm
from ..models import Bill
bills_bp = Blueprint('bills', __name__, url_prefix='/user',
template_folder='templates')
@bills_bp.route('/bills', methods=['GET', 'POST'])
@login_required
def bills_display():
    """Render the bills overview and handle creation of a new bill."""
    form = BillForm()
    bills = Bill.query.filter_by(user_id=str(current_user.id)).all()
    # Outstanding balance: only bills still marked 'Not Paid' count.
    outstanding = sum(b.bill_amount for b in bills if b.is_paid == 'Not Paid')
    total_amount_due = round(outstanding, 2)
    if form.validate_on_submit():
        new_bill = Bill(
            bill_name=form.bill_name.data,
            bill_due_date=form.bill_due_date.data,
            bill_amount=form.bill_amount.data,
            user_id=str(current_user.id),
        )
        db.session.add(new_bill)
        db.session.commit()
        flash('Successfully added bill', 'success')
        return redirect(url_for('bills.bills_display'))
    return render_template(
        'bills/bills.jinja',
        bill_form=form,
        bills=bills,
        total_amount_due=total_amount_due,
    )
@bills_bp.route('/bills/paid', methods=['POST'])
@login_required
def mark_bill_paid():
    """Toggle the paid status of the posted bills.

    Despite the route name, this flips 'Paid' <-> 'Not Paid' for every
    bill id in the JSON payload's ``idArr`` list (the old docstring
    claimed it only marked bills as paid).
    """
    bill_ids = request.json['idArr']
    for bill_id in bill_ids:
        bill = Bill.query.get(bill_id)
        bill.is_paid = 'Paid' if bill.is_paid == 'Not Paid' else 'Not Paid'
    # Commit once for the whole batch instead of once per bill.
    db.session.commit()
    return {"msg": "success"}
@bills_bp.route('/bills/edit/<bill_id>', methods=['GET', 'POST'])
@login_required
def edit_bill(bill_id):
    """Show the edit form for a bill and apply submitted changes."""
    bill = Bill.query.get(bill_id)
    form = BillForm(obj=bill)
    if not form.validate_on_submit():
        # GET request or validation failure: (re-)display the form.
        return render_template('bills/edit_bill.jinja', form=form, bill=bill)
    bill.bill_name = form.bill_name.data
    bill.bill_due_date = form.bill_due_date.data
    bill.bill_amount = form.bill_amount.data
    db.session.commit()
    flash('Successfully edited bill', 'info')
    return redirect(url_for('bills.bills_display'))
@bills_bp.route('/bills/delete', methods=['POST'])
@login_required
def delete_bills():
    """Delete every bill whose id appears in the posted ``idArr`` list."""
    bill_ids = request.json['idArr']
    for bill_id in bill_ids:  # renamed from `id`, which shadowed the builtin
        db.session.delete(Bill.query.get(bill_id))
    # Single commit for the whole batch (was one commit per deletion).
    db.session.commit()
    flash('Bill successfully deleted', 'warning')
    return {"msg": "success"}
| 28.172043 | 137 | 0.666794 | """ Bills routes """
from flask import Blueprint, render_template, redirect, request, url_for, flash
from flask_login import login_required, current_user
from application import db
from .bill_forms import BillForm
from ..models import Bill
bills_bp = Blueprint('bills', __name__, url_prefix='/user',
template_folder='templates')
@bills_bp.route('/bills', methods=['GET', 'POST'])
@login_required
def bills_display():
""" Show and add bills """
bill_form = BillForm()
user_bills = Bill.query.filter_by(user_id=str(current_user.id)).all()
total_amount_due = round(
sum([bill.bill_amount for bill in user_bills if bill.is_paid == 'Not Paid']), 2)
if bill_form.validate_on_submit():
new_bill = Bill(bill_name=bill_form.bill_name.data,
bill_due_date=bill_form.bill_due_date.data, bill_amount=bill_form.bill_amount.data, user_id=str(current_user.id))
db.session.add(new_bill)
db.session.commit()
flash('Successfully added bill', 'success')
return redirect(url_for('bills.bills_display'))
return render_template('bills/bills.jinja', bill_form=bill_form, bills=user_bills, total_amount_due=total_amount_due)
@bills_bp.route('/bills/paid', methods=['POST'])
@login_required
def mark_bill_paid():
""" Marks a bill as paid """
bill_ids = request.json['idArr']
for id in bill_ids:
bill = Bill.query.get(id)
if bill.is_paid == 'Not Paid':
bill.is_paid = 'Paid'
else:
bill.is_paid = 'Not Paid'
db.session.commit()
return {"msg": "success"}
@bills_bp.route('/bills/edit/<bill_id>', methods=['GET', 'POST'])
@login_required
def edit_bill(bill_id):
""" Handle bill edit """
bill = Bill.query.get(bill_id)
bill_form = BillForm(obj=bill)
if bill_form.validate_on_submit():
bill.bill_name = bill_form.bill_name.data
bill.bill_due_date = bill_form.bill_due_date.data
bill.bill_amount = bill_form.bill_amount.data
db.session.commit()
flash('Successfully edited bill', 'info')
return redirect(url_for('bills.bills_display'))
return render_template('bills/edit_bill.jinja', form=bill_form, bill=bill)
@bills_bp.route('/bills/delete', methods=['POST'])
@login_required
def delete_bills():
""" Handle bill deletion """
bill_ids = request.json['idArr']
for id in bill_ids:
bill = Bill.query.get(id)
db.session.delete(bill)
db.session.commit()
flash('Bill successfully deleted', 'warning')
return {"msg": "success"}
| 0 | 0 | 0 |
1ef83bce037d82916bba554d8f49ad853081e5a7 | 5,986 | py | Python | coherence/upnp/core/soap_service.py | stonewell/Coherence | af7d2dc1224e705d172cee8a15d87f3abcccab2a | [
"MIT"
] | 112 | 2015-01-13T14:50:41.000Z | 2022-01-20T08:48:04.000Z | coherence/upnp/core/soap_service.py | stonewell/Coherence | af7d2dc1224e705d172cee8a15d87f3abcccab2a | [
"MIT"
] | 14 | 2015-01-26T21:54:14.000Z | 2020-01-19T19:28:52.000Z | coherence/upnp/core/soap_service.py | stonewell/Coherence | af7d2dc1224e705d172cee8a15d87f3abcccab2a | [
"MIT"
] | 40 | 2015-01-01T07:59:25.000Z | 2020-05-07T14:54:48.000Z | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2007 - Frank Scholz <coherence@beebits.net>
from twisted.web import server, resource
from twisted.python import failure
from twisted.internet import defer
from coherence import log, SERVER_ID
from coherence.extern.et import ET, namespace_map_update
from coherence.upnp.core.utils import parse_xml
from coherence.upnp.core import soap_lite
import coherence.extern.louie as louie
class UPnPPublisher(resource.Resource, log.Loggable):
""" Based upon twisted.web.soap.SOAPPublisher and
extracted to remove the SOAPpy dependency
UPnP requires headers and OUT parameters to be returned
in a slightly
different way than the SOAPPublisher class does.
"""
logCategory = 'soap'
isLeaf = 1
encoding = "UTF-8"
envelope_attrib = None
def render(self, request):
"""Handle a SOAP command."""
data = request.content.read()
headers = request.getAllHeaders()
self.info('soap_request: %s', headers)
# allow external check of data
louie.send('UPnPTest.Control.Client.CommandReceived', None, headers, data)
tree = parse_xml(data)
#root = tree.getroot()
#print_c(root)
body = tree.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')
method = body.getchildren()[0]
methodName = method.tag
ns = None
if methodName.startswith('{') and methodName.rfind('}') > 1:
ns, methodName = methodName[1:].split('}')
args = []
kwargs = {}
for child in method.getchildren():
kwargs[child.tag] = soap_lite.decode_result(child)
args.append(kwargs[child.tag])
#p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
#methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns
try:
headers['content-type'].index('text/xml')
except:
self._gotError(failure.Failure(errorCode(415)), request, methodName)
return server.NOT_DONE_YET
self.debug('headers: %r', headers)
function, useKeywords = self.lookupFunction(methodName)
#print 'function', function, 'keywords', useKeywords, 'args', args, 'kwargs', kwargs
if not function:
self._methodNotFound(request, methodName)
return server.NOT_DONE_YET
else:
keywords = {'soap_methodName': methodName}
if(headers.has_key('user-agent') and
headers['user-agent'].find('Xbox/') == 0):
keywords['X_UPnPClient'] = 'XBox'
#if(headers.has_key('user-agent') and
# headers['user-agent'].startswith("""Mozilla/4.0 (compatible; UPnP/1.0; Windows""")):
# keywords['X_UPnPClient'] = 'XBox'
if(headers.has_key('x-av-client-info') and
headers['x-av-client-info'].find('"PLAYSTATION3') > 0):
keywords['X_UPnPClient'] = 'PLAYSTATION3'
if(headers.has_key('user-agent') and
headers['user-agent'].find('Philips-Software-WebClient/4.32') == 0):
keywords['X_UPnPClient'] = 'Philips-TV'
for k, v in kwargs.items():
keywords[str(k)] = v
self.info('call %s %s', methodName, keywords)
if hasattr(function, "useKeywords"):
d = defer.maybeDeferred(function, **keywords)
else:
d = defer.maybeDeferred(function, *args, **keywords)
d.addCallback(self._gotResult, request, methodName, ns)
d.addErrback(self._gotError, request, methodName, ns)
return server.NOT_DONE_YET
| 35.630952 | 105 | 0.600568 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2007 - Frank Scholz <coherence@beebits.net>
from twisted.web import server, resource
from twisted.python import failure
from twisted.internet import defer
from coherence import log, SERVER_ID
from coherence.extern.et import ET, namespace_map_update
from coherence.upnp.core.utils import parse_xml
from coherence.upnp.core import soap_lite
import coherence.extern.louie as louie
class errorCode(Exception):
    """SOAP fault carrying a numeric status code (e.g. 401, 415, 500)."""
    def __init__(self, status):
        Exception.__init__(self)
        # Status code reported back to the client in the SOAP error response.
        self.status = status
class UPnPPublisher(resource.Resource, log.Loggable):
    """ Based upon twisted.web.soap.SOAPPublisher and
        extracted to remove the SOAPpy dependency
        UPnP requires headers and OUT parameters to be returned
        in a slightly
        different way than the SOAPPublisher class does.
    """
    logCategory = 'soap'
    isLeaf = 1
    encoding = "UTF-8"
    envelope_attrib = None
    def _sendResponse(self, request, response, status=200):
        """Write the SOAP response body plus the UPnP-mandated headers."""
        self.debug('_sendResponse %s %s', status, response)
        if status == 200:
            request.setResponseCode(200)
        else:
            # Any non-200 status is reported to the client as a 500.
            request.setResponseCode(500)
        if self.encoding is not None:
            mimeType = 'text/xml; charset="%s"' % self.encoding
        else:
            mimeType = "text/xml"
        request.setHeader("Content-type", mimeType)
        request.setHeader("Content-length", str(len(response)))
        request.setHeader("EXT", '')
        request.setHeader("SERVER", SERVER_ID)
        request.write(response)
        request.finish()
    def _methodNotFound(self, request, methodName):
        """Answer an unknown SOAP action with a 401 SOAP error."""
        response = soap_lite.build_soap_error(401)
        self._sendResponse(request, response, status=401)
    def _gotResult(self, result, request, methodName, ns):
        """Serialize a successful action result into a SOAP response."""
        self.debug('_gotResult %s %s %s %s', result, request, methodName, ns)
        response = soap_lite.build_soap_call("{%s}%s" % (ns, methodName), result,
                                                is_response=True,
                                                encoding=None)
        #print "SOAP-lite response", response
        self._sendResponse(request, response)
    def _gotError(self, failure, request, methodName, ns):
        """Map a failed action to a SOAP error; errorCode carries the status."""
        self.info('_gotError %s %s', failure, failure.value)
        e = failure.value
        status = 500
        if isinstance(e, errorCode):
            status = e.status
        else:
            failure.printTraceback()
        response = soap_lite.build_soap_error(status)
        self._sendResponse(request, response, status=status)
    def lookupFunction(self, functionName):
        """Find handler soap_<name>, falling back to soap__generic."""
        function = getattr(self, "soap_%s" % functionName, None)
        if not function:
            function = getattr(self, "soap__generic", None)
        if function:
            return function, getattr(function, "useKeywords", False)
        else:
            return None, None
    def render(self, request):
        """Handle a SOAP command."""
        data = request.content.read()
        headers = request.getAllHeaders()
        self.info('soap_request: %s', headers)
        # allow external check of data
        louie.send('UPnPTest.Control.Client.CommandReceived', None, headers, data)
        def print_c(e):
            # Debug helper: recursively dump an element tree (unused by default).
            for c in e.getchildren():
                print c, c.tag
                print_c(c)
        tree = parse_xml(data)
        #root = tree.getroot()
        #print_c(root)
        # The first child of the SOAP Body is the action being invoked.
        body = tree.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')
        method = body.getchildren()[0]
        methodName = method.tag
        ns = None
        if methodName.startswith('{') and methodName.rfind('}') > 1:
            ns, methodName = methodName[1:].split('}')
        args = []
        kwargs = {}
        for child in method.getchildren():
            kwargs[child.tag] = soap_lite.decode_result(child)
            args.append(kwargs[child.tag])
        #p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
        #methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns
        try:
            # SOAP requests must be text/xml; anything else is a 415 fault.
            headers['content-type'].index('text/xml')
        except:
            self._gotError(failure.Failure(errorCode(415)), request, methodName)
            return server.NOT_DONE_YET
        self.debug('headers: %r', headers)
        function, useKeywords = self.lookupFunction(methodName)
        #print 'function', function, 'keywords', useKeywords, 'args', args, 'kwargs', kwargs
        if not function:
            self._methodNotFound(request, methodName)
            return server.NOT_DONE_YET
        else:
            keywords = {'soap_methodName': methodName}
            # Client sniffing: tag known devices so handlers can work around quirks.
            if(headers.has_key('user-agent') and
                    headers['user-agent'].find('Xbox/') == 0):
                keywords['X_UPnPClient'] = 'XBox'
            #if(headers.has_key('user-agent') and
            #        headers['user-agent'].startswith("""Mozilla/4.0 (compatible; UPnP/1.0; Windows""")):
            #    keywords['X_UPnPClient'] = 'XBox'
            if(headers.has_key('x-av-client-info') and
                    headers['x-av-client-info'].find('"PLAYSTATION3') > 0):
                keywords['X_UPnPClient'] = 'PLAYSTATION3'
            if(headers.has_key('user-agent') and
                    headers['user-agent'].find('Philips-Software-WebClient/4.32') == 0):
                keywords['X_UPnPClient'] = 'Philips-TV'
            for k, v in kwargs.items():
                keywords[str(k)] = v
            self.info('call %s %s', methodName, keywords)
            if hasattr(function, "useKeywords"):
                d = defer.maybeDeferred(function, **keywords)
            else:
                d = defer.maybeDeferred(function, *args, **keywords)
            d.addCallback(self._gotResult, request, methodName, ns)
            d.addErrback(self._gotError, request, methodName, ns)
            # Response is written asynchronously when the Deferred fires.
            return server.NOT_DONE_YET
| 2,010 | 6 | 215 |
a0879d95dd3d00d9199837cf041da7dad6c67c02 | 431 | py | Python | setup.py | ptcane/mkdocs-bulma | b40a90369ac273abb5fe45295cceadf5297ec356 | [
"MIT"
] | 6 | 2018-05-26T00:51:29.000Z | 2021-03-18T18:03:26.000Z | setup.py | ptcane/mkdocs-bulma | b40a90369ac273abb5fe45295cceadf5297ec356 | [
"MIT"
] | 4 | 2019-02-28T14:51:24.000Z | 2021-05-06T08:31:13.000Z | setup.py | ptcane/mkdocs-bulma | b40a90369ac273abb5fe45295cceadf5297ec356 | [
"MIT"
] | 10 | 2018-05-26T00:53:25.000Z | 2021-04-03T05:46:27.000Z | from setuptools import setup, find_packages
VERSION = "0.0.5"
setup(
name="mkdocs-bulma",
version=VERSION,
url="https://github.com/rajasimon/mkdocs-bulma",
license="MIT",
description="Bulma for mkdocs",
author="Raja Simon",
author_email="rajasimon@icloud.com",
packages=find_packages(),
include_package_data=True,
entry_points={"mkdocs.themes": ["bulma = bulma",]},
zip_safe=False,
)
| 22.684211 | 55 | 0.675174 | from setuptools import setup, find_packages
VERSION = "0.0.5"
setup(
name="mkdocs-bulma",
version=VERSION,
url="https://github.com/rajasimon/mkdocs-bulma",
license="MIT",
description="Bulma for mkdocs",
author="Raja Simon",
author_email="rajasimon@icloud.com",
packages=find_packages(),
include_package_data=True,
entry_points={"mkdocs.themes": ["bulma = bulma",]},
zip_safe=False,
)
| 0 | 0 | 0 |
8b2507f349e76a89f6be4354bfaf0a6719ddc192 | 17,147 | py | Python | window.py | Chapsjrl/Genetico2018-2 | e1bf4ccb0da422156d8df541be50965c1d79c2b2 | [
"MIT"
] | null | null | null | window.py | Chapsjrl/Genetico2018-2 | e1bf4ccb0da422156d8df541be50965c1d79c2b2 | [
"MIT"
] | null | null | null | window.py | Chapsjrl/Genetico2018-2 | e1bf4ccb0da422156d8df541be50965c1d79c2b2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""GUI module generated by PAGE version 4.14.
# In conjunction with Tcl version 8.6
# Jun 04, 2018 08:42:31 PM
"""
import base64
import sys
from GaQueens import GaQueens
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
# spinbox = StringVar(root, '4')
# spinbox2 = StringVar(root, '10')
# spinbox3 = StringVar(root, '-1')
with open("7735732.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
root = Tk()
player1 = PhotoImage(data=encoded_string)
player1 = player1.subsample(3)
def vp_start_gui():
    """Start point when module is the main routine."""
    global val, w, root, spinbox, spinbox2, spinbox3
    # root = Tk()
    # Default GA parameters bound to the spinboxes: board size, population
    # size, generation limit (-1 presumably means "unbounded" -- TODO confirm).
    spinbox = StringVar(root, '6')
    spinbox2 = StringVar(root, '10')
    spinbox3 = StringVar(root, '-1')
    top = Algoritmo_gen_tico_con_N_reinas(root)
    init(root, top)
    root.mainloop()
w = None
def create_Algoritmo_gen_tico_con_N_reinas(root, *args, **kwargs):
    """Start point when module is imported by another program."""
    global w, w_win, rt
    rt = root
    # Open the GUI in a secondary window on top of the caller's root.
    w = Toplevel(root)
    top = Algoritmo_gen_tico_con_N_reinas(w)
    init(w, top, *args, **kwargs)
    return (w, top)
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
"""Configure the scrollbars for a widget."""
@staticmethod
def _autoscroll(sbar):
"""Hide and show scrollbar as needed."""
return wrapped
def _create_container(func):
"""Creates a ttk Frame with a given master, and use this new frame to
place the scrollbars and the widget."""
return wrapped
class ScrolledTreeView(AutoScroll, ttk.Treeview):
"""A standard ttk Treeview widget with scrollbars that will
automatically show/hide as needed."""
@_create_container
| 39.327982 | 79 | 0.619642 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""GUI module generated by PAGE version 4.14.
# In conjunction with Tcl version 8.6
# Jun 04, 2018 08:42:31 PM
"""
import base64
import sys
from GaQueens import GaQueens
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
# spinbox = StringVar(root, '4')
# spinbox2 = StringVar(root, '10')
# spinbox3 = StringVar(root, '-1')
with open("7735732.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
root = Tk()
player1 = PhotoImage(data=encoded_string)
player1 = player1.subsample(3)
def vp_start_gui():
"""Start point when module is the main routine."""
global val, w, root, spinbox, spinbox2, spinbox3
# root = Tk()
spinbox = StringVar(root, '6')
spinbox2 = StringVar(root, '10')
spinbox3 = StringVar(root, '-1')
top = Algoritmo_gen_tico_con_N_reinas(root)
init(root, top)
root.mainloop()
w = None
def create_Algoritmo_gen_tico_con_N_reinas(root, *args, **kwargs):
"""Start point when module is imported by another program."""
global w, w_win, rt
rt = root
w = Toplevel(root)
top = Algoritmo_gen_tico_con_N_reinas(w)
init(w, top, *args, **kwargs)
return (w, top)
def destroy_Algoritmo_gen_tico_con_N_reinas():
global w
w.destroy()
w = None
class Algoritmo_gen_tico_con_N_reinas:
    """Tk window for running a genetic algorithm on the N-queens problem.

    Builds all widgets (PAGE-generated layout), draws the chessboard on a
    canvas and lists every generation's population in a tree view.
    """
    def __init__(self, top=None):
        """Class that configures and populates the toplevel window.

        :param top: the toplevel containing window.
        """
        # PAGE-generated palette and ttk style setup.
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        font11 = "-family {Segoe UI} -size 9 -weight bold -slant roman" \
            " -underline 0 -overstrike 0"
        self.style = ttk.Style()
        if sys.platform == "win32":
            self.style.theme_use('winnative')
        self.style.configure('.', background=_bgcolor)
        self.style.configure('.', foreground=_fgcolor)
        self.style.configure('.', font="TkDefaultFont")
        self.style.map('.', background=[
            ('selected', _compcolor), ('active', _ana2color)])
        # Window geometry, title and icon.
        top.geometry("901x585+163+100")
        top.title("Algoritmo genético con N reinas")
        img = Image("photo", file="qeen.png")
        top.call('wm', 'iconphoto', top._w, img)
        top.configure(background="#d9d9d9")
        top.configure(highlightbackground="#d9d9d9")
        top.configure(highlightcolor="black")
        # Static labels for the three numeric inputs.
        self.Label1 = Label(top)
        self.Label1.place(relx=0.02, rely=0.09, height=21, width=142)
        self.Label1.configure(activebackground="#f9f9f9")
        self.Label1.configure(activeforeground="black")
        self.Label1.configure(anchor=W)
        self.Label1.configure(background="#d9d9d9")
        self.Label1.configure(disabledforeground="#a3a3a3")
        self.Label1.configure(foreground="#000000")
        self.Label1.configure(highlightbackground="#d9d9d9")
        self.Label1.configure(highlightcolor="black")
        self.Label1.configure(text="""Tamaño de población:""")
        self.Label2 = Label(top)
        self.Label2.place(relx=0.02, rely=0.14, height=21, width=184)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(activeforeground="black")
        self.Label2.configure(anchor=W)
        self.Label2.configure(background="#d9d9d9")
        self.Label2.configure(disabledforeground="#a3a3a3")
        self.Label2.configure(foreground="#000000")
        self.Label2.configure(highlightbackground="#d9d9d9")
        self.Label2.configure(highlightcolor="black")
        self.Label2.configure(text="""Generaciones (-1 para infinito):""")
        self.Label3 = Label(top)
        self.Label3.place(relx=0.02, rely=0.03, height=21, width=174)
        self.Label3.configure(activebackground="#f9f9f9")
        self.Label3.configure(activeforeground="black")
        self.Label3.configure(anchor=W)
        self.Label3.configure(background="#d9d9d9")
        self.Label3.configure(disabledforeground="#a3a3a3")
        self.Label3.configure(foreground="#000000")
        self.Label3.configure(highlightbackground="#d9d9d9")
        self.Label3.configure(highlightcolor="black")
        self.Label3.configure(text="""Tamaño de tablero:""")
        self.style.configure('Treeview.Heading', font="TkDefaultFont")
        # Tree view listing each generation and its individuals.
        self.Scrolledtreeview1 = ScrolledTreeView(top)
        self.Scrolledtreeview1.place(
            relx=0.02, rely=0.27, relheight=0.68, relwidth=0.41)
        self.Scrolledtreeview1.configure(columns="Col1")
        self.Scrolledtreeview1.heading("#0", text="Solucion")
        self.Scrolledtreeview1.heading("#0", anchor="center")
        # self.Scrolledtreeview1.heading("#0", command=lambda:)
        self.Scrolledtreeview1.column("#0", width="175")
        self.Scrolledtreeview1.column("#0", minwidth="20")
        self.Scrolledtreeview1.column("#0", stretch="1")
        self.Scrolledtreeview1.column("#0", anchor="w")
        self.Scrolledtreeview1.heading("Col1", text="Aptitud")
        self.Scrolledtreeview1.heading("Col1", anchor="center")
        self.Scrolledtreeview1.column("Col1", width="46")
        self.Scrolledtreeview1.column("Col1", minwidth="20")
        self.Scrolledtreeview1.column("Col1", stretch="1")
        self.Scrolledtreeview1.column("Col1", anchor="w")
        # "Comenzar" (start) button kicks off the GA run.
        self.TButton1 = ttk.Button(top)
        self.TButton1.place(relx=0.17, rely=0.19, height=35, width=106)
        self.TButton1.configure(takefocus="")
        self.TButton1.configure(text='''Comenzar''')
        self.TButton1.configure(width=106)
        self.TButton1.configure(command=lambda: self.start())
        # Board size (4..100), population size (10..100) and generation
        # count inputs, bound to the module-level StringVars.
        self.Spinbox1 = Spinbox(top, from_=4.0, to=100.0, textvariable=spinbox)
        self.Spinbox1.place(relx=0.28, rely=0.03,
                            relheight=0.03, relwidth=0.13)
        self.Spinbox1.configure(activebackground="#f9f9f9")
        self.Spinbox1.configure(background="white")
        self.Spinbox1.configure(buttonbackground="#d9d9d9")
        self.Spinbox1.configure(disabledforeground="#a3a3a3")
        self.Spinbox1.configure(foreground="black")
        self.Spinbox1.configure(from_="4.0")
        self.Spinbox1.configure(highlightbackground="black")
        self.Spinbox1.configure(highlightcolor="black")
        self.Spinbox1.configure(insertbackground="black")
        self.Spinbox1.configure(selectbackground="#c4c4c4")
        self.Spinbox1.configure(selectforeground="black")
        self.Spinbox1.configure(textvariable=spinbox)
        self.Spinbox1.configure(to="100.0")
        self.Spinbox2 = Spinbox(
            top, from_=10.0, to=100.0, textvariable=spinbox2)
        self.Spinbox2.place(relx=0.28, rely=0.09,
                            relheight=0.03, relwidth=0.13)
        self.Spinbox2.configure(activebackground="#f9f9f9")
        self.Spinbox2.configure(background="white")
        self.Spinbox2.configure(buttonbackground="#d9d9d9")
        self.Spinbox2.configure(disabledforeground="#a3a3a3")
        self.Spinbox2.configure(foreground="black")
        self.Spinbox2.configure(from_="10.0")
        self.Spinbox2.configure(highlightbackground="black")
        self.Spinbox2.configure(highlightcolor="black")
        self.Spinbox2.configure(insertbackground="black")
        self.Spinbox2.configure(selectbackground="#c4c4c4")
        self.Spinbox2.configure(selectforeground="black")
        self.Spinbox2.configure(textvariable=spinbox2)
        self.Spinbox2.configure(to="100.0")
        self.Spinbox3 = Spinbox(
            top, from_=-1.0, to=10000.0, textvariable=spinbox3)
        self.Spinbox3.place(relx=0.28, rely=0.14,
                            relheight=0.03, relwidth=0.13)
        self.Spinbox3.configure(activebackground="#f9f9f9")
        self.Spinbox3.configure(background="white")
        self.Spinbox3.configure(buttonbackground="#d9d9d9")
        self.Spinbox3.configure(disabledforeground="#a3a3a3")
        self.Spinbox3.configure(foreground="black")
        self.Spinbox3.configure(from_="-1.0")
        self.Spinbox3.configure(highlightbackground="black")
        self.Spinbox3.configure(highlightcolor="black")
        self.Spinbox3.configure(insertbackground="black")
        self.Spinbox3.configure(selectbackground="#c4c4c4")
        self.Spinbox3.configure(selectforeground="black")
        self.Spinbox3.configure(textvariable=spinbox3)
        # NOTE(review): the constructor above uses to=10000.0 but this
        # configure() call resets the maximum to 100 -- confirm intended.
        self.Spinbox3.configure(to="100.0")
        self.TLabel1 = ttk.Label(top)
        self.TLabel1.place(relx=0.44, rely=0.03, height=19, width=46)
        self.TLabel1.configure(background="#d9d9d9")
        self.TLabel1.configure(foreground="#000000")
        self.TLabel1.configure(font=font11)
        self.TLabel1.configure(relief=FLAT)
        self.TLabel1.configure(text="""Tablero""")
        # Canvas on which the board squares and queen sprites are drawn.
        self.Canvas1 = Canvas(top)
        self.Canvas1.place(relx=0.44, rely=0.1, relheight=0.84, relwidth=0.54)
        self.Canvas1.configure(background="#d9d9d9")
        self.Canvas1.configure(borderwidth="0")
        self.Canvas1.configure(highlightthickness="0")
        self.Canvas1.configure(insertbackground="black")
        self.Canvas1.configure(relief=RIDGE)
        self.Canvas1.configure(selectbackground="#c4c4c4")
        self.Canvas1.configure(selectforeground="black")
        self.Canvas1.configure(width=int(spinbox.get()) * 20)
        # Redraw the board whenever the canvas is resized.
        self.Canvas1.bind("<Configure>", self.refresh)
        # self.Canvas1.bind("<Button-1>", self.refresh)
        # Status line that shows the GA result text after a run.
        self.Label4 = Label(top)
        self.Label4.place(relx=0.53, rely=0.03, height=31, width=394)
        self.Label4.configure(activebackground="#f9f9f9")
        self.Label4.configure(activeforeground="black")
        self.Label4.configure(background="#d9d9d9")
        self.Label4.configure(disabledforeground="#a3a3a3")
        self.Label4.configure(foreground="#000000")
        self.Label4.configure(highlightbackground="#d9d9d9")
        self.Label4.configure(highlightcolor="black")
        self.Label4.configure(width=394)
        # name -> (row, column) of every queen sprite on the board.
        self.pieces = {}
        # Pixel size of a board square; recomputed on every redraw.
        self.size = 20
    def refresh(self, event):
        """Redraw the board, possibly in response to window being resized."""
        color1 = "#b1cbdd"
        color2 = "#b8e0d2"
        # Square size is derived from the <Configure> event dimensions.
        xsize = int((event.width - 1) / int(spinbox.get()))
        ysize = int((event.height - 1) / int(spinbox.get()))
        self.size = min(xsize, ysize)
        self.Canvas1.delete("square")
        color = color1
        for row in range(int(spinbox.get())):
            # Alternate the starting colour per row for a checker pattern.
            color = color2 if color == color1 else color1
            for col in range(int(spinbox.get())):
                x1 = (col * self.size)
                y1 = (row * self.size)
                x2 = x1 + self.size
                y2 = y1 + self.size
                self.Canvas1.create_rectangle(
                    x1, y1, x2, y2, outline=color2, fill=color, tags="square")
                color = color2 if color == color1 else color1
        # Re-place existing pieces so they track the new square size.
        for name in self.pieces:
            self.placepiece(name, self.pieces[name][0], self.pieces[name][1])
        self.Canvas1.tag_raise("piece")
        self.Canvas1.tag_lower("square")
    def refresh2(self):
        """Redraw the board when the start button is pressed."""
        # Same drawing logic as refresh(), but the size comes from the
        # canvas itself instead of a <Configure> event.
        color1 = "#b1cbdd"
        color2 = "#b8e0d2"
        width, height = self.Canvas1.winfo_width(), self.Canvas1.winfo_height()
        xsize = int((width - 1) / int(spinbox.get()))
        ysize = int((height - 1) / int(spinbox.get()))
        self.size = min(xsize, ysize)
        self.Canvas1.delete("square")
        color = color1
        for row in range(int(spinbox.get())):
            color = color2 if color == color1 else color1
            for col in range(int(spinbox.get())):
                x1 = (col * self.size)
                y1 = (row * self.size)
                x2 = x1 + self.size
                y2 = y1 + self.size
                self.Canvas1.create_rectangle(
                    x1, y1, x2, y2, outline=color2, fill=color, tags="square")
                color = color2 if color == color1 else color1
        for name in self.pieces:
            self.placepiece(name, self.pieces[name][0], self.pieces[name][1])
        self.Canvas1.tag_raise("piece")
        self.Canvas1.tag_lower("square")
    def addpiece(self, name, image, row=0, column=0):
        """Add a piece to the playing board."""
        self.Canvas1.create_image(
            0, 0, image=image, tags=(name, "piece"), anchor="c")
        self.placepiece(name, row, column)
    def placepiece(self, name, row, column):
        """Place a piece at the given row/column."""
        self.pieces[name] = (row, column)
        # Centre of the target square in canvas pixels.
        x0 = (column * self.size) + int(self.size / 2)
        y0 = (row * self.size) + int(self.size / 2)
        self.Canvas1.coords(name, x0, y0)
    def clear_tree(self):
        """Remove every row from the generations tree view."""
        x = self.Scrolledtreeview1.get_children()
        # NOTE(review): ``x`` is a tuple, so comparing it with the string
        # '()' is always true; harmless, since iterating an empty tuple
        # is a no-op, but the guard could simply be ``if x:``.
        if x != '()':
            for child in x:
                self.Scrolledtreeview1.delete(child)
    def get_queens(self, algoritmo):
        """Draw the queens of the algorithm's solution on the board."""
        # NOTE: ``list`` and ``id`` shadow builtins inside this method.
        list = algoritmo.solution.list_coords()
        i = 0
        for tupla in list:
            id = "player{}".format(i)
            self.addpiece(id, player1, tupla[0], tupla[1])
            i += 1
    def set_tree(self, algoritmo):
        """Fill the tree view with every generation's population."""
        for g in range(algoritmo.generation_count + 1):
            population = algoritmo.generations[g]
            gen_str = "Generación {}".format(g)
            id_str = "gen{}".format(g)
            id = self.Scrolledtreeview1.insert("", g, id_str, text=gen_str)
            for item in population:
                self.Scrolledtreeview1.insert(id, "end",
                                              text=str(item.queens),
                                              values=(item.fitness))
    def start(self):
        """Run the GA with the current spinbox settings and show results."""
        sl = GaQueens(int(spinbox.get()), int(spinbox2.get()),
                      int(spinbox3.get()))
        self.Label4.configure(text=sl.status)
        self.get_queens(sl)
        self.clear_tree()
        self.set_tree(sl)
        self.refresh2()
# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    """Configure the scrollbars for a widget.

    Mixed into a Tk widget class; attaches auto-hiding horizontal and
    vertical scrollbars and forwards the master's geometry methods so the
    scrolled widget can be packed/gridded/placed directly.
    """
    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        # NOTE(review): the bare ``except:`` blocks below deliberately
        # tolerate widgets without vertical scrolling, but they would also
        # hide unrelated errors -- consider narrowing to tk.TclError.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except:
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)
        # self.configure(yscrollcommand=_autoscroll(vsb),
        # xscrollcommand=_autoscroll(hsb))
        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))
        # Widget at (0, 0); bars along its right and bottom edges.
        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except:
            pass
        hsb.grid(column=0, row=1, sticky='ew')
        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)
        # Copy geometry methods of master (taken from ScrolledText.py)
        # so callers manage the container frame through this widget.
        if py3:
            methods = Pack.__dict__.keys() | Grid.__dict__.keys() \
                | Place.__dict__.keys()
        else:
            methods = Pack.__dict__.keys() + Grid.__dict__.keys() \
                + Place.__dict__.keys()
        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))
    @staticmethod
    def _autoscroll(sbar):
        """Hide and show scrollbar as needed."""
        def wrapped(first, last):
            # ``first``/``last`` arrive from Tk as strings; the pair
            # (0, 1) means all content is visible, so hide the bar.
            first, last = float(first), float(last)
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped
    def __str__(self):
        # Report the container frame's path so Tk layout code sees the
        # frame, not the inner widget.
        return str(self.master)
def _create_container(func):
    """Decorator that interposes a ttk Frame between a widget and its
    master; the frame will host the widget together with its scrollbars."""
    def wrapped(cls, master, **kw):
        frame = ttk.Frame(master)
        return func(cls, frame, **kw)
    return wrapped
class ScrolledTreeView(AutoScroll, ttk.Treeview):
    """A standard ttk Treeview widget with scrollbars that will
    automatically show/hide as needed."""
    @_create_container
    def __init__(self, master, **kw):
        # ``master`` here is the frame injected by @_create_container;
        # AutoScroll.__init__ then places the scrollbars in that frame.
        ttk.Treeview.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)
def init(top, gui, *args, **kwargs):
    """Record the toplevel window and GUI object in module-level globals
    so other module functions (e.g. destroy_window) can reach them."""
    global w, top_level, root
    w = gui
    root = top_level = top
def destroy_window():
    """Close the application window and drop the module-level reference."""
    global top_level
    top_level.destroy()
    top_level = None
| 3,268 | 11,677 | 228 |
322dda659558109f88d9fdc1b584c49ba748b072 | 722 | py | Python | apps/medicamento/admin.py | alejandrobolivar/sist_inv_coesbicop | 36a068f21adb28f1f711b540841786538dbf8411 | [
"CC0-1.0"
] | null | null | null | apps/medicamento/admin.py | alejandrobolivar/sist_inv_coesbicop | 36a068f21adb28f1f711b540841786538dbf8411 | [
"CC0-1.0"
] | null | null | null | apps/medicamento/admin.py | alejandrobolivar/sist_inv_coesbicop | 36a068f21adb28f1f711b540841786538dbf8411 | [
"CC0-1.0"
] | null | null | null | from django.contrib import admin
# Register your models here.
from apps.medicamento.models import Medicamento
#admin.site.register(Medicamento)
@admin.register(Medicamento) | 45.125 | 150 | 0.760388 | from django.contrib import admin
# Register your models here.
from apps.medicamento.models import Medicamento
#admin.site.register(Medicamento)
@admin.register(Medicamento)
class PostAdmin(admin.ModelAdmin):
    """Django admin configuration for the ``Medicamento`` model."""
    # Columns shown on the change-list page.
    list_display = ('cod_med', 'principio_activo_med', 'nombre_comercial_med', 'nombre_lab_med', 'grupo_med', 'subgrupo_med', 'fecha_vencimiento_med')
    # Sidebar filters on the change list.
    list_filter = ('cod_med', 'principio_activo_med', 'nombre_comercial_med', 'nombre_lab_med')
    # Fields searched by the admin search box.
    search_fields = ('cod_med', 'principio_activo_med')
    # Pre-fill principio_activo_med from cod_med while typing in the form.
    prepopulated_fields = {'principio_activo_med': ('cod_med',)}
    # raw_id_fields = ('nombre_comercial_med',)
    # Date drill-down navigation by expiry date.
    date_hierarchy = 'fecha_vencimiento_med'
    # Default ordering of the change list.
    ordering = ('cod_med', 'nombre_comercial_med')
d3468bc4c6b972e250c94b3a6607f2ace5a9fe6c | 474 | py | Python | mtaa/migrations/0004_auto_20190914_1612.py | macymuhia/My_mtaa | ade06c1d30d8f293963ed09924419e3b3a881dbc | [
"MIT"
] | null | null | null | mtaa/migrations/0004_auto_20190914_1612.py | macymuhia/My_mtaa | ade06c1d30d8f293963ed09924419e3b3a881dbc | [
"MIT"
] | 8 | 2020-06-05T23:02:57.000Z | 2022-02-10T12:51:58.000Z | mtaa/migrations/0004_auto_20190914_1612.py | macymuhia/My_mtaa | ade06c1d30d8f293963ed09924419e3b3a881dbc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-09-14 13:12
from django.db import migrations, models
import django.db.models.deletion
| 23.7 | 107 | 0.64135 | # Generated by Django 2.2.4 on 2019-09-14 13:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mtaa', '0003_auto_20190914_1607'),
]
operations = [
migrations.AlterField(
model_name='business',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='mtaa.UserProfile'),
),
]
| 0 | 327 | 23 |
61f07c63c841bd7eb1642f0876d4c78eff8a1acb | 364 | py | Python | poker.py | JonLinC07/Poker | f21ea29acb5a9e674c665cf56033471634955e68 | [
"MIT"
] | null | null | null | poker.py | JonLinC07/Poker | f21ea29acb5a9e674c665cf56033471634955e68 | [
"MIT"
] | null | null | null | poker.py | JonLinC07/Poker | f21ea29acb5a9e674c665cf56033471634955e68 | [
"MIT"
] | null | null | null | from Dealer import Dealer
from Player import Player
dealer = Dealer()
player_name = input('Ingrese el nombre del jugador \n|>> ')
player = Player(player_name)
players = player.make_players()
players.append(player)
dealer.shuffle()
dealer.deal_cards(players)
for player in players:
print(player)
for card in player.show_hand():
print(card)
| 17.333333 | 59 | 0.722527 | from Dealer import Dealer
from Player import Player
# Deal a poker hand: build the dealer and the table of players, shuffle,
# deal, then print every player's hand.
dealer = Dealer()
# User-facing prompt (Spanish): "Enter the player's name".
player_name = input('Ingrese el nombre del jugador \n|>> ')
player = Player(player_name)
# make_players() presumably creates the remaining (computer) players --
# TODO confirm against the Player class; the human player is appended.
players = player.make_players()
players.append(player)
dealer.shuffle()
dealer.deal_cards(players)
# Print each player followed by the cards in their hand.
for player in players:
    print(player)
    for card in player.show_hand():
        print(card)
| 0 | 0 | 0 |
4ba54d1d1cf1debc358b58ec9824ff07dc0b5f88 | 424 | py | Python | 3to2-1.0/lib3to2/tests/test_getcwd.py | jrialland/python-brain | 1b2b1bc52d068f37283edd4c1528fea5c175fb29 | [
"Apache-2.0"
] | 6 | 2015-04-08T11:01:17.000Z | 2020-06-25T07:20:16.000Z | 3to2-1.0/lib3to2/tests/test_getcwd.py | jrialland/python-brain | 1b2b1bc52d068f37283edd4c1528fea5c175fb29 | [
"Apache-2.0"
] | 1 | 2018-03-05T17:41:27.000Z | 2018-03-05T17:41:27.000Z | 3to2-1.0/lib3to2/tests/test_getcwd.py | jrialland/python-brain | 1b2b1bc52d068f37283edd4c1528fea5c175fb29 | [
"Apache-2.0"
] | 3 | 2017-03-23T15:02:05.000Z | 2019-09-18T02:34:43.000Z | from test_all_fixers import lib3to2FixerTestCase
| 28.266667 | 55 | 0.528302 | from test_all_fixers import lib3to2FixerTestCase
class Test_getcwd(lib3to2FixerTestCase):
    """Tests for the 3to2 ``getcwd`` fixer (os.getcwd -> os.getcwdu)."""
    fixer = u"getcwd"
    def test_prefix_preservation(self):
        """Whitespace/prefix tokens around the call must survive the fix."""
        b = u"""ls = os.listdir( os.getcwd() )"""
        a = u"""ls = os.listdir( os.getcwdu() )"""
        self.check(b, a)
        b = u"""whatdir = os.getcwd ( )"""
        a = u"""whatdir = os.getcwdu ( )"""
        self.check(b, a)
bc1a420a889f511f32028279e0393433650fda9a | 786 | py | Python | CUSTOM-SERVER/50-config.py | tdmorello/omero-docker-compose | 6c023615852ead560bbdc86542c3d30e838f3f27 | [
"BSD-2-Clause"
] | null | null | null | CUSTOM-SERVER/50-config.py | tdmorello/omero-docker-compose | 6c023615852ead560bbdc86542c3d30e838f3f27 | [
"BSD-2-Clause"
] | null | null | null | CUSTOM-SERVER/50-config.py | tdmorello/omero-docker-compose | 6c023615852ead560bbdc86542c3d30e838f3f27 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# 1. Run .omero files from /opt/omero/server/config/
# 2. Set omero config properties from CONFIG_ envvars
# Variable names should replace "." with "_" and "_" with "__"
# E.g. CONFIG_omero_web_public_enabled=false
import os
from subprocess import call
from re import sub
CONFIG_OMERO = '/opt/omero/server/config/omero-server-config-update.sh'
OMERO = '/opt/omero/server/venv3/bin/omero'
if os.access(CONFIG_OMERO, os.X_OK):
rc = call([CONFIG_OMERO])
assert rc == 0
for (k, v) in os.environ.items():
if k.startswith('CONFIG_'):
prop = k[7:]
prop = sub('([^_])_([^_])', r'\1.\2', prop)
prop = sub('__', '_', prop)
value = v
rc = call([OMERO, 'config', 'set', '--', prop, value])
assert rc == 0
| 29.111111 | 71 | 0.620865 | #!/usr/bin/env python
# 1. Run .omero files from /opt/omero/server/config/
# 2. Set omero config properties from CONFIG_ envvars
#    Variable names should replace "." with "_" and "_" with "__"
#    E.g. CONFIG_omero_web_public_enabled=false
import os
from subprocess import call
from re import sub

CONFIG_OMERO = '/opt/omero/server/config/omero-server-config-update.sh'
OMERO = '/opt/omero/server/venv3/bin/omero'


def config_key_to_property(name):
    """Convert a ``CONFIG_*`` environment variable name to an OMERO
    property key.

    Single underscores become dots, double underscores become a literal
    underscore, e.g. ``CONFIG_omero_web_public__enabled`` maps to
    ``omero.web.public_enabled``.
    """
    prop = name[7:]  # strip the "CONFIG_" prefix
    # A lookahead keeps consecutive single underscores from being skipped:
    # the previous pattern "([^_])_([^_])" consumed the following
    # character, so "a_b_c" was rewritten to "a.b_c" instead of "a.b.c".
    prop = sub('([^_])_(?=[^_])', r'\1.', prop)
    # Remaining double underscores encode a literal underscore.
    return sub('__', '_', prop)


# Run the server config update hook if it is present and executable.
if os.access(CONFIG_OMERO, os.X_OK):
    rc = call([CONFIG_OMERO])
    assert rc == 0

# Apply every CONFIG_* environment variable as an omero config property.
for (k, v) in os.environ.items():
    if k.startswith('CONFIG_'):
        rc = call([OMERO, 'config', 'set', '--', config_key_to_property(k), v])
        assert rc == 0
| 0 | 0 | 0 |
4c8808c50b2dd4ec244e24b3a2ff51b5549508b2 | 8,849 | py | Python | gpflux/layers/basis_functions/fourier_features/random.py | tensorlicious/GPflux | 8a2c66310b2a43b6259591ee142a29c618ef18be | [
"Apache-2.0"
] | null | null | null | gpflux/layers/basis_functions/fourier_features/random.py | tensorlicious/GPflux | 8a2c66310b2a43b6259591ee142a29c618ef18be | [
"Apache-2.0"
] | null | null | null | gpflux/layers/basis_functions/fourier_features/random.py | tensorlicious/GPflux | 8a2c66310b2a43b6259591ee142a29c618ef18be | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" A kernel's features and coefficients using Random Fourier Features (RFF). """
from typing import Mapping, Optional
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.base import DType, TensorType
from gpflux.layers.basis_functions.fourier_features.base import FourierFeaturesBase
from gpflux.layers.basis_functions.fourier_features.utils import (
ORF_SUPPORTED_KERNELS,
RFF_SUPPORTED_KERNELS,
_bases_concat,
_bases_cosine,
_ceil_divide,
_matern_number,
_sample_chi,
_sample_students_t,
)
from gpflux.types import ShapeType
class RandomFourierFeatures(RandomFourierFeaturesBase):
r"""
Random Fourier features (RFF) is a method for approximating kernels. The essential
element of the RFF approach :cite:p:`rahimi2007random` is the realization that Bochner's theorem
for stationary kernels can be approximated by a Monte Carlo sum.
We will approximate the kernel :math:`k(\mathbf{x}, \mathbf{x}')`
by :math:`\Phi(\mathbf{x})^\top \Phi(\mathbf{x}')`
where :math:`\Phi: \mathbb{R}^{D} \to \mathbb{R}^{M}` is a finite-dimensional feature map.
The feature map is defined as:
.. math::
\Phi(\mathbf{x}) = \sqrt{\frac{2 \sigma^2}{\ell}}
\begin{bmatrix}
\cos(\boldsymbol{\theta}_1^\top \mathbf{x}) \\
\sin(\boldsymbol{\theta}_1^\top \mathbf{x}) \\
\vdots \\
\cos(\boldsymbol{\theta}_{\frac{M}{2}}^\top \mathbf{x}) \\
\sin(\boldsymbol{\theta}_{\frac{M}{2}}^\top \mathbf{x})
\end{bmatrix}
where :math:`\sigma^2` is the kernel variance.
The features are parameterised by random weights:
- :math:`\boldsymbol{\theta} \sim p(\boldsymbol{\theta})`
where :math:`p(\boldsymbol{\theta})` is the spectral density of the kernel.
At least for the squared exponential kernel, this variant of the feature
mapping has more desirable theoretical properties than its counterpart form
from phase-shifted cosines :class:`RandomFourierFeaturesCosine` :cite:p:`sutherland2015error`.
"""
def _compute_bases(self, inputs: TensorType) -> tf.Tensor:
"""
Compute basis functions.
:return: A tensor with the shape ``[N, 2M]``.
"""
return _bases_concat(inputs, self.W)
def _compute_constant(self) -> tf.Tensor:
"""
Compute normalizing constant for basis functions.
:return: A tensor with the shape ``[]`` (i.e. a scalar).
"""
return self.rff_constant(self.kernel.variance, output_dim=2 * self.n_components)
class RandomFourierFeaturesCosine(RandomFourierFeaturesBase):
r"""
Random Fourier Features (RFF) is a method for approximating kernels. The essential
element of the RFF approach :cite:p:`rahimi2007random` is the realization that Bochner's theorem
for stationary kernels can be approximated by a Monte Carlo sum.
We will approximate the kernel :math:`k(\mathbf{x}, \mathbf{x}')`
by :math:`\Phi(\mathbf{x})^\top \Phi(\mathbf{x}')` where
:math:`\Phi: \mathbb{R}^{D} \to \mathbb{R}^{M}` is a finite-dimensional feature map.
The feature map is defined as:
.. math::
\Phi(\mathbf{x}) = \sqrt{\frac{2 \sigma^2}{\ell}}
\begin{bmatrix}
\cos(\boldsymbol{\theta}_1^\top \mathbf{x} + \tau) \\
\vdots \\
\cos(\boldsymbol{\theta}_M^\top \mathbf{x} + \tau)
\end{bmatrix}
where :math:`\sigma^2` is the kernel variance.
The features are parameterised by random weights:
- :math:`\boldsymbol{\theta} \sim p(\boldsymbol{\theta})`
where :math:`p(\boldsymbol{\theta})` is the spectral density of the kernel
- :math:`\tau \sim \mathcal{U}(0, 2\pi)`
Equivalent to :class:`RandomFourierFeatures` by elementary trigonometric identities.
"""
def build(self, input_shape: ShapeType) -> None:
"""
Creates the variables of the layer.
See `tf.keras.layers.Layer.build()
<https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#build>`_.
"""
self._bias_build(n_components=self.n_components)
super(RandomFourierFeaturesCosine, self).build(input_shape)
def _compute_bases(self, inputs: TensorType) -> tf.Tensor:
"""
Compute basis functions.
:return: A tensor with the shape ``[N, M]``.
"""
return _bases_cosine(inputs, self.W, self.b)
def _compute_constant(self) -> tf.Tensor:
"""
Compute normalizing constant for basis functions.
:return: A tensor with the shape ``[]`` (i.e. a scalar).
"""
return self.rff_constant(self.kernel.variance, output_dim=self.n_components)
class OrthogonalRandomFeatures(RandomFourierFeatures):
r"""
Orthogonal random Fourier features (ORF) :cite:p:`yu2016orthogonal` for more
efficient and accurate kernel approximations than :class:`RandomFourierFeatures`.
"""
| 38.641921 | 100 | 0.659623 | #
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" A kernel's features and coefficients using Random Fourier Features (RFF). """
from typing import Mapping, Optional
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.base import DType, TensorType
from gpflux.layers.basis_functions.fourier_features.base import FourierFeaturesBase
from gpflux.layers.basis_functions.fourier_features.utils import (
ORF_SUPPORTED_KERNELS,
RFF_SUPPORTED_KERNELS,
_bases_concat,
_bases_cosine,
_ceil_divide,
_matern_number,
_sample_chi,
_sample_students_t,
)
from gpflux.types import ShapeType
class RandomFourierFeaturesBase(FourierFeaturesBase):
    """Shared machinery for the RFF variants: draws and stores the random
    frequency matrix ``W`` sampled from the kernel's spectral density."""
    def __init__(self, kernel: gpflow.kernels.Kernel, n_components: int, **kwargs: Mapping):
        """
        :param kernel: kernel to approximate; must be an instance of one of
            ``RFF_SUPPORTED_KERNELS``.
        :param n_components: number of Monte Carlo frequency samples ``M``.
        """
        assert isinstance(kernel, RFF_SUPPORTED_KERNELS), "Unsupported Kernel"
        super(RandomFourierFeaturesBase, self).__init__(kernel, n_components, **kwargs)
    def build(self, input_shape: ShapeType) -> None:
        """
        Creates the variables of the layer.
        See `tf.keras.layers.Layer.build()
        <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#build>`_.
        """
        input_dim = input_shape[-1]
        self._weights_build(input_dim, n_components=self.n_components)
        super(RandomFourierFeaturesBase, self).build(input_shape)
    def _weights_build(self, input_dim: int, n_components: int) -> None:
        """Create the non-trainable frequency matrix ``W`` with shape
        ``[n_components, input_dim]``, initialised by ``_weights_init``."""
        shape = (n_components, input_dim)
        self.W = self.add_weight(
            name="weights",
            trainable=False,
            shape=shape,
            dtype=self.dtype,
            initializer=self._weights_init,
        )
    def _weights_init(self, shape: TensorType, dtype: Optional[DType] = None) -> TensorType:
        """Sample frequencies: standard normal for the squared-exponential
        kernel, Student's t (nu = 2p + 1) for Matern kernels."""
        if isinstance(self.kernel, gpflow.kernels.SquaredExponential):
            return tf.random.normal(shape, dtype=dtype)
        else:
            p = _matern_number(self.kernel)
            nu = 2.0 * p + 1.0  # degrees of freedom
            return _sample_students_t(nu, shape, dtype)
    @staticmethod
    def rff_constant(variance: TensorType, output_dim: int) -> tf.Tensor:
        """
        Normalizing constant for random Fourier features:
        sqrt(2 * variance / output_dim).
        """
        return tf.sqrt(tf.math.truediv(2.0 * variance, output_dim))
class RandomFourierFeatures(RandomFourierFeaturesBase):
    r"""
    Random Fourier features (RFF) is a method for approximating kernels. The essential
    element of the RFF approach :cite:p:`rahimi2007random` is the realization that Bochner's theorem
    for stationary kernels can be approximated by a Monte Carlo sum.
    We will approximate the kernel :math:`k(\mathbf{x}, \mathbf{x}')`
    by :math:`\Phi(\mathbf{x})^\top \Phi(\mathbf{x}')`
    where :math:`\Phi: \mathbb{R}^{D} \to \mathbb{R}^{M}` is a finite-dimensional feature map.
    The feature map is defined as:
    .. math::
      \Phi(\mathbf{x}) = \sqrt{\frac{2 \sigma^2}{\ell}}
        \begin{bmatrix}
          \cos(\boldsymbol{\theta}_1^\top \mathbf{x}) \\
          \sin(\boldsymbol{\theta}_1^\top \mathbf{x}) \\
          \vdots \\
          \cos(\boldsymbol{\theta}_{\frac{M}{2}}^\top \mathbf{x}) \\
          \sin(\boldsymbol{\theta}_{\frac{M}{2}}^\top \mathbf{x})
        \end{bmatrix}
    where :math:`\sigma^2` is the kernel variance.
    The features are parameterised by random weights:
    - :math:`\boldsymbol{\theta} \sim p(\boldsymbol{\theta})`
      where :math:`p(\boldsymbol{\theta})` is the spectral density of the kernel.
    At least for the squared exponential kernel, this variant of the feature
    mapping has more desirable theoretical properties than its counterpart form
    from phase-shifted cosines :class:`RandomFourierFeaturesCosine` :cite:p:`sutherland2015error`.
    """
    def _compute_output_dim(self, input_shape: ShapeType) -> int:
        """Output width is ``2M``: one cosine and one sine per frequency."""
        return 2 * self.n_components
    def _compute_bases(self, inputs: TensorType) -> tf.Tensor:
        """
        Compute basis functions.
        :return: A tensor with the shape ``[N, 2M]``.
        """
        return _bases_concat(inputs, self.W)
    def _compute_constant(self) -> tf.Tensor:
        """
        Compute normalizing constant for basis functions.
        :return: A tensor with the shape ``[]`` (i.e. a scalar).
        """
        return self.rff_constant(self.kernel.variance, output_dim=2 * self.n_components)
class RandomFourierFeaturesCosine(RandomFourierFeaturesBase):
    r"""
    Random Fourier Features (RFF) is a method for approximating kernels. The essential
    element of the RFF approach :cite:p:`rahimi2007random` is the realization that Bochner's theorem
    for stationary kernels can be approximated by a Monte Carlo sum.
    We will approximate the kernel :math:`k(\mathbf{x}, \mathbf{x}')`
    by :math:`\Phi(\mathbf{x})^\top \Phi(\mathbf{x}')` where
    :math:`\Phi: \mathbb{R}^{D} \to \mathbb{R}^{M}` is a finite-dimensional feature map.
    The feature map is defined as:
    .. math::
      \Phi(\mathbf{x}) = \sqrt{\frac{2 \sigma^2}{\ell}}
        \begin{bmatrix}
          \cos(\boldsymbol{\theta}_1^\top \mathbf{x} + \tau) \\
          \vdots \\
          \cos(\boldsymbol{\theta}_M^\top \mathbf{x} + \tau)
        \end{bmatrix}
    where :math:`\sigma^2` is the kernel variance.
    The features are parameterised by random weights:
    - :math:`\boldsymbol{\theta} \sim p(\boldsymbol{\theta})`
      where :math:`p(\boldsymbol{\theta})` is the spectral density of the kernel
    - :math:`\tau \sim \mathcal{U}(0, 2\pi)`
    Equivalent to :class:`RandomFourierFeatures` by elementary trigonometric identities.
    """
    def build(self, input_shape: ShapeType) -> None:
        """
        Creates the variables of the layer.
        See `tf.keras.layers.Layer.build()
        <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#build>`_.
        """
        self._bias_build(n_components=self.n_components)
        super(RandomFourierFeaturesCosine, self).build(input_shape)
    def _bias_build(self, n_components: int) -> None:
        """Create the non-trainable phase-offset vector ``b`` with shape
        ``[1, n_components]``, initialised by ``_bias_init``."""
        shape = (1, n_components)
        self.b = self.add_weight(
            name="bias",
            trainable=False,
            shape=shape,
            dtype=self.dtype,
            initializer=self._bias_init,
        )
    def _bias_init(self, shape: TensorType, dtype: Optional[DType] = None) -> TensorType:
        """Draw the phase offsets uniformly from ``[0, 2*pi)``."""
        return tf.random.uniform(shape=shape, maxval=2.0 * np.pi, dtype=dtype)
    def _compute_output_dim(self, input_shape: ShapeType) -> int:
        """Output width is ``M``: one phase-shifted cosine per frequency."""
        return self.n_components
    def _compute_bases(self, inputs: TensorType) -> tf.Tensor:
        """
        Compute basis functions.
        :return: A tensor with the shape ``[N, M]``.
        """
        return _bases_cosine(inputs, self.W, self.b)
    def _compute_constant(self) -> tf.Tensor:
        """
        Compute normalizing constant for basis functions.
        :return: A tensor with the shape ``[]`` (i.e. a scalar).
        """
        return self.rff_constant(self.kernel.variance, output_dim=self.n_components)
class OrthogonalRandomFeatures(RandomFourierFeatures):
    r"""
    Orthogonal random Fourier features (ORF) :cite:p:`yu2016orthogonal` for more
    efficient and accurate kernel approximations than :class:`RandomFourierFeatures`.
    """
    def __init__(self, kernel: gpflow.kernels.Kernel, n_components: int, **kwargs: Mapping):
        """
        :param kernel: kernel to approximate; must be an instance of one of
            ``ORF_SUPPORTED_KERNELS``.
        :param n_components: number of frequency samples ``M``.
        """
        assert isinstance(kernel, ORF_SUPPORTED_KERNELS), "Unsupported Kernel"
        super(OrthogonalRandomFeatures, self).__init__(kernel, n_components, **kwargs)
    def _weights_init(self, shape: TensorType, dtype: Optional[DType] = None) -> TensorType:
        """Sample ``W`` with orthogonal rows: stack K QR-orthogonalised
        DxD Gaussian blocks, rescale rows by chi-distributed norms, and
        keep the first M rows."""
        n_components, input_dim = shape  # M, D
        n_reps = _ceil_divide(n_components, input_dim)  # K, smallest integer s.t. K*D >= M
        W = tf.random.normal(shape=(n_reps, input_dim, input_dim), dtype=dtype)
        Q, _ = tf.linalg.qr(W)  # throw away R; shape [K, D, D]
        s = _sample_chi(nu=input_dim, shape=(n_reps, input_dim), dtype=dtype)  # shape [K, D]
        U = tf.expand_dims(s, axis=-1) * Q  # equiv: S @ Q where S = diag(s); shape [K, D, D]
        V = tf.reshape(U, shape=(-1, input_dim))  # shape [K*D, D]
        return V[: self.n_components]  # shape [M, D] (throw away K*D - M rows)
| 2,355 | 777 | 185 |
647e8700e2ff520ca30c729150b246177b55fa27 | 3,319 | py | Python | utils/error_rates.py | grieggs/Ge-ez-HWR | 03481f4b24d2c3355d1ff99c2b48671b397ca949 | [
"MIT"
] | null | null | null | utils/error_rates.py | grieggs/Ge-ez-HWR | 03481f4b24d2c3355d1ff99c2b48671b397ca949 | [
"MIT"
] | null | null | null | utils/error_rates.py | grieggs/Ge-ez-HWR | 03481f4b24d2c3355d1ff99c2b48671b397ca949 | [
"MIT"
] | null | null | null | import editdistance
import re
| 28.86087 | 54 | 0.292859 | import editdistance
import re
def g_families(in_str):
families = {"hoy" : ["ሀ","ሁ","ሂ","ሃ","ሄ","ህ","ሆ"],
"lawe":["ለ","ሉ","ሊ","ላ","ሌ","ል","ሎ","ሏ"],
"hawt" : ["ሐ","ሑ","ሒ","ሓ","ሔ","ሕ","ሖ","ሗ"],
"may" : ["መ","ሙ","ሚ","ማ","ሜ","ም","ሞ","ሟ","ፙ"],
"sawt" : ["ሠ","ሡ","ሢ","ሣ","ሤ","ሥ","ሦ","ሧ"],
"res" : ["ረ","ሩ","ሪ","ራ","ሬ","ር","ሮ","ሯ","ፘ"],
"sat" : ["ሰ","ሱ","ሲ","ሳ","ሴ","ስ","ሶ","ሷ"],
"caf" : ["ቀ","ቁ","ቂ","ቃ","ቄ","ቅ","ቆ","ቋ"],
"bet" : ["በ","ቡ","ቢ","ባ","ቤ","ብ","ቦ","ቧ"],
"tawe" : ["ተ","ቱ","ቲ","ታ","ቴ","ት","ቶ","ቷ"],
"harm" : ["ኀ","ኁ","ኂ","ኃ","ኄ","ኅ","ኆ","ኋ"],
"nahas" : ["ነ","ኑ","ኒ","ና","ኔ","ን","ኖ","ኗ"],
"alf" : ["አ","ኡ","ኢ","ኣ","ኤ","እ","ኦ","ኧ"],
"kaf" : ["ከ","ኩ","ኪ","ካ","ኬ","ክ","ኮ","ኳ"],
"wawe" : ["ወ","ዉ","ዊ","ዋ","ዌ","ው","ዎ"],
"ayn" : ["ዐ","ዑ","ዒ","ዓ","ዔ","ዕ","ዖ"],
"zay" : ["ዘ","ዙ","ዚ","ዛ","ዜ","ዝ","ዞ","ዟ"],
"yaman" : ["የ","ዩ","ዪ","ያ","ዬ","ይ","ዮ"],
"dant" : ["ደ","ዱ","ዲ","ዳ","ዴ","ድ","ዶ","ዷ"],
"gaml" : ["ገ","ጉ","ጊ","ጋ","ጌ","ግ","ጎ","ጓ"],
"tayt" : ["ጠ","ጡ","ጢ","ጣ","ጤ","ጥ","ጦ","ጧ"],
"payt" : ["ጰ","ጱ","ጲ","ጳ","ጴ","ጵ","ጶ","ጷ"],
"saday" : ["ጸ","ጹ","ጺ","ጻ","ጼ","ጽ","ጾ","ጿ"],
"sappa" : ["ፀ","ፁ","ፂ","ፃ","ፄ","ፅ","ፆ"],
"af" : ["ፈ","ፉ","ፊ","ፋ","ፌ","ፍ","ፎ","ፏ","ፚ"],
"psa" : ["ፐ","ፑ","ፒ","ፓ","ፔ","ፕ","ፖ","ፗ"],
"cw" : ["ቈ","ቊ","ቋ","ቌ","ቍ"],
"hw" : ["ኈ","ኊ","ኋ","ኌ","ኍ"],
"kw" : ["ኰ","ኲ","ኳ","ኴ","ኵ"],
"gw" : ["ጐ","ጒ","ጓ","ጔ","ጕ "]}
replace = {"hoy": "0",
"lawe": "1",
"hawt": "2",
"may": "3",
"sawt": "4",
"res": "5",
"sat": "6",
"caf": "7",
"bet": "8",
"tawe": "9",
"harm": "q",
"nahas": "w",
"alf": "e",
"kaf": "r",
"wawe": "t",
"ayn": "y",
"zay": "u",
"yaman": "i",
"dant": "o",
"gaml": "p",
"tayt": "a",
"payt": "s",
"saday": "d",
"sappa": "f",
"af": "g",
"psa": "h",
"cw": "j",
"hw": "k",
"kw": "l",
"gw": "z"}
# print(len(families))
out = ""
other = "፼፵፴(፪)፱፲ጕ፻ ፰፳፡ዠ፯፺ሽ:ሸ፸ሻ፶።፩.፬፣]፷፫፹፭፮"
for x in in_str:
good = False
for y in families:
if x in families[y]:
out += replace[y]
good = True
if not good:
out += x
return out
def fcer(r, h):
    """Family-level character error rate.

    Both strings are first reduced to consonant-family codes via
    ``g_families`` and whitespace-normalised, then scored with ``err``.
    """
    ref = " ".join(g_families(r).split())
    hyp = " ".join(g_families(h).split())
    return err(ref, hyp)
def cer(r, h):
    """Character error rate.

    Whitespace in both strings is normalised (runs of whitespace collapse to
    a single space, leading/trailing whitespace is dropped) before computing
    the normalised edit distance via :func:`err`.

    :param r: reference string
    :param h: hypothesis string
    :return: edit distance divided by the reference length (see ``err``)
    """
    r = " ".join(r.split())
    h = " ".join(h.split())
    return err(r, h)
def err(r, h):
    """Normalised edit (Levenshtein) distance between two sequences.

    :param r: reference sequence (string or list of tokens)
    :param h: hypothesis sequence of the same kind
    :return: ``editdistance.eval(r, h) / len(r)``; if the reference is empty,
        ``len(h)`` (every hypothesis element counts as an insertion).
    """
    if not r:
        # Avoid division by zero; also skips the (now pointless) distance call.
        return len(h)
    return float(editdistance.eval(r, h)) / float(len(r))
def wer(r, h):
    """Word error rate: normalised edit distance over whitespace-split tokens.

    :param r: reference string
    :param h: hypothesis string
    :return: word-level edit distance divided by the number of reference words
        (see ``err``)
    """
    return err(r.split(), h.split())
| 3,679 | 0 | 115 |
39a07c09f6024565b3147c8504611623c2108b19 | 75 | py | Python | tests/__init__.py | Purg/SMQTK-Indexing | 24b5f875ec01a93f1c4842381a6de88041166604 | [
"BSD-3-Clause"
] | 82 | 2015-01-07T15:33:29.000Z | 2021-08-11T18:34:05.000Z | tests/__init__.py | Purg/SMQTK-Indexing | 24b5f875ec01a93f1c4842381a6de88041166604 | [
"BSD-3-Clause"
] | 230 | 2015-04-08T14:36:51.000Z | 2022-03-14T17:55:30.000Z | tests/__init__.py | Purg/SMQTK-Indexing | 24b5f875ec01a93f1c4842381a6de88041166604 | [
"BSD-3-Clause"
] | 65 | 2015-01-04T15:00:16.000Z | 2021-11-19T18:09:11.000Z | import os
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
| 18.75 | 63 | 0.746667 | import os
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
| 0 | 0 | 0 |
b0bb9262cf421e6c6ef21d8135a4ed13a6df9c0c | 2,484 | py | Python | Chapter07/Ch7.AmazonSP.py | AcornPublishing/keras-projects | 1a8486a375af3bacf9aa78e93c9fc1736ac16d52 | [
"MIT"
] | null | null | null | Chapter07/Ch7.AmazonSP.py | AcornPublishing/keras-projects | 1a8486a375af3bacf9aa78e93c9fc1736ac16d52 | [
"MIT"
] | null | null | null | Chapter07/Ch7.AmazonSP.py | AcornPublishing/keras-projects | 1a8486a375af3bacf9aa78e93c9fc1736ac16d52 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(3)
Data = pd.read_csv('AMZN.csv',header=0, usecols=['Date',
'Close'],parse_dates=True,index_col='Date')
print(Data.info())
print(Data.head())
print(Data.describe())
plt.figure(figsize=(10,5))
plt.plot(Data)
plt.show()
DataPCh = Data.pct_change()
LogReturns = np.log(1 + DataPCh)
print(LogReturns.tail(10))
plt.figure(figsize=(10,5))
plt.plot(LogReturns)
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
DataScaled = scaler.fit_transform(Data)
TrainLen = int(len(DataScaled) * 0.70)
TestLen = len(DataScaled) - TrainLen
TrainData = DataScaled[0:TrainLen,:]
TestData = DataScaled[TrainLen:len(DataScaled),:]
print(len(TrainData), len(TestData))
TimeStep = 1
TrainX, TrainY = DatasetCreation(TrainData, TimeStep)
TestX, TestY = DatasetCreation(TestData, TimeStep)
TrainX = np.reshape(TrainX, (TrainX.shape[0], 1, TrainX.shape[1]))
TestX = np.reshape(TestX, (TestX.shape[0], 1, TestX.shape[1]))
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from tensorflow import set_random_seed
set_random_seed(3)
model = Sequential()
model.add(LSTM(256, input_shape=(1, TimeStep)))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy'])
model.fit(TrainX, TrainY, epochs=10, batch_size=1, verbose=1)
model.summary()
score = model.evaluate(TrainX, TrainY, verbose=0)
print('Keras Model Loss = ',score[0])
print('Keras Model Accuracy = ',score[1])
TrainPred = model.predict(TrainX)
TestPred = model.predict(TestX)
TrainPred = scaler.inverse_transform(TrainPred)
TrainY = scaler.inverse_transform([TrainY])
TestPred = scaler.inverse_transform(TestPred)
TestY = scaler.inverse_transform([TestY])
TrainPredictPlot = np.empty_like(DataScaled)
TrainPredictPlot[:, :] = np.nan
TrainPredictPlot[1:len(TrainPred)+1, :] = TrainPred
TestPredictPlot = np.empty_like(DataScaled)
TestPredictPlot[:, :] = np.nan
TestPredictPlot[len(TrainPred)+(1*2)+1:len(DataScaled)-1, :] = TestPred
plt.plot(scaler.inverse_transform(DataScaled))
plt.plot(TrainPredictPlot)
plt.plot(TestPredictPlot)
plt.show()
| 25.875 | 78 | 0.73591 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(3)
Data = pd.read_csv('AMZN.csv',header=0, usecols=['Date',
'Close'],parse_dates=True,index_col='Date')
print(Data.info())
print(Data.head())
print(Data.describe())
plt.figure(figsize=(10,5))
plt.plot(Data)
plt.show()
DataPCh = Data.pct_change()
LogReturns = np.log(1 + DataPCh)
print(LogReturns.tail(10))
plt.figure(figsize=(10,5))
plt.plot(LogReturns)
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
DataScaled = scaler.fit_transform(Data)
TrainLen = int(len(DataScaled) * 0.70)
TestLen = len(DataScaled) - TrainLen
TrainData = DataScaled[0:TrainLen,:]
TestData = DataScaled[TrainLen:len(DataScaled),:]
print(len(TrainData), len(TestData))
def DatasetCreation(dataset, TimeStep=1):
    """Build supervised (X, y) pairs from a univariate series for LSTM training.

    Sample ``X[i]`` is the window ``dataset[i : i + TimeStep, 0]`` and its
    target ``y[i]`` is the next value ``dataset[i + TimeStep, 0]``.

    :param dataset: 2-D array whose first column holds the series values.
    :param TimeStep: Window length (number of lagged values per sample).
    :return: Tuple of numpy arrays with shapes
        ``(len(dataset) - TimeStep - 1, TimeStep)`` and
        ``(len(dataset) - TimeStep - 1,)``.
    """
    # NOTE(review): the "- 1" skips the last usable window; kept as-is so the
    # output shapes stay identical to the original implementation.
    n_samples = len(dataset) - TimeStep - 1
    DataX = [dataset[i:(i + TimeStep), 0] for i in range(n_samples)]
    DataY = [dataset[i + TimeStep, 0] for i in range(n_samples)]
    return np.array(DataX), np.array(DataY)
TimeStep = 1
TrainX, TrainY = DatasetCreation(TrainData, TimeStep)
TestX, TestY = DatasetCreation(TestData, TimeStep)
TrainX = np.reshape(TrainX, (TrainX.shape[0], 1, TrainX.shape[1]))
TestX = np.reshape(TestX, (TestX.shape[0], 1, TestX.shape[1]))
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from tensorflow import set_random_seed
set_random_seed(3)
model = Sequential()
model.add(LSTM(256, input_shape=(1, TimeStep)))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy'])
model.fit(TrainX, TrainY, epochs=10, batch_size=1, verbose=1)
model.summary()
score = model.evaluate(TrainX, TrainY, verbose=0)
print('Keras Model Loss = ',score[0])
print('Keras Model Accuracy = ',score[1])
TrainPred = model.predict(TrainX)
TestPred = model.predict(TestX)
TrainPred = scaler.inverse_transform(TrainPred)
TrainY = scaler.inverse_transform([TrainY])
TestPred = scaler.inverse_transform(TestPred)
TestY = scaler.inverse_transform([TestY])
TrainPredictPlot = np.empty_like(DataScaled)
TrainPredictPlot[:, :] = np.nan
TrainPredictPlot[1:len(TrainPred)+1, :] = TrainPred
TestPredictPlot = np.empty_like(DataScaled)
TestPredictPlot[:, :] = np.nan
TestPredictPlot[len(TrainPred)+(1*2)+1:len(DataScaled)-1, :] = TestPred
plt.plot(scaler.inverse_transform(DataScaled))
plt.plot(TrainPredictPlot)
plt.plot(TestPredictPlot)
plt.show()
| 260 | 0 | 23 |
7f50763a80caa2b617b0e496c76ddba132736dd1 | 752 | py | Python | tpu_main.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 90 | 2018-11-29T07:05:16.000Z | 2021-11-22T11:32:58.000Z | tpu_main.py | shfshf/seq2annotation | d4bf88a869631b43fa2974c2ffa1c5dd6a7623ed | [
"Apache-2.0"
] | 50 | 2019-06-27T07:11:18.000Z | 2022-02-10T00:01:02.000Z | tpu_main.py | lanSeFangZhou/seq2annotation | a824520d46f0b3d70268fae422976a5ce1b3f4ce | [
"Apache-2.0"
] | 23 | 2019-01-03T14:57:15.000Z | 2022-03-08T07:50:33.000Z | from seq2annotation.trainer.train_model import train_model
from seq2annotation.algorithms.BiLSTM_CRF_model import BilstmCrfModel
from seq2annotation.algorithms.IDCNN_CRF_model import IdcnnCrfModel
# train_model(data_dir='./data', result_dir='./result', model_fn=IdcnnCrfModel.model_fn, **IdcnnCrfModel.default_params())
result = train_model(
data_dir='./data', result_dir='./results',
train_spec={'max_steps': None},
hook={
'stop_if_no_increase': {
'min_steps': 100,
'run_every_secs': 60,
'max_steps_without_increase': 10000
}
},
use_gpu=True,
tpu_config={
'tpu_name': 'u1mail2me',
},
model=BilstmCrfModel, **BilstmCrfModel.default_params()
)
print(result)
| 30.08 | 122 | 0.692819 | from seq2annotation.trainer.train_model import train_model
from seq2annotation.algorithms.BiLSTM_CRF_model import BilstmCrfModel
from seq2annotation.algorithms.IDCNN_CRF_model import IdcnnCrfModel
# train_model(data_dir='./data', result_dir='./result', model_fn=IdcnnCrfModel.model_fn, **IdcnnCrfModel.default_params())
result = train_model(
data_dir='./data', result_dir='./results',
train_spec={'max_steps': None},
hook={
'stop_if_no_increase': {
'min_steps': 100,
'run_every_secs': 60,
'max_steps_without_increase': 10000
}
},
use_gpu=True,
tpu_config={
'tpu_name': 'u1mail2me',
},
model=BilstmCrfModel, **BilstmCrfModel.default_params()
)
print(result)
| 0 | 0 | 0 |
b1de41b00c9777f9ef77d27cf37128daac1c1eae | 2,301 | py | Python | src/m6_your_turtles.py | jasminescott18/01-IntroductionToPython | ab3daadd9be0651cc42fff6323647b067c15b134 | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | jasminescott18/01-IntroductionToPython | ab3daadd9be0651cc42fff6323647b067c15b134 | [
"MIT"
] | null | null | null | src/m6_your_turtles.py | jasminescott18/01-IntroductionToPython | ab3daadd9be0651cc42fff6323647b067c15b134 | [
"MIT"
] | null | null | null | """
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Jasmine Scott
"""
###############################################################################
# COMPLETED: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# COMPLETED: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
import rosegraphics as rg
window = rg.TurtleWindow()
son_goku = rg.SimpleTurtle('arrow')
son_goku.pen = rg.Pen('orange',5)
son_goku.speed = 2
for k in range(3):
son_goku.forward(100)
son_goku.pen_up()
son_goku.right(90)
son_goku.forward(50)
son_goku.pen_down()
son_goku.right(90)
son_goku.forward(100)
son_goku.pen_up()
son_goku.left(90)
son_goku.forward(50)
son_goku.left(90)
son_goku.pen_down()
prince_vegeta = rg.SimpleTurtle('arrow')
prince_vegeta.pen = rg.Pen('blue',5)
prince_vegeta.speed = 2
prince_vegeta.right(90)
prince_vegeta.pen_up()
prince_vegeta.forward(25)
for k in range(3):
prince_vegeta.pen_down()
prince_vegeta.left(90)
prince_vegeta.forward(100)
prince_vegeta.pen_up()
prince_vegeta.right(90)
prince_vegeta.forward(50)
prince_vegeta.pen_down()
prince_vegeta.right(90)
prince_vegeta.forward(100)
prince_vegeta.pen_up()
prince_vegeta.left(90)
prince_vegeta.forward(50)
window.close_on_mouse_click() | 30.68 | 79 | 0.611039 | """
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Jasmine Scott
"""
###############################################################################
# COMPLETED: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# COMPLETED: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
import rosegraphics as rg
window = rg.TurtleWindow()
son_goku = rg.SimpleTurtle('arrow')
son_goku.pen = rg.Pen('orange',5)
son_goku.speed = 2
for k in range(3):
son_goku.forward(100)
son_goku.pen_up()
son_goku.right(90)
son_goku.forward(50)
son_goku.pen_down()
son_goku.right(90)
son_goku.forward(100)
son_goku.pen_up()
son_goku.left(90)
son_goku.forward(50)
son_goku.left(90)
son_goku.pen_down()
prince_vegeta = rg.SimpleTurtle('arrow')
prince_vegeta.pen = rg.Pen('blue',5)
prince_vegeta.speed = 2
prince_vegeta.right(90)
prince_vegeta.pen_up()
prince_vegeta.forward(25)
for k in range(3):
prince_vegeta.pen_down()
prince_vegeta.left(90)
prince_vegeta.forward(100)
prince_vegeta.pen_up()
prince_vegeta.right(90)
prince_vegeta.forward(50)
prince_vegeta.pen_down()
prince_vegeta.right(90)
prince_vegeta.forward(100)
prince_vegeta.pen_up()
prince_vegeta.left(90)
prince_vegeta.forward(50)
window.close_on_mouse_click() | 0 | 0 | 0 |
6a2930c078c01007b8afaabbbd8918a486f730c7 | 363 | py | Python | 1_PythonDataProcessing/4_18_read_csv_parse_dates.py | hnwarid/DQLabAcademy | e03d82f97536ae103b6abc65db0ae16520fb68c7 | [
"MIT"
] | null | null | null | 1_PythonDataProcessing/4_18_read_csv_parse_dates.py | hnwarid/DQLabAcademy | e03d82f97536ae103b6abc65db0ae16520fb68c7 | [
"MIT"
] | null | null | null | 1_PythonDataProcessing/4_18_read_csv_parse_dates.py | hnwarid/DQLabAcademy | e03d82f97536ae103b6abc65db0ae16520fb68c7 | [
"MIT"
] | null | null | null | import pandas as pd
# Load dataset https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv', parse_dates=True, index_col='timestamp')
# Cetak 5 data teratas
print(gaq.head())
# Cetak info dari dataframe gaq
print('info')
print(gaq.info()) | 45.375 | 142 | 0.793388 | import pandas as pd
# Load dataset https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/LO4/global_air_quality_4000rows.csv', parse_dates=True, index_col='timestamp')
# Cetak 5 data teratas
print(gaq.head())
# Cetak info dari dataframe gaq
print('info')
print(gaq.info()) | 0 | 0 | 0 |
fcb53cd4210d941389826d9bb52da3658b50b8ce | 7,714 | py | Python | bugbane/tools/send/dd_api/official_customized.py | gardatech/bugbane | b19a2c28732697ce7fd277f4256d14c307900678 | [
"Apache-2.0"
] | 9 | 2022-02-14T11:21:06.000Z | 2022-03-21T22:06:06.000Z | bugbane/tools/send/dd_api/official_customized.py | gardatech/bugbane | b19a2c28732697ce7fd277f4256d14c307900678 | [
"Apache-2.0"
] | 4 | 2022-02-21T09:45:27.000Z | 2022-03-14T14:09:52.000Z | bugbane/tools/send/dd_api/official_customized.py | gardatech/bugbane | b19a2c28732697ce7fd277f4256d14c307900678 | [
"Apache-2.0"
] | 1 | 2022-03-14T13:56:37.000Z | 2022-03-14T13:56:37.000Z | # Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by Valery Korolyov <fuzzah@tuta.io>
# Partially overwrites original class DefectDojoAPI in
# defectdojo_api library which is licensed under the MIT License
# For more details on defectdojo_api visit https://github.com/DefectDojo/defectdojo_api
import json
import requests
from defectdojo_api.defectdojo_apiv2 import DefectDojoAPIv2
from .abc import DefectDojoAPIError, DefectDojoResponse
from .factory import DefectDojoAPIFactory
from .official import DefectDojoAPI_official
@DefectDojoAPIFactory.register("official_customized")
| 38.378109 | 87 | 0.525668 | # Copyright 2022 Garda Technologies, LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Originally written by Valery Korolyov <fuzzah@tuta.io>
# Partially overwrites original class DefectDojoAPI in
# defectdojo_api library which is licensed under the MIT License
# For more details on defectdojo_api visit https://github.com/DefectDojo/defectdojo_api
import json
import requests
from defectdojo_api.defectdojo_apiv2 import DefectDojoAPIv2
from .abc import DefectDojoAPIError, DefectDojoResponse
from .factory import DefectDojoAPIFactory
from .official import DefectDojoAPI_official
@DefectDojoAPIFactory.register("official_customized")
class DefectDojoAPI_official_customized(DefectDojoAPI_official):
    """Variant of the official DefectDojo API wrapper that swaps in
    :class:`CustomDefectDojoAPIv2`, whose patched ``_request`` only
    JSON-encodes the body when no files are attached (see the "custom
    change" note in that class).
    """
    def instantiate_underlying_api(self):
        # Same construction as the parent, but with the patched APIv2 client.
        self.api = CustomDefectDojoAPIv2(
            self.host,
            self.user_token,
            self.user_name,
            debug=self.debug,
            verify_ssl=self.verify_ssl,
            api_version="v2",
        )
class CustomDefectDojoAPIv2(DefectDojoAPIv2):
    """``DefectDojoAPIv2`` with a patched ``_request`` method.

    Differences from upstream:

    * ``data`` is JSON-encoded only when no ``files`` are attached, so
      multipart uploads send the payload as plain form fields.
    * transport-level failures (SSL, connection, timeout, generic request
      errors) no longer reference the unbound ``response`` variable, which
      raised ``UnboundLocalError`` in the original implementation.
    """
    def _request(self, method, url, params=None, data=None, files=None):
        """Common handler for all HTTP requests.

        :param method: HTTP verb (``"GET"``, ``"POST"``, ...).
        :param url: path appended to ``self.host``.
        :param params: optional query-string parameters.
        :param data: optional request body (JSON-encoded unless ``files`` given).
        :param files: optional multipart files mapping.
        :return: a :class:`DefectDojoResponse` describing the outcome; this
            method reports failures in the response object instead of raising.
        """
        if not params:
            params = {}
        headers = {
            "User-Agent": self.user_agent,
            "Authorization": (
                ("ApiKey " + self.user + ":" + self.api_token)
                if (self.api_version == "v1")
                else ("Token " + self.api_token)
            ),
        }
        if not files:
            headers["Accept"] = "application/json"
            headers["Content-Type"] = "application/json"
            # custom change: make data json only if there were no files;
            # multipart uploads need the payload as plain form fields.
            if data:
                data = json.dumps(data)
        if self.proxies:
            proxies = self.proxies
        else:
            proxies = {}
        try:
            self.logger.debug("request:")
            self.logger.debug(method + " " + url)
            self.logger.debug("headers: " + str(headers))
            self.logger.debug("params:" + str(params))
            self.logger.debug("data:" + str(data))
            self.logger.debug("files:" + str(files))
            response = requests.request(
                method=method,
                url=self.host + url,
                params=params,
                data=data,
                files=files,
                headers=headers,
                timeout=self.timeout,
                verify=self.verify_ssl,
                cert=self.cert,
                proxies=proxies,
            )
            self.logger.debug("response:")
            self.logger.debug(response.status_code)
            self.logger.debug(response.text)
            try:
                if response.status_code == 201:  # Created new object
                    try:
                        # DefectDojo exposes the new object's id in the
                        # Location header: .../<id>/
                        object_id = response.headers["Location"].split("/")
                        key_id = object_id[-2]
                        data = int(key_id)
                    except Exception:
                        # No usable Location header; fall back to the body.
                        data = response.json()
                    return DefectDojoResponse(
                        message="Upload complete",
                        response_code=response.status_code,
                        data=data,
                        success=True,
                    )
                elif response.status_code == 204:  # Object updates
                    return DefectDojoResponse(
                        message="Object updated.",
                        response_code=response.status_code,
                        success=True,
                    )
                elif response.status_code == 400:  # Object not created
                    return DefectDojoResponse(
                        message="Error occured in API.",
                        response_code=response.status_code,
                        success=False,
                        data=response.text,
                    )
                elif response.status_code == 404:  # Object not created
                    return DefectDojoResponse(
                        message="Object id does not exist.",
                        response_code=response.status_code,
                        success=False,
                        data=response.text,
                    )
                elif response.status_code == 401:
                    return DefectDojoResponse(
                        message="Unauthorized.",
                        response_code=response.status_code,
                        success=False,
                        data=response.text,
                    )
                elif response.status_code == 414:
                    return DefectDojoResponse(
                        message="Request-URI Too Large.",
                        response_code=response.status_code,
                        success=False,
                    )
                elif response.status_code == 500:
                    return DefectDojoResponse(
                        message="An error 500 occured in the API.",
                        response_code=response.status_code,
                        success=False,
                        data=response.text,
                    )
                else:
                    data = response.json()
                    return DefectDojoResponse(
                        message="Success",
                        data=data,
                        success=True,
                        response_code=response.status_code,
                    )
            except ValueError:
                return DefectDojoResponse(
                    message="JSON response could not be decoded.",
                    response_code=response.status_code,
                    success=False,
                    data=response.text,
                )
        # For the transport-level failures below the request never completed,
        # so there is no HTTP response and no status code to report (the
        # original code dereferenced the unbound ``response`` here).
        except requests.exceptions.SSLError:
            self.logger.warning("An SSL error occurred.")
            return DefectDojoResponse(
                message="An SSL error occurred.",
                response_code=None,
                success=False,
            )
        except requests.exceptions.ConnectionError:
            self.logger.warning("A connection error occurred.")
            return DefectDojoResponse(
                message="A connection error occurred.",
                response_code=None,
                success=False,
            )
        except requests.exceptions.Timeout:
            self.logger.warning("The request timed out")
            return DefectDojoResponse(
                message="The request timed out after "
                + str(self.timeout)
                + " seconds.",
                response_code=None,
                success=False,
            )
        except requests.exceptions.RequestException as e:
            self.logger.warning("There was an error while handling the request.")
            self.logger.exception(e)
            return DefectDojoResponse(
                message="There was an error while handling the request.",
                response_code=None,
                success=False,
            )
| 248 | 6,227 | 71 |
4e35d75032ed2762b5cce0ddc64ea4ee177879df | 10,527 | py | Python | codes/dgmpm_stability/2Dcomparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/dgmpm_stability/2Dcomparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/dgmpm_stability/2Dcomparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
from scipy import optimize
from sympy import *
import matplotlib.pyplot as plt
import random
import pdb
import os
# Symbolic function to evaluate shape functions
shape_functions=lambda x,y: np.array([(1.-x)*(1.-y)/4.,(1.+x)*(1.-y)/4.,(1.+x)*(1.+y)/4.,(1.-x)*(1.+y)/4.])
grad_xi=lambda y:np.array([-(1.-y)/4.,(1.-y)/4.,(1.+y)/4.,-(1.+y)/4.])
grad_eta=lambda x:np.array([-(1.-x)/4.,-(1.+x)/4.,(1.+x)/4.,(1.-x)/4.])
# shapes=| N1(Xp1) N1(Xp2) ... N1(XNp) |
# | N2(Xp1) N2(Xp2) ... N2(XNp) |
# | N3(Xp1) N3(Xp2) ... N3(XNp) |
# | N4(Xp1) N4(Xp2) ... N4(XNp) |
# grad_z=| N1_z(Xp1) N1_z(Xp2) ... N1_z(XNp) |
# | N2_z(Xp1) N2_z(Xp2) ... N2_z(XNp) |
# | N3_z(Xp1) N3_z(Xp2) ... N3_z(XNp) |
# | N4_z(Xp1) N4_z(Xp2) ... N4_z(XNp) |
# where Ni(Xj) is the shape function of node i evaluated at the jth particles position
# samples=20
# cx=np.linspace(2.,80.,samples)
# cy=cx[0]
cx=2.
cy=2.
dx=2.
samples=1000
number_left = Rand(1, 4, samples)
position_left = RandPosition(number_left)
number_bott = Rand(1, 4, samples)
position_bott = RandPosition(number_bott)
number_curr = Rand(1, 4, samples)
position_curr = RandPosition(number_curr)
number_botle = Rand(1, 4, samples)
position_botle = RandPosition(number_botle)
if not os.path.exists('dcuRandom.npy'):
dcuSolution=[]
dcuSolution_id=[]
ctuSolution=[]
ctuSolution_id=[]
for i in range(samples):
print "Computing critical CFL for sample ",i,": ",number_curr[i]," particles"
solution_dcu=[]
solution_dcu_id=[]
solution_ctu=[]
solution_ctu_id=[]
for k in range(number_curr[i]):
# if number_curr[i]<number_prev[i] :
# print "Attention ca va merder !!!!!!"
# else:
# print "Ca va le faire..."
XL = position_left[i][:,0] ; YL = position_left[i][:,1]
XB = position_bott[i][:,0] ; YB = position_bott[i][:,1]
XBL = position_botle[i][:,0] ; YBL = position_botle[i][:,1]
XC = position_curr[i][:,0] ; YC = position_curr[i][:,1]
res=symbolResidual(k,dx,cx,cy,(XC,YC),(XB,YB),(XL,YL))
solution_dcu.append(gridSearch(res,dx,cx))
res=symbolResidual(k,dx,cx,cy,(XC,YC),(XC,YC),(XC,YC))
solution_dcu_id.append(gridSearch(res,dx,cx))
res=symbolResidual(k,dx,cx,cy,(XC,YC),(XB,YB),(XL,YL),(XBL,YBL))
solution_ctu.append(gridSearch(res,dx,cx))
res=symbolResidual(k,dx,cx,cy,(XC,YC),(XC,YC),(XC,YC),(XC,YC))
solution_ctu_id.append(gridSearch(res,dx,cx))
dcuSolution.append(min(solution_dcu))
dcuSolution_id.append(min(solution_dcu_id))
ctuSolution.append(min(solution_ctu))
ctuSolution_id.append(min(solution_ctu_id))
np.save('dcuRandom.npy',dcuSolution)
np.save('dcuRandom_id.npy',dcuSolution_id)
np.save('ctuRandom.npy',ctuSolution)
np.save('ctuRandom_id.npy',ctuSolution_id)
else :
dcuSolution=np.load('dcuRandom.npy')
dcuSolution_id=np.load('dcuRandom_id.npy')
ctuSolution=np.load('ctuRandom.npy')
ctuSolution_id=np.load('ctuRandom_id.npy')
import statistics
plt.figure()
plt.hist(dcuSolution,bins='auto',color='blue')
plt.grid()
plt.figure()
plt.hist(dcuSolution_id,bins='auto',color='red')
plt.grid()
plt.show()
pdb.set_trace()
plt.figure()
plt.hist(ctuSolution,bins='auto',color='blue')
plt.grid()
plt.figure()
plt.hist(ctuSolution_id,bins='auto',color='red')
plt.grid()
plt.show()
| 37.066901 | 158 | 0.588582 | #!/usr/bin/python
import numpy as np
from scipy import optimize
from sympy import *
import matplotlib.pyplot as plt
import random
import pdb
import os
def export2DTeXFile(fileName, xField, fields, *kwargs):
    """Write a pgfplots/TikZ figure plotting each row of ``fields`` against ``xField``.

    The generated file contains one ``\\addplot`` per row of ``fields``, with a
    comment separating the first curve ("natural configuration") from the
    rest ("modified configuration"), plus an Emacs local-variables footer.

    :param fileName: path of the ``.tex`` file to (over)write.
    :param xField: 1-D sequence of abscissae shared by every curve.
    :param fields: 2-D numpy array; ``fields[i, :]`` is the i-th curve.
    :param kwargs: extra positional labels; currently unused, kept for
        backward compatibility with existing call sites.
    """
    # Fixed per-curve styling (Paul Tol-inspired palette, purple to red).
    color = ['Blue', 'Red', 'Green', 'Red', 'black', 'black', 'black']
    marker = ['+', 'x', 'star', '+', 'none', 'none', 'none']
    # ``with`` guarantees the file is closed even if a write fails.
    with open(fileName, "w") as tex:
        tex.write(r'\begin{tikzpicture}[scale=0.5]')
        tex.write('\n')
        tex.write(r'\begin{axis}[xlabel=$s_1/s_2$,ymajorgrids=true,xmajorgrids=true,xmin=1,xmax=41,xtick={1,10,20,30,40}]')
        tex.write('\n')
        tex.write('%%%%%%%%%%% NATURAL CONFIGURATION')
        tex.write('\n')
        for i in range(np.shape(fields)[0]):
            tex.write(r'\addplot[' + str(color[i]) + ',mark=' + str(marker[i]) + ',very thick,mark size=5pt] coordinates {')
            for j in range(len(fields[i, :])):
                tex.write('(' + str(xField[j]) + ',' + str(fields[i, j]) + ') ')
            tex.write('};\n')
            if i == 0:
                # Separate the two plotted configurations in the generated source.
                tex.write('%%%%%%%%%%% MODIFIED CONFIGURATION')
                tex.write('\n')
        tex.write(r'\end{axis}')
        tex.write('\n')
        tex.write(r'\end{tikzpicture}')
        tex.write('\n')
        tex.write('%%% Local Variables:')
        tex.write('\n')
        tex.write('%%% mode: latex')
        tex.write('\n')
        tex.write('%%% TeX-master: "../../mainManuscript"')
        tex.write('\n')
        tex.write('%%% End:')
        tex.write('\n')
# Symbolic function to evaluate shape functions
shape_functions=lambda x,y: np.array([(1.-x)*(1.-y)/4.,(1.+x)*(1.-y)/4.,(1.+x)*(1.+y)/4.,(1.-x)*(1.+y)/4.])
grad_xi=lambda y:np.array([-(1.-y)/4.,(1.-y)/4.,(1.+y)/4.,-(1.+y)/4.])
grad_eta=lambda x:np.array([-(1.-x)/4.,-(1.+x)/4.,(1.+x)/4.,(1.-x)/4.])
# shapes=| N1(Xp1) N1(Xp2) ... N1(XNp) |
# | N2(Xp1) N2(Xp2) ... N2(XNp) |
# | N3(Xp1) N3(Xp2) ... N3(XNp) |
# | N4(Xp1) N4(Xp2) ... N4(XNp) |
# grad_z=| N1_z(Xp1) N1_z(Xp2) ... N1_z(XNp) |
# | N2_z(Xp1) N2_z(Xp2) ... N2_z(XNp) |
# | N3_z(Xp1) N3_z(Xp2) ... N3_z(XNp) |
# | N4_z(Xp1) N4_z(Xp2) ... N4_z(XNp) |
# where Ni(Xj) is the shape function of node i evaluated at the jth particles position
def symbolResidual(point,dx,cx,cy,XC,XB,XL,XBL=0):
    """Assemble the symbolic stability residual |D| - 1 for material point
    `point` of the current cell, as a function of the time step dt.

    dx is the cell size and (cx, cy) the advection speeds. XC, XB, XL and
    XBL hold the local (xi, eta) particle coordinates of the Current,
    Bottom, Left and Bottom-Left cells respectively. Leaving XBL at its
    default of 0 disables the transverse (corner) flux contributions
    (donor-cell / DCU case); passing particle coordinates for XBL enables
    them (corner-transport / CTU case) -- TODO confirm scheme naming
    against the caller.

    Returns a plain numeric callable Residual(dt) built with sympy's
    lambdify.
    """
    # Corner (transverse) terms are only assembled when a bottom-left
    # cell was provided.
    transverse=True
    if XBL==0: transverse=False
    # Shape functions and their local-coordinate gradients evaluated at the
    # particle positions of each cell.
    shapesC=shape_functions(XC[0],XC[1])
    dSxi_C=grad_xi(XC[1])
    dSeta_C=grad_eta(XC[0])
    shapesB=shape_functions(XB[0],XB[1])
    dSxi_B=grad_xi(XB[1])
    dSeta_B=grad_eta(XB[0])
    shapesL=shape_functions(XL[0],XL[1])
    dSxi_L=grad_xi(XL[1])
    dSeta_L=grad_eta(XL[0])
    ## Number of material points in cells
    NmpC=len(XC[0])
    NmpL=len(XL[0])
    NmpB=len(XB[0])
    if XBL!=0:
        shapesBL=shape_functions(XBL[0],XBL[1])
        dSxi_BL=grad_xi(XBL[1])
        dSeta_BL=grad_eta(XBL[0])
        NmpBL=len(XBL[0])
    else:
        # No bottom-left cell: its contribution loop below is skipped.
        NmpBL=0
    # Symbolic time step; the residual is built as a sympy expression in dt
    # and lambdified at the end.
    dt = symbols('dt')
    ## sum_i^K = np.sum(shapesK[i,:]) with cell K and node i
    ## shape functions evaluated at edges centers to weight fluxes contributions
    ## o -- 3 -- o
    ## |         |
    ## 4         2
    ## |         |
    ## o -- 1 -- o
    shapeOnEdge=shape_functions(np.array([0.,1.,0.,-1.]),np.array([-1.,0.,1.,0.]))
    ## Define the normal to edges
    Nx=np.array([0.,1.,0.,-1.])
    Ny=np.array([-1.,0.,1.,0.])
    Nnodes=4
    Nedges=4
    Res=0.
    for P in range(NmpC):
        ## Contributions of material points sharing the same cell
        D_PI=0.
        for i in range(Nnodes):
            # 0th-order contributions
            wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
            D_PI+=wheightings*shapesC[i,P]
            # 1st-order contributions
            for j in range(Nnodes):
                D_PI+=2.*dt*wheightings*(shapesC[j,P]/np.sum(shapesC[j,:]))*(cx*np.dot(dSxi_C[i,:],shapesC[j,:])/dx + cy*np.dot(dSeta_C[i,:],shapesC[j,:])/dx)
            # Contributions of edges 2 and 3
            #pdb.set_trace()
            D_PI-=0.5*(dt/dx)*wheightings*shapeOnEdge[i,1]*NmpC*cx*(shapesC[1,P]/np.sum(shapesC[1,:])+shapesC[2,P]/np.sum(shapesC[2,:]))
            D_PI-=0.5*(dt/dx)*wheightings*shapeOnEdge[i,2]*NmpC*cy*(shapesC[2,P]/np.sum(shapesC[2,:])+shapesC[3,P]/np.sum(shapesC[3,:]))
            # Transverse contributions
            if transverse:
                D_PI+= 0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,1]*NmpC*cx*cy*(shapesC[0,P]/np.sum(shapesC[0,:])+shapesC[1,P]/np.sum(shapesC[1,:]))
                D_PI+= 0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,2]*NmpC*cx*cy*(shapesC[0,P]/np.sum(shapesC[0,:])+shapesC[3,P]/np.sum(shapesC[3,:]))
        Res+=np.abs(D_PI)
    ## Contributions of material points of left cell
    for P in range(NmpL):
        D_PI=0.
        for i in range(Nnodes):
            wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
            ## edge 4 contribution
            D_PI+= 0.5*(dt/dx)*wheightings*shapeOnEdge[i,3]*NmpC*cx*(shapesL[1,P]/np.sum(shapesL[1,:])+shapesL[2,P]/np.sum(shapesL[2,:]))
            if transverse:
                D_PI-=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,3]*NmpC*cx*cy*(shapesL[0,P]/np.sum(shapesL[0,:])+shapesL[1,P]/np.sum(shapesL[1,:]))
                ## edge 3 contribution
                D_PI-=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,2]*NmpC*cy*cx*(shapesL[1,P]/np.sum(shapesL[1,:])+shapesL[2,P]/np.sum(shapesL[2,:]))
        Res+=np.abs(D_PI)
    ## Contributions of material points of bottom cell
    for P in range(NmpB):
        D_PI=0.
        for i in range(Nnodes):
            wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
            ## edge 1 contribution
            D_PI+= 0.5*(dt/dx)*wheightings*shapeOnEdge[i,0]*NmpC*cy*(shapesB[2,P]/np.sum(shapesB[2,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
            if transverse:
                D_PI-=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,0]*NmpC*cy*cx*(shapesB[0,P]/np.sum(shapesB[0,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
                ## edge 2 contribution
                D_PI-=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,1]*NmpC*cx*cy*(shapesB[2,P]/np.sum(shapesB[2,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
        Res+=np.abs(D_PI)
    ## Contributions of material points of bottom-left cell
    for P in range(NmpBL):
        D_PI=0.
        for i in range(Nnodes):
            wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
            ## edge 1 contribution
            D_PI+=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,0]*NmpC*cy*cx*(shapesBL[1,P]/np.sum(shapesBL[1,:])+shapesBL[2,P]/np.sum(shapesBL[2,:]))
            ## edge 4 contribution
            D_PI+=0.25*(dt/dx)**2*wheightings*shapeOnEdge[i,3]*NmpC*cx*cy*(shapesBL[2,P]/np.sum(shapesBL[2,:])+shapesBL[3,P]/np.sum(shapesBL[3,:]))
        Res+=np.abs(D_PI)
    # Res accumulates |D_PI| over all contributing particles, so the root of
    # Residual(dt) = Res - 1 bounds the stable time step.
    Residual = lambdify((dt),Res-1.)
    return Residual
def gridSearch(function,dx,cx,tol=1.e-2):
    """Locate the largest Courant number in [0, 1] whose residual is below tol.

    The residual `function` is evaluated at dt = CFL*dx/cx on a uniform grid
    of 10000 candidate CFL values, scanned from 1 downwards. The first
    candidate satisfying function(dt) < tol is returned; 0. if none does.
    """
    n_samples = 10000
    candidates = np.linspace(0., 1., n_samples)
    # Walk the grid from the largest candidate down to the smallest.
    for cfl in reversed(candidates):
        if function(cfl * dx / cx) < tol:
            return cfl
    return 0.
def Rand(start, end, num):
    """Return a length-`num` integer array of uniform draws from [start, end]."""
    draws = [random.randint(start, end) for _ in range(num)]
    return np.asarray(draws)
def RandPosition(numberOfPoints):
    """For each entry n of `numberOfPoints`, draw an (n, 2) array of local
    particle coordinates uniformly distributed over [-1, 1] x [-1, 1];
    returns the list of arrays."""
    positions = []
    for count in numberOfPoints:
        # Draw x then y for one particle at a time, matching the original
        # ordering of random.uniform calls.
        samples = [[random.uniform(-1., 1.), random.uniform(-1., 1.)]
                   for _ in range(count)]
        positions.append(np.array(samples, dtype=float).reshape(count, 2))
    return positions
# samples=20
# cx=np.linspace(2.,80.,samples)
# cy=cx[0]
# Advection speeds, grid spacing and number of random particle configurations.
cx=2.
cy=2.
dx=2.
samples=1000
# For every sample, draw a random particle count (1-4) and random local
# positions for the left, bottom, current and bottom-left cells.
number_left = Rand(1, 4, samples)
position_left = RandPosition(number_left)
number_bott = Rand(1, 4, samples)
position_bott = RandPosition(number_bott)
number_curr = Rand(1, 4, samples)
position_curr = RandPosition(number_curr)
number_botle = Rand(1, 4, samples)
position_botle = RandPosition(number_botle)
# Only recompute the critical CFL numbers if no cached results exist on disk.
if not os.path.exists('dcuRandom.npy'):
    dcuSolution=[]
    dcuSolution_id=[]
    ctuSolution=[]
    ctuSolution_id=[]
    for i in range(samples):
        print "Computing critical CFL for sample ",i,": ",number_curr[i]," particles"
        solution_dcu=[]
        solution_dcu_id=[]
        solution_ctu=[]
        solution_ctu_id=[]
        # One residual build + grid search per material point of the current cell.
        for k in range(number_curr[i]):
            # if number_curr[i]<number_prev[i] :
            #     print "Attention ca va merder !!!!!!"  (French: "careful, this will break")
            # else:
            #     print "Ca va le faire..."  (French: "this will work out...")
            XL = position_left[i][:,0] ; YL = position_left[i][:,1]
            XB = position_bott[i][:,0] ; YB = position_bott[i][:,1]
            XBL = position_botle[i][:,0] ; YBL = position_botle[i][:,1]
            XC = position_curr[i][:,0] ; YC = position_curr[i][:,1]
            # Without XBL: no transverse terms, with distinct neighbour cells ...
            res=symbolResidual(k,dx,cx,cy,(XC,YC),(XB,YB),(XL,YL))
            solution_dcu.append(gridSearch(res,dx,cx))
            # ... and with all cells identical (reference case).
            res=symbolResidual(k,dx,cx,cy,(XC,YC),(XC,YC),(XC,YC))
            solution_dcu_id.append(gridSearch(res,dx,cx))
            # With XBL: transverse terms enabled, distinct neighbour cells ...
            res=symbolResidual(k,dx,cx,cy,(XC,YC),(XB,YB),(XL,YL),(XBL,YBL))
            solution_ctu.append(gridSearch(res,dx,cx))
            # ... and with all cells identical.
            res=symbolResidual(k,dx,cx,cy,(XC,YC),(XC,YC),(XC,YC),(XC,YC))
            solution_ctu_id.append(gridSearch(res,dx,cx))
        # Keep the most restrictive CFL over the sample's material points.
        dcuSolution.append(min(solution_dcu))
        dcuSolution_id.append(min(solution_dcu_id))
        ctuSolution.append(min(solution_ctu))
        ctuSolution_id.append(min(solution_ctu_id))
    # Cache the results so later runs skip straight to the plots.
    np.save('dcuRandom.npy',dcuSolution)
    np.save('dcuRandom_id.npy',dcuSolution_id)
    np.save('ctuRandom.npy',ctuSolution)
    np.save('ctuRandom_id.npy',ctuSolution_id)
else :
    dcuSolution=np.load('dcuRandom.npy')
    dcuSolution_id=np.load('dcuRandom_id.npy')
    ctuSolution=np.load('ctuRandom.npy')
    ctuSolution_id=np.load('ctuRandom_id.npy')
import statistics
# Histograms of the critical CFL distributions: first pair without transverse
# terms (blue: random cells, red: identical cells), second pair with them.
plt.figure()
plt.hist(dcuSolution,bins='auto',color='blue')
plt.grid()
plt.figure()
plt.hist(dcuSolution_id,bins='auto',color='red')
plt.grid()
plt.show()
# NOTE(review): deliberate debugger stop before the second set of plots.
pdb.set_trace()
plt.figure()
plt.hist(ctuSolution,bins='auto',color='blue')
plt.grid()
plt.figure()
plt.hist(ctuSolution_id,bins='auto',color='red')
plt.grid()
plt.show()
| 6,837 | 0 | 115 |
8e8a2eb679aeb3ae22f2f3709ecb4bbc7bc8cd46 | 3,261 | py | Python | src/test/conftest.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | src/test/conftest.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | src/test/conftest.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | # pylint: disable=redefined-outer-name,no-member
import json
import os
import shutil
import subprocess
import time
import pytest
from parse import parse
from sqlalchemy import create_engine
from alembic_utils.testbase import TEST_VERSIONS_ROOT, reset_event_listener_registry
PYTEST_DB = "postgresql://alem_user:password@localhost:5680/alem_db"
@pytest.fixture(scope="session")
def maybe_start_pg() -> None:
    """Creates a postgres 12 docker container that can be connected
    to using the PYTEST_DB connection string.

    Session-scoped yield fixture: yields once the database is reachable
    (or immediately when running on CI / a container already runs) and
    performs no teardown, so the container outlives the test session.
    """
    container_name = "alembic_utils_pg"
    image = "postgres:12"
    # Recover host/port/user/password/db from the shared connection string.
    connection_template = "postgresql://{user}:{pw}@{host}:{port:d}/{db}"
    conn_args = parse(connection_template, PYTEST_DB)
    # Don't attempt to instantiate a container if
    # we're on CI (CI provides its own database service).
    if "GITHUB_SHA" in os.environ:
        yield
        return
    try:
        is_running = (
            subprocess.check_output(
                ["docker", "inspect", "-f", "{{.State.Running}}", container_name]
            )
            .decode()
            .strip()
            == "true"
        )
    except subprocess.CalledProcessError:
        # Can't inspect container if it isn't running
        is_running = False
    if is_running:
        # Reuse the already-running container instead of starting a new one.
        yield
        return
    subprocess.call(
        [
            "docker",
            "run",
            "--rm",
            "--name",
            container_name,
            "-p",
            f"{conn_args['port']}:5432",
            "-d",
            "-e",
            f"POSTGRES_DB={conn_args['db']}",
            "-e",
            f"POSTGRES_PASSWORD={conn_args['pw']}",
            "-e",
            f"POSTGRES_USER={conn_args['user']}",
            "--health-cmd",
            "pg_isready",
            "--health-interval",
            "3s",
            "--health-timeout",
            "3s",
            "--health-retries",
            "15",
            image,
        ]
    )
    # Wait for postgres to become healthy (poll the docker health check,
    # up to ~10 seconds).
    for _ in range(10):
        out = subprocess.check_output(["docker", "inspect", container_name])
        inspect_info = json.loads(out)[0]
        health_status = inspect_info["State"]["Health"]["Status"]
        if health_status == "healthy":
            break
        else:
            time.sleep(1)
    else:
        # Fixed typo in the user-facing message ("comtainer" -> "container").
        raise Exception("Could not reach postgres container. Check docker installation")
    yield
    # subprocess.call(["docker", "stop", container_name])
    return
@pytest.fixture(scope="session")
def raw_engine(maybe_start_pg: None):
    """sqlalchemy engine fixture"""
    # Depends on maybe_start_pg so the database is up before connecting.
    eng = create_engine(PYTEST_DB)
    yield eng
    # Dispose of the connection pool once the test session ends.
    eng.dispose()
@pytest.fixture(scope="function")
def engine(raw_engine):
    """Engine that has been reset between tests"""
    # run_cleaners appears to reset database/test state; it is defined
    # elsewhere in this module -- TODO confirm.
    run_cleaners()
    yield raw_engine
    run_cleaners()
| 26.950413 | 88 | 0.592763 | # pylint: disable=redefined-outer-name,no-member
import json
import os
import shutil
import subprocess
import time
import pytest
from parse import parse
from sqlalchemy import create_engine
from alembic_utils.testbase import TEST_VERSIONS_ROOT, reset_event_listener_registry
PYTEST_DB = "postgresql://alem_user:password@localhost:5680/alem_db"
@pytest.fixture(scope="session")
def maybe_start_pg() -> None:
"""Creates a postgres 12 docker container that can be connected
to using the PYTEST_DB connection string"""
container_name = "alembic_utils_pg"
image = "postgres:12"
connection_template = "postgresql://{user}:{pw}@{host}:{port:d}/{db}"
conn_args = parse(connection_template, PYTEST_DB)
# Don't attempt to instantiate a container if
# we're on CI
if "GITHUB_SHA" in os.environ:
yield
return
try:
is_running = (
subprocess.check_output(
["docker", "inspect", "-f", "{{.State.Running}}", container_name]
)
.decode()
.strip()
== "true"
)
except subprocess.CalledProcessError:
# Can't inspect container if it isn't running
is_running = False
if is_running:
yield
return
subprocess.call(
[
"docker",
"run",
"--rm",
"--name",
container_name,
"-p",
f"{conn_args['port']}:5432",
"-d",
"-e",
f"POSTGRES_DB={conn_args['db']}",
"-e",
f"POSTGRES_PASSWORD={conn_args['pw']}",
"-e",
f"POSTGRES_USER={conn_args['user']}",
"--health-cmd",
"pg_isready",
"--health-interval",
"3s",
"--health-timeout",
"3s",
"--health-retries",
"15",
image,
]
)
# Wait for postgres to become healthy
for _ in range(10):
out = subprocess.check_output(["docker", "inspect", container_name])
inspect_info = json.loads(out)[0]
health_status = inspect_info["State"]["Health"]["Status"]
if health_status == "healthy":
break
else:
time.sleep(1)
else:
raise Exception("Could not reach postgres comtainer. Check docker installation")
yield
# subprocess.call(["docker", "stop", container_name])
return
@pytest.fixture(scope="session")
def raw_engine(maybe_start_pg: None):
"""sqlalchemy engine fixture"""
eng = create_engine(PYTEST_DB)
yield eng
eng.dispose()
@pytest.fixture(scope="function")
def engine(raw_engine):
    """Engine that has been reset between tests"""
    def run_cleaners():
        # Drop per-test state: event listeners, both schemas, and any
        # migration files left behind by a previous test.
        reset_event_listener_registry()
        raw_engine.execute("drop schema public cascade; create schema public;")
        raw_engine.execute('drop schema if exists "DEV" cascade; create schema "DEV";')
        # Remove any migrations that were left behind
        TEST_VERSIONS_ROOT.mkdir(exist_ok=True, parents=True)
        shutil.rmtree(TEST_VERSIONS_ROOT)
        TEST_VERSIONS_ROOT.mkdir(exist_ok=True, parents=True)
    # Clean before handing the engine to the test, and again afterwards.
    run_cleaners()
    yield raw_engine
    run_cleaners()
| 426 | 0 | 27 |
dff4f076230460df34341907153dd1c94b54a8a1 | 239 | py | Python | env/script/python_console.py | ZhuoZhuoCrayon/AcousticKeyBoard-Web | 0a0ead78aec7ed03898fd51e076aa57df966508c | [
"MIT"
] | null | null | null | env/script/python_console.py | ZhuoZhuoCrayon/AcousticKeyBoard-Web | 0a0ead78aec7ed03898fd51e076aa57df966508c | [
"MIT"
] | null | null | null | env/script/python_console.py | ZhuoZhuoCrayon/AcousticKeyBoard-Web | 0a0ead78aec7ed03898fd51e076aa57df966508c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import dotenv
# Print system information
print("Python %s on %s" % (sys.version, sys.platform))
# WORKING_DIR_AND_PYTHON_PATHS / PROJECT_ROOT look like placeholders
# substituted by an IDE console template -- TODO confirm.
sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])
# Import environment variables
dotenv.load_dotenv(dotenv_path=PROJECT_ROOT + "/env/dc_dev.env")
| 18.384615 | 64 | 0.719665 | # -*- coding: utf-8 -*-
import sys
import dotenv
# 打印系统信息
print("Python %s on %s" % (sys.version, sys.platform))
sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])
# 导入环境变量
dotenv.load_dotenv(dotenv_path=PROJECT_ROOT + "/env/dc_dev.env")
| 0 | 0 | 0 |
d765d8e2258ab69e90c67075674e072f5706b65f | 1,295 | py | Python | repoman/signature.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 6 | 2015-08-10T09:42:55.000Z | 2021-11-08T10:26:02.000Z | repoman/signature.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 11 | 2017-08-28T17:38:24.000Z | 2019-05-31T12:49:31.000Z | repoman/signature.py | jsoriano/python-repoman | 308c141ce7177238c70f78facf1fc2642cf485aa | [
"Apache-2.0"
] | 7 | 2015-02-14T16:15:41.000Z | 2021-09-29T09:53:26.000Z | #!/usr/bin/env python
#
# Copyright 2014 Tuenti Technologies S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import socket
| 26.428571 | 74 | 0.649421 | #!/usr/bin/env python
#
# Copyright 2014 Tuenti Technologies S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import socket
class Signature(dict):
    """Commit signature data backed by a plain dict.

    Each property falls back to an environment-derived default when the
    corresponding key was not supplied: the OS login name for ``user``, a
    ``user@hostname`` address for ``email``, and the user/email pair for
    ``author``/``author_email``.
    """

    @property
    def user(self):
        try:
            return self['user']
        except KeyError:
            return getpass.getuser()

    @property
    def email(self):
        try:
            return self['email']
        except KeyError:
            return "%s@%s" % (self.user, socket.gethostname())

    @property
    def author(self):
        try:
            return self['author']
        except KeyError:
            return self.user

    @property
    def author_email(self):
        try:
            return self['author_email']
        except KeyError:
            return self.email

    def __str__(self):
        return "%s <%s>" % (self.user, self.email)
| 436 | 191 | 23 |
f0e08aab4dd3fdb3c99ddb4779230554bbc7482d | 12,546 | py | Python | app/api/tests/test_pool.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/api/tests/test_pool.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | app/api/tests/test_pool.py | snakrani/discovery | 99690f186a194cabef6a5d1ad18fca715be1e187 | [
"CC0-1.0"
] | null | null | null | from django.test import tag
from test import cases as case
from test import fixtures as data
@tag('pool')
| 40.340836 | 198 | 0.403794 | from django.test import tag
from test import cases as case
from test import fixtures as data
@tag('pool')
class PoolTest(case.APITestCase, metaclass = case.MetaAPISchema):
fixtures = data.get_category_fixtures()
schema = {
'object': {
'tags': ('pool_object',),
'&HCATS_1': ('name', 'iexact', 'HCATS Pool 1'),
'&BMO_4': ('name', 'iexact', 'Electrical Maintenance'),
'&OASIS_SB_4': ('name', 'iexact', 'Scientific Research and Development'),
'#345': (),
'#ABCDEFG': ()
},
'ordering': {
'tags': ('pool_ordering',),
'fields': ('id', 'name', 'number', 'threshold', 'vehicle__id', 'vehicle__name')
},
'pagination': {
'tags': ('pool_pagination',),
'@no_args': {},
'!page': {'page': 15},
'@count': {'count': 3},
'@mixed': {'page': 2, 'count': 3}
},
'search': {
'tags': ('pool_search',),
'*search1': ('id', 'exact', 'BMO_SB_3'),
'@search2': ('number', 'regex', '2')
},
'fields': {
'id': {
'tags': ('pool_field', 'token_text'),
'*exact': 'BMO_SB_10',
'*iexact': 'hcaTs_Sb_2',
'@in': ("BMO_8", "OASIS_4", "HCATS_SB_1")
},
'name': {
'tags': ('pool_field', 'fuzzy_text'),
'@exact': 'Elevator Maintenance',
'@iexact': 'janitoRial',
'@in': ("Roofing Services", "Plumbing and Pipefitting"),
'@contains': 'Waste',
'@icontains': 'energy engineering',
'@startswith': 'HVAC',
'@istartswith': 'hvac',
'@endswith': 'Maintenance',
'@iendswith': 'dEVelopment',
'@regex': '\d+$',
'@iregex': 'air.*development$'
},
'number': {
'tags': ('pool_field', 'token_text'),
'@exact': '8',
'@iexact': '9',
'@in': ('1', '3', '5B', '16')
},
'threshold': {
'tags': ('pool_field', 'fuzzy_text'),
'@exact': '$15 million',
'@iexact': '$7.5 MILLION',
'@in': ("1000 employee", "$18 million", "500 employee"),
'@contains': 'employee',
'@icontains': 'EmplOYeE',
'@startswith': '$38.5',
'@istartswith': '$38.5',
'@endswith': 'million',
'@iendswith': 'MillIon',
'@regex': '^\d+\s+',
'@iregex': '(500 EMPLOYEE|MILLION)'
},
'vehicle__id': {
'tags': ('pool_field', 'vehicle_field', 'token_text'),
'@exact': 'BMO_SB',
'@iexact': 'hcaTs_Sb',
'@in': ("BMO", "OASIS", "HCATS_SB")
},
'vehicle__name': {
'tags': ('pool_field', 'vehicle_field', 'fuzzy_text'),
'@exact': 'HCATS Small Business',
'@iexact': 'hcats small business',
'@in': ("BMO Small Business", "OASIS Unrestricted"),
'@contains': 'OASIS',
'@icontains': 'bmo',
'@startswith': 'HCATS',
'@istartswith': 'hcats',
'@endswith': 'Business',
'@iendswith': 'unrestricted',
'@regex': 'Prof.*$',
'@iregex': 'prof.*$'
},
'vehicle__tier__number': {
'tags': ('pool_field', 'vehicle_field', 'tier_field', 'number'),
'@exact': 3,
'@lt': 3,
'@lte': 2,
'@gt': 2,
'@gte': 2,
'@range': (2, 3),
'@in': (1, 2, 3)
},
'vehicle__tier__name': {
'tags': ('pool_field', 'vehicle_field', 'tier_field', 'fuzzy_text'),
'@exact': 'Multi-Agency Solutions',
'@iexact': 'multi-agency solutions',
'@in': ("Multi-Agency Solutions", "Best-in-Class (BIC)"),
'@contains': 'Agency',
'@icontains': 'agency',
'@startswith': 'Multi',
'@istartswith': 'multi',
'@endswith': 'Solutions',
'@iendswith': 'solutions',
'@regex': 'Best-in-Class.*$',
'@iregex': '(multi|class)'
},
'vehicle__poc': {
'tags': ('pool_field', 'vehicle_field', 'fuzzy_text'),
'@exact': 'oasissb@gsa.gov',
'@iexact': 'OASIS@GSA.GOV',
'@in': ("oasissb@gsa.gov", "sbhcats@gsa.gov", "fssi.bmo@gsa.gov"),
'@contains': 'professionalservices',
'@icontains': 'ProfessionalServices',
'@startswith': 'oasis',
'@istartswith': 'OASIS',
'@endswith': 'gsa.gov',
'@iendswith': 'GSA.GOV',
'@regex': '\.gov$',
'@iregex': '(OASIS|HCATS)'
},
'vehicle__ordering_guide': {
'tags': ('pool_field', 'vehicle_field', 'fuzzy_text'),
'@exact': 'https://www.gsa.gov/cdnstatic/CONSOLIDATED_OASIS_U_SB_Ordering_Guide_8-15-2018.pdf',
'@iexact': 'https://WWW.GSA.GOV/cdnstatic/CONSOLIDATED_OASIS_U_SB_Ordering_Guide_8-15-2018.pdf',
'@in': ("https://www.gsa.gov/cdnstatic/CONSOLIDATED_OASIS_U_SB_Ordering_Guide_8-15-2018.pdf", "https://www.gsa.gov/cdnstatic/General_Supplies__Services/Ordering%20Guide%20V5_0.pdf"),
'@contains': 'OASIS',
'@icontains': 'oasis',
'@startswith': 'https',
'@istartswith': 'HTTPS',
'@endswith': 'pdf',
'@iendswith': 'PDF',
'@regex': '(OASIS|HCaTS)',
'@iregex': '(oasis|hcats)'
},
'vehicle__small_business': {
'tags': ('pool_field', 'vehicle_field', 'boolean'),
'[1]@exact': True,
'[2]@exact': False,
},
'vehicle__numeric_pool': {
'tags': ('pool_field', 'vehicle_field', 'boolean'),
'[1]@exact': True,
'[2]@exact': False,
},
'vehicle__display_number': {
'tags': ('pool_field', 'vehicle_field', 'boolean'),
'[1]@exact': True,
'[2]@exact': False,
},
'naics__code': {
'tags': ('pool_field', 'naics_field', 'fuzzy_text'),
'@exact': '541330',
'@iexact': '561710',
'@in': ("541711", "238290", "561730"),
'@contains': '622',
'@icontains': '622',
'@startswith': '54',
'@istartswith': '2382',
'@endswith': '30',
'@iendswith': '30',
'@regex': '^54\d+0$',
'@iregex': '^(23|56)'
},
'naics__description': {
'tags': ('pool_field', 'naics_field', 'fuzzy_text'),
'@exact': 'Outdoor Advertising',
'@iexact': 'meDIA representatives',
'@in': ("Payroll Services", "Commissioning Services", "Testing Laboratories"),
'@contains': 'Accounting',
'@icontains': 'heating',
'@startswith': 'Engineering',
'@istartswith': 'r',
'@endswith': 'Services',
'@iendswith': 'advertIsing',
'@regex': 'Services$',
'@iregex': 'environment(al)?'
},
'naics__sin__code': {
'tags': ('pool_field', 'naics_field', 'sin_field', 'fuzzy_text'),
'@exact': '100-03',
'@iexact': 'c871-202',
'@in': ("100-03", "520-14", "541-4G", "51-B36-2A"),
'@contains': '4B',
'@icontains': '-4b',
'@startswith': '51',
'@istartswith': 'c132',
'@endswith': '03',
'@iendswith': '2a',
'@regex': '[A-Z]\d+\-\d+$',
'@iregex': '^(C87|51)'
},
'psc__code': {
'tags': ('pool_field', 'psc_field', 'fuzzy_text'),
'@exact': 'R413',
'@iexact': 'r413',
'@in': ("S202", "Z1DZ", "R413"),
'@contains': 'R4',
'@icontains': 'r4',
'@startswith': 'R',
'@istartswith': 'r',
'@endswith': '06',
'@iendswith': '06',
'@regex': '[^\d]+$',
'@iregex': '^(r|s)'
},
'psc__description': {
'tags': ('pool_field', 'psc_field', 'fuzzy_text'),
'@exact': 'Advertising Services',
'@iexact': 'advertising services',
'@in': ("Advertising Services", "Aircraft Components / Accessories"),
'@contains': 'Services',
'@icontains': 'services',
'@startswith': 'Logistics',
'@istartswith': 'logisticS',
'@endswith': 'Services',
'@iendswith': 'SERVICES',
'@regex': '[/]+',
'@iregex': '^air(craft)?'
},
'psc__sin__code': {
'tags': ('pool_field', 'psc_field', 'sin_field', 'fuzzy_text'),
'@exact': '520-19',
'@iexact': 'c871-202',
'@in': ("100-03", "520-14", "541-4G", "51-B36-2A"),
'@contains': '1-5',
'@icontains': 'c54',
'@startswith': '51',
'@istartswith': 'c5',
'@endswith': 'C',
'@iendswith': 'c',
'@regex': '[A-Z]\d+\-\d+$',
'@iregex': '^(C87|51)'
},
'keywords__id': {
'tags': ('pool_field', 'keyword_field', 'number'),
'@exact': 54,
'@lt': 500,
'@lte': 500,
'@gt': 500,
'@gte': 500,
'@range': (100, 300),
'@in': (43, 3, 54)
},
'keywords__parent__id': {
'tags': ('pool_field', 'keyword_field', 'number'),
'@exact': 43,
'@lt': 500,
'@lte': 500,
'@gt': 500,
'@gte': 500,
'@range': (100, 300),
'@in': (43, 326, 568)
},
'keywords__name': {
'tags': ('pool_field', 'keyword_field', 'fuzzy_text'),
'@exact': 'Disaster Management',
'@iexact': 'disaster MANAGEMENT',
'@in': ("Inventory Management", "Disaster Management"),
'@contains': 'Processing',
'@icontains': 'processing',
'@startswith': 'Integrated',
'@istartswith': 'INTEGRATED',
'@endswith': 'Services',
'@iendswith': 'services',
'@regex': '[/]+',
'@iregex': 'clearing(house)'
},
'keywords__calc': {
'tags': ('pool_field', 'keyword_field', 'fuzzy_text'),
'@exact': 'Logistician',
'@iexact': 'logisticIAN',
'@in': ("Clerk", "Logistician"),
'@contains': 'Res',
'@icontains': 'res',
'@startswith': 'Consult',
'@istartswith': 'consult',
'@endswith': 'Analyst',
'@iendswith': 'analyst',
'@regex': '(Business|Data)\s+Analyst',
'@iregex': '^(business|data)'
}
}
}
    def initialize(self):
        # Router basename consumed by the APITestCase harness when building
        # request URLs for this suite -- TODO confirm against the base class.
        self.router = 'pools'
def validate_object(self, resp, base_key = []):
resp.is_not_empty(base_key + ['id'])
resp.is_not_empty(base_key + ['name'])
resp.is_not_empty(base_key + ['number'])
resp.is_not_empty(base_key + ['vehicle'])
#resp.is_not_empty(base_key + ['threshold'])
resp.is_not_empty(base_key + ['naics'])
#resp.is_not_empty(base_key + ['psc'])
| 395 | 12,020 | 22 |
575da1aa4f49004db0d40f9f3a5e6c15334882ed | 272 | py | Python | base/fork_test.py | victor999999/play_python | e3d777a3c7f000206ac69765bf27d3f38812e274 | [
"MIT"
] | null | null | null | base/fork_test.py | victor999999/play_python | e3d777a3c7f000206ac69765bf27d3f38812e274 | [
"MIT"
] | null | null | null | base/fork_test.py | victor999999/play_python | e3d777a3c7f000206ac69765bf27d3f38812e274 | [
"MIT"
] | null | null | null | import os
from time import sleep
print('***********************')
a = 1
pid = os.fork()
if pid < 0:
print("创建进程失败")
elif pid == 0:
print('这是新的进程')
print("a =",a)
a = 10000
else:
sleep(1)
print("这是原有进程")
print("psarent a =",a)
print("演示完毕")
| 12.952381 | 32 | 0.5 | import os
from time import sleep
print('***********************')
a = 1
pid = os.fork()
if pid < 0:
print("创建进程失败")
elif pid == 0:
print('这是新的进程')
print("a =",a)
a = 10000
else:
sleep(1)
print("这是原有进程")
print("psarent a =",a)
print("演示完毕")
| 0 | 0 | 0 |
cc5ce244934e2fbe53fde388a5e411e3b23b1335 | 567 | py | Python | pyglet-gui-master/tests/runtests.py | jorvasquezr/IA_ChessGame_Solver | 976e8098feb53bc033a8a7b11475e4d5405db56b | [
"MIT"
] | 52 | 2015-04-18T20:45:52.000Z | 2021-11-21T14:50:10.000Z | pyglet-gui-master/tests/runtests.py | jorvasquezr/IA_ChessGame_Solver | 976e8098feb53bc033a8a7b11475e4d5405db56b | [
"MIT"
] | 8 | 2015-06-14T19:35:55.000Z | 2018-06-29T13:52:28.000Z | tests/runtests.py | jorgecarleitao/pyglet-gui | 20ec4b335c9af3698dfa8328894544d4d0417973 | [
"BSD-3-Clause"
] | 21 | 2015-07-22T16:21:11.000Z | 2021-09-23T09:37:43.000Z | import glob
import os
import unittest
import sys
if __name__ == "__main__":
    # Run the collected suite and exit non-zero when any test fails, so
    # shell callers and CI can detect the failure.
    suite = build_test_suite()
    runner = unittest.TextTestRunner()
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
| 23.625 | 72 | 0.659612 | import glob
import os
import unittest
import sys
def build_test_suite():
    """Import every tests/test_*.py module and collect it into one suite."""
    loader = unittest.TestLoader()
    collected = unittest.TestSuite()
    for path in glob.glob('tests/test_*.py'):
        # Turn the file path into a dotted module name before importing it.
        dotted = os.path.splitext(path)[0].replace('/', '.')
        module = __import__(dotted, {}, {}, ['1'])
        collected.addTest(loader.loadTestsFromModule(module))
    return collected
if __name__ == "__main__":
suite = build_test_suite()
runner = unittest.TextTestRunner()
result = runner.run(suite)
sys.exit(not result.wasSuccessful())
| 323 | 0 | 23 |
6a770b59826299201618e3ab24fef6c1130fc5fa | 3,159 | py | Python | builder/frameworks/arduino/arduino-common.py | Niruyi/platform-senseboxsam | 32617df06332b0631609c043a5ba0703e96fda9e | [
"Apache-2.0"
] | null | null | null | builder/frameworks/arduino/arduino-common.py | Niruyi/platform-senseboxsam | 32617df06332b0631609c043a5ba0703e96fda9e | [
"Apache-2.0"
] | null | null | null | builder/frameworks/arduino/arduino-common.py | Niruyi/platform-senseboxsam | 32617df06332b0631609c043a5ba0703e96fda9e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Wiring-based Framework allows writing cross-platform software to
control devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
import os
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
# Core-specific framework packages carry the core name as a suffix.
framework_package = "framework-arduino-sensebox"
if board.get("build.core", "").lower() != "arduino":
    framework_package += "-%s" % board.get("build.core").lower()
FRAMEWORK_DIR = platform.get_package_dir(framework_package)
assert os.path.isdir(FRAMEWORK_DIR)
# Compiler, assembler and linker settings shared by all Arduino builds for
# this platform.
env.Append(
    ASFLAGS=["-x", "assembler-with-cpp"],
    CFLAGS=[
        "-std=gnu11"
    ],
    CCFLAGS=[
        "-Os", # optimize for size
        "-ffunction-sections", # place each function in its own section
        "-fdata-sections",
        "-Wall",
        "-mcpu=%s" % board.get("build.cpu"),
        "-mthumb",
        "-nostdlib",
        "--param", "max-inline-insns-single=500"
    ],
    CXXFLAGS=[
        "-fno-rtti",
        "-fno-exceptions",
        "-std=gnu++11",
        "-fno-threadsafe-statics"
    ],
    CPPDEFINES=[
        ("ARDUINO", 10805),
        ("F_CPU", "$BOARD_F_CPU"),
        "USBCON"
    ],
    LIBSOURCE_DIRS=[
        os.path.join(FRAMEWORK_DIR, "libraries")
    ],
    LINKFLAGS=[
        "-Os",
        "-mcpu=%s" % board.get("build.cpu"),
        "-mthumb",
        "-Wl,--gc-sections",
        "-Wl,--check-sections",
        "-Wl,--unresolved-symbols=report-all",
        "-Wl,--warn-common",
        "-Wl,--warn-section-align"
    ],
    LIBS=["m"]
)
# A project may override the variants folder; otherwise use the framework's.
variants_dir = os.path.join(
    "$PROJECT_DIR", board.get("build.variants_dir")) if board.get(
        "build.variants_dir", "") else os.path.join(FRAMEWORK_DIR, "variants")
# Without an explicit linker script, search the variant's bundled scripts.
if not board.get("build.ldscript", ""):
    env.Append(
        LIBPATH=[
            os.path.join(variants_dir, board.get("build.variant"), "linker_scripts", "gcc")
        ]
    )
env.Replace(
    LDSCRIPT_PATH=board.get("build.arduino.ldscript", "")
)
# Propagate the board's USB identity into preprocessor defines when present.
if "build.usb_product" in board:
    env.Append(
        CPPDEFINES=[
            ("USB_VID", board.get("build.hwids")[0][0]),
            ("USB_PID", board.get("build.hwids")[0][1]),
            ("USB_PRODUCT", '\\"%s\\"' %
             board.get("build.usb_product", "").replace('"', "")),
            ("USB_MANUFACTURER", '\\"%s\\"' %
             board.get("vendor", "").replace('"', ""))
        ]
    )
| 27.710526 | 91 | 0.608737 | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Arduino
Arduino Wiring-based Framework allows writing cross-platform software to
control devices attached to a wide range of Arduino boards to create all
kinds of creative coding, interactive objects, spaces or physical experiences.
http://arduino.cc/en/Reference/HomePage
"""
import os
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
framework_package = "framework-arduino-sensebox"
if board.get("build.core", "").lower() != "arduino":
framework_package += "-%s" % board.get("build.core").lower()
FRAMEWORK_DIR = platform.get_package_dir(framework_package)
assert os.path.isdir(FRAMEWORK_DIR)
env.Append(
ASFLAGS=["-x", "assembler-with-cpp"],
CFLAGS=[
"-std=gnu11"
],
CCFLAGS=[
"-Os", # optimize for size
"-ffunction-sections", # place each function in its own section
"-fdata-sections",
"-Wall",
"-mcpu=%s" % board.get("build.cpu"),
"-mthumb",
"-nostdlib",
"--param", "max-inline-insns-single=500"
],
CXXFLAGS=[
"-fno-rtti",
"-fno-exceptions",
"-std=gnu++11",
"-fno-threadsafe-statics"
],
CPPDEFINES=[
("ARDUINO", 10805),
("F_CPU", "$BOARD_F_CPU"),
"USBCON"
],
LIBSOURCE_DIRS=[
os.path.join(FRAMEWORK_DIR, "libraries")
],
LINKFLAGS=[
"-Os",
"-mcpu=%s" % board.get("build.cpu"),
"-mthumb",
"-Wl,--gc-sections",
"-Wl,--check-sections",
"-Wl,--unresolved-symbols=report-all",
"-Wl,--warn-common",
"-Wl,--warn-section-align"
],
LIBS=["m"]
)
variants_dir = os.path.join(
"$PROJECT_DIR", board.get("build.variants_dir")) if board.get(
"build.variants_dir", "") else os.path.join(FRAMEWORK_DIR, "variants")
if not board.get("build.ldscript", ""):
env.Append(
LIBPATH=[
os.path.join(variants_dir, board.get("build.variant"), "linker_scripts", "gcc")
]
)
env.Replace(
LDSCRIPT_PATH=board.get("build.arduino.ldscript", "")
)
if "build.usb_product" in board:
env.Append(
CPPDEFINES=[
("USB_VID", board.get("build.hwids")[0][0]),
("USB_PID", board.get("build.hwids")[0][1]),
("USB_PRODUCT", '\\"%s\\"' %
board.get("build.usb_product", "").replace('"', "")),
("USB_MANUFACTURER", '\\"%s\\"' %
board.get("vendor", "").replace('"', ""))
]
)
| 0 | 0 | 0 |
1756d64f7ac98d743a36980cccd531b6a44525f4 | 4,170 | py | Python | Data_Production/full_LHCO_wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | Data_Production/full_LHCO_wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | Data_Production/full_LHCO_wrangler.py | pwinslow/Lepton-Number-Violation-at-100-TeV | e697142e8e1222a423d1e7bd1ea1e65d1b6f94b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#####################################################################################################################################
# #
# The purpose of this script is to collect data generated from parallel computation of parton level cross sections into one #
# dat file. This file should contain the average matched cross section (after PYTHIA) and all events over all runs. The script also #
# creates a repository to store this dat file and all root files for sharing with collaborators. #
# #
#####################################################################################################################################
# Imports
import os.path
import sys
import re
import numpy as np
# Define the background to collect results for
BG = 'jjWZ'
# Create an event repository for the results
repoBase = '/fdata/hepx/store/user/pwinslow/' + BG + '_Results/'
if os.path.isdir(repoBase) == True:
sys.exit('Repository already exists...')
else:
os.system('mkdir ' + repoBase)
# Loop through MG5 run folders and populate the repository with the corresponding pythia log files and delphes root + lhco files
print '\nPopulating event repository...'
for run in range(1,21,1):
# Define path to run files
EventBase = '/fdata/hepx/store/user/pwinslow/MGRecord/100TeV_LNV_Results/DiBoson/' + BG + 'BG/job{0}/MG5_aMC_v2_3_3/'.format(run) + BG + 'BG_100TeV/Events/'
# Copy relevant files to event repository
os.system('cp ' + EventBase + 'pythia_output.log' + ' ' + repoBase + 'pythia_output_job{0}.log'.format(run))
os.system('cp ' + EventBase + 'delphes_events.root' + ' ' + repoBase + 'delphes_events_job{0}.root'.format(run))
os.system('cp ' + EventBase + 'delphes_events.lhco' + ' ' + repoBase + 'delphes_events_job{0}.lhco'.format(run))
print 'Done populating repository.'
# Enter event repository
os.chdir(repoBase)
# Open a dat file to hold the full set of amalgamated events and averaged matched cross section information
print 'Amalgamating full LHCO events...'
with open('full_' + BG + '_lhco_events.dat', 'w') as full_event_file:
# Create list to store all matched cross sections
sigma_list = []
# Loop through all MG5 run folders and extract the average matched cross section
for arg in range(1,21,1):
# Define pythia file
pythia_file = 'pythia_output_job{0}.log'.format(arg)
# Check if pythia file exists
if os.path.isfile(pythia_file) == False:
print 'File not found...'
# Open pythia log file and extract the matched cross section, saving them all to a single list
with open(pythia_file, 'r+') as File:
sigma_string = File.readlines()[-1]
sigma = float(re.findall("-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?", sigma_string)[0])
sigma_list.append(sigma)
# Write the average of all the matched cross sections to the dat file
full_event_file.write('Average matched cross section (pb): {0}\n'.format(np.mean(sigma_list)))
# Indicate beginning of event info
full_event_file.write('Begin event output...\n\n')
# Include header info for events
full_event_file.write(' # typ eta phi pt jmas ntrk btag had/em dum1 dum2\n')
# Loop through all MG5 runs again, this time extracting all events from all delphes event files
for run in range(1,21,1):
# Define delphes file
delphes_file = 'delphes_events_job{0}.lhco'.format(run)
# Check if delphes file exists
if os.path.isfile(delphes_file) == False:
print 'File not found...'
# Open delphes file and read in all events
with open(delphes_file, 'r+') as File:
delphes_events = File.readlines()
# While skipping header info, parse all events, printing each event separated by a line with a single 0
line = 1
while line < len(delphes_events):
if float(delphes_events[line].strip().split()[0]) != 0:
full_event_file.write(delphes_events[line])
line += 1
else:
full_event_file.write('0\n')
line += 1
# Delete individual leftover lhco files
print 'Cleaning repository...'
os.system('rm *.lhco')
print 'Full LHCO events collected and stored in repository.'
print 'Repository is complete.\n'
| 36.578947 | 157 | 0.656835 | #!/usr/bin/python
#####################################################################################################################################
# #
# The purpose of this script is to collect data generated from parallel computation of parton level cross sections into one #
# dat file. This file should contain the average matched cross section (after PYTHIA) and all events over all runs. The script also #
# creates a repository to store this dat file and all root files for sharing with collaborators. #
# #
#####################################################################################################################################
# Imports
import os.path
import sys
import re
import numpy as np
# Define the background to collect results for
BG = 'jjWZ'
# Create an event repository for the results
repoBase = '/fdata/hepx/store/user/pwinslow/' + BG + '_Results/'
if os.path.isdir(repoBase) == True:
sys.exit('Repository already exists...')
else:
os.system('mkdir ' + repoBase)
# Loop through MG5 run folders and populate the repository with the corresponding pythia log files and delphes root + lhco files
print '\nPopulating event repository...'
for run in range(1,21,1):
# Define path to run files
EventBase = '/fdata/hepx/store/user/pwinslow/MGRecord/100TeV_LNV_Results/DiBoson/' + BG + 'BG/job{0}/MG5_aMC_v2_3_3/'.format(run) + BG + 'BG_100TeV/Events/'
# Copy relevant files to event repository
os.system('cp ' + EventBase + 'pythia_output.log' + ' ' + repoBase + 'pythia_output_job{0}.log'.format(run))
os.system('cp ' + EventBase + 'delphes_events.root' + ' ' + repoBase + 'delphes_events_job{0}.root'.format(run))
os.system('cp ' + EventBase + 'delphes_events.lhco' + ' ' + repoBase + 'delphes_events_job{0}.lhco'.format(run))
print 'Done populating repository.'
# Enter event repository
os.chdir(repoBase)
# Open a dat file to hold the full set of amalgamated events and averaged matched cross section information
print 'Amalgamating full LHCO events...'
with open('full_' + BG + '_lhco_events.dat', 'w') as full_event_file:
# Create list to store all matched cross sections
sigma_list = []
# Loop through all MG5 run folders and extract the average matched cross section
for arg in range(1,21,1):
# Define pythia file
pythia_file = 'pythia_output_job{0}.log'.format(arg)
# Check if pythia file exists
if os.path.isfile(pythia_file) == False:
print 'File not found...'
# Open pythia log file and extract the matched cross section, saving them all to a single list
with open(pythia_file, 'r+') as File:
sigma_string = File.readlines()[-1]
sigma = float(re.findall("-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?", sigma_string)[0])
sigma_list.append(sigma)
# Write the average of all the matched cross sections to the dat file
full_event_file.write('Average matched cross section (pb): {0}\n'.format(np.mean(sigma_list)))
# Indicate beginning of event info
full_event_file.write('Begin event output...\n\n')
# Include header info for events
full_event_file.write(' # typ eta phi pt jmas ntrk btag had/em dum1 dum2\n')
# Loop through all MG5 runs again, this time extracting all events from all delphes event files
for run in range(1,21,1):
# Define delphes file
delphes_file = 'delphes_events_job{0}.lhco'.format(run)
# Check if delphes file exists
if os.path.isfile(delphes_file) == False:
print 'File not found...'
# Open delphes file and read in all events
with open(delphes_file, 'r+') as File:
delphes_events = File.readlines()
# While skipping header info, parse all events, printing each event separated by a line with a single 0
line = 1
while line < len(delphes_events):
if float(delphes_events[line].strip().split()[0]) != 0:
full_event_file.write(delphes_events[line])
line += 1
else:
full_event_file.write('0\n')
line += 1
# Delete individual leftover lhco files
print 'Cleaning repository...'
os.system('rm *.lhco')
print 'Full LHCO events collected and stored in repository.'
print 'Repository is complete.\n'
| 0 | 0 | 0 |
cd45bbf4d6328c07ba9b9fafbbd06f49720255b5 | 252 | py | Python | scheduler/SubModels/schedulingAndUsage.py | shreya2592/ResourceNinja | 553f0d54a294700710ee9ced67f13a71f82fad76 | [
"MIT"
] | null | null | null | scheduler/SubModels/schedulingAndUsage.py | shreya2592/ResourceNinja | 553f0d54a294700710ee9ced67f13a71f82fad76 | [
"MIT"
] | null | null | null | scheduler/SubModels/schedulingAndUsage.py | shreya2592/ResourceNinja | 553f0d54a294700710ee9ced67f13a71f82fad76 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
| 28 | 49 | 0.785714 | from django.db import models
from django.utils import timezone
class schedulingAndUsage(models.Model):
machineStatus=models.CharField(max_length=50)
time=models.DateTimeField()
date=models.DateTimeField()
laborID=models.IntegerField()
| 0 | 166 | 23 |
f4c0876f67f202919fe4d69469a2fbfed191908a | 14,012 | py | Python | chemreader/readers/basereader.py | thomasly/chemreader | 409508d3145413b99066324c4f9334735f68cff4 | [
"MIT"
] | 1 | 2020-04-24T04:24:11.000Z | 2020-04-24T04:24:11.000Z | chemreader/readers/basereader.py | thomasly/chemreader | 409508d3145413b99066324c4f9334735f68cff4 | [
"MIT"
] | null | null | null | chemreader/readers/basereader.py | thomasly/chemreader | 409508d3145413b99066324c4f9334735f68cff4 | [
"MIT"
] | 1 | 2020-04-24T04:24:15.000Z | 2020-04-24T04:24:15.000Z | import os
from copy import deepcopy
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit.Chem import AllChem
from rdkit import DataStructs
from scipy import sparse as sp
from ..utils.tools import property_getter
class MolFragmentsLabel:
    """ Label atoms in a molecule with the fragments they belong to.

    The fragment library is built from PubChem fingerprint sections 3 to 7
    (SMARTS patterns read from a CSV file).  The compiled patterns are cached
    on the class and built lazily on first use.  The labels are
    fingerprint-like 0/1 vectors, one entry per reference fragment, for each
    atom of the molecule.

    Args:
        ref_file (str): path to the reference file (csv format) that contains the SMARTS
            strings to match molecular fragments.
    """

    # Class-level cache of compiled SMARTS patterns; None until first use.
    ref_smarts = None

    @classmethod
    def _build_ref(cls, ref_file=None):
        """ Compile the SMARTS patterns from ``ref_file`` (CSV with a "SMARTS"
        column) and cache them on the class.  Defaults to the bundled PubChem
        pattern table shipped in the package resources. """
        if ref_file is None:
            cwd = os.path.dirname(__file__)
            ref_file = os.path.join(
                cwd, "..", "..", "resources", "pubchemFPKeys_to_SMARTSpattern.csv"
            )
        df = pd.read_csv(ref_file)
        cls.ref_smarts = [Chem.MolFromSmarts(smarts) for smarts in df["SMARTS"]]

    @classmethod
    def create_labels_for(cls, mol, sparse=True):
        """ Create fragment labels for a molecule.

        Args:
            mol (SMILES str or RDKit Mol object): the molecule to create labels for.
            sparse (bool): return the matrix in sparse format. Default: True.

        Returns:
            (n_fragments, n_atoms) 0/1 matrix; a scipy ``coo_matrix`` when
            ``sparse`` is True, otherwise a numpy array.

        Raises:
            ValueError: if ``mol`` is a string that is not valid SMILES.
        """
        # Build the reference patterns on first use; without this the class
        # attribute is still None and len(...) below would raise TypeError.
        if cls.ref_smarts is None:
            cls._build_ref()
        if isinstance(mol, str):
            smiles = mol
            mol = Chem.MolFromSmiles(smiles)
            if mol is None:
                # report the offending input, not the parsed (None) object
                raise ValueError(f"{smiles} is not a valid SMILES string.")
        # add hydrogens to the molecule so H-containing patterns can match
        mol = Chem.AddHs(mol)
        # initiate the vectors (builtin int: np.int was removed in NumPy 1.24)
        labels = np.zeros((len(cls.ref_smarts), mol.GetNumAtoms()), dtype=int)
        # search for the fragments in the molecule
        for i, pattern in enumerate(cls.ref_smarts):
            mat_substructs = mol.GetSubstructMatches(pattern)
            # flatten the tuple-of-tuples of matches into one set of atom ids
            mat_atoms = set()
            for atoms in mat_substructs:
                mat_atoms = mat_atoms.union(set(atoms))
            labels[i, list(mat_atoms)] = 1
        if sparse:
            labels = sp.coo_matrix(labels)
        return labels
| 32.360277 | 88 | 0.56994 | import os
from copy import deepcopy
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit.Chem import AllChem
from rdkit import DataStructs
from scipy import sparse as sp
from ..utils.tools import property_getter
class MolFragmentsLabel:
    """ Label atoms in a molecule with the fragments they belong to.

    The fragment library is built from PubChem fingerprint sections 3 to 7
    (SMARTS patterns read from a CSV file); the compiled patterns are cached
    on the class so they are only built once per process.  The labels are
    fingerprint-like 0/1 vectors, one entry per reference fragment, for each
    atom of the molecule.

    Args:
        ref_file (str): path to the reference file (csv format) that contains
            the SMARTS strings to match molecular fragments.  Defaults to the
            bundled PubChem pattern table.
    """

    # Class-level cache of compiled SMARTS patterns, shared by all instances.
    ref_smarts = None

    def __init__(self, ref_file=None):
        if ref_file is None:
            cwd = os.path.dirname(__file__)
            self.ref_file = os.path.join(
                cwd, "..", "..", "resources", "pubchemFPKeys_to_SMARTSpattern.csv"
            )
        else:
            self.ref_file = ref_file
        # NOTE(review): the cache is class-wide, so a custom ref_file is
        # ignored when an earlier instance already built the patterns.
        if MolFragmentsLabel.ref_smarts is None:
            self._build_ref(self.ref_file)

    @classmethod
    def _build_ref(cls, ref_file):
        """ Compile the SMARTS patterns listed in ``ref_file`` (CSV with a
        "SMARTS" column) and cache them on the class. """
        df = pd.read_csv(ref_file)
        cls.ref_smarts = [Chem.MolFromSmarts(smarts) for smarts in df["SMARTS"]]

    def create_labels_for(self, mol, sparse=True):
        """ Create fragment labels for a molecule.

        Args:
            mol (SMILES str or RDKit Mol object): the molecule to create labels for.
            sparse (bool): return the matrix in sparse format. Default: True.

        Returns:
            (n_fragments, n_atoms) 0/1 matrix; a scipy ``coo_matrix`` when
            ``sparse`` is True, otherwise a numpy array.

        Raises:
            ValueError: if ``mol`` is a string that is not valid SMILES.
        """
        if isinstance(mol, str):
            smiles = mol
            mol = Chem.MolFromSmiles(smiles)
            if mol is None:
                # report the offending input, not the parsed (None) object
                raise ValueError(f"{smiles} is not a valid SMILES string.")
        # add hydrogens to the molecule so H-containing patterns can match
        mol = Chem.AddHs(mol)
        # initiate the vectors (builtin int: np.int was removed in NumPy 1.24)
        labels = np.zeros((len(self.ref_smarts), mol.GetNumAtoms()), dtype=int)
        # search for the fragments in the molecule
        for i, pattern in enumerate(self.ref_smarts):
            mat_substructs = mol.GetSubstructMatches(pattern)
            # flatten the tuple-of-tuples of matches into one set of atom ids
            mat_atoms = set()
            for atoms in mat_substructs:
                mat_atoms = mat_atoms.union(set(atoms))
            labels[i, list(mat_atoms)] = 1
        if sparse:
            labels = sp.coo_matrix(labels)
        return labels
class _BaseReader(metaclass=ABCMeta):
    """ Shared functionality for the molecule readers.

    Holds the atom/bond vocabularies used for numeric encoding plus the
    generic feature/adjacency helpers; subclasses supply the molecule-specific
    accessors (``rdkit_mol``, ``bonds``, ``atom_types``, ...).
    """

    # Atom vocabulary taken from
    # https://github.com/shionhonda/gae-dgl/blob/master/gae_dgl/prepare_data.py
    _avail_atom_types = [
        "C", "N", "O", "S", "F", "Si", "P", "Cl", "Br", "Mg", "Li", "Na",
        "Ca", "Fe", "Al", "I", "B", "K", "Se", "Zn", "H", "Cu", "Mn", "As",
        "unknown",
    ]
    # Upper-cased lookup tables so encoding is case-insensitive.
    _atom2int = {atom.upper(): idx for idx, atom in enumerate(_avail_atom_types)}
    # Tripos Mol2 bond types: single, double, triple, amide, aromatic,
    # dummy, unknown.
    _bond_types = ["1", "2", "3", "am", "ar", "du", "un"]
    _bond2int = {bond.upper(): idx for idx, bond in enumerate(_bond_types)}

    @classmethod
    def atom_to_num(cls, atom_type):
        """ Map an atom symbol to its integer code; out-of-vocabulary symbols
        map to the code of "unknown". """
        return cls._atom2int.get(atom_type.upper(), cls._atom2int["UNKNOWN"])

    @classmethod
    def bond_to_num(cls, bond_type):
        """ Map a bond type string to its integer code; out-of-vocabulary
        types map to the code of "un" (unknown). """
        return cls._bond2int.get(bond_type.upper(), cls._bond2int["UN"])

    @staticmethod
    def rebuild_adj(adj, new_idx):
        """ Rebuild adjacency matrix with the new indices.

        Args:
            adj (numpy 2D array or matrix): The adjacency matrix to rebuild.
            new_idx (list of int): The list of new indices of the old nodes.
                For example, an old adjacency matrix with 3 nodes changes its
                first node index to 1 and second node index to 0. The new_idx
                should be [1, 0, 2].

        Returns:
            numpy 2D array: The rebuilt adjacency matrix.
        """
        # Map old index -> new index.
        mapping = {old: new for new, old in enumerate(new_idx)}
        # Builtin int: the np.int alias was removed in NumPy 1.24.
        new_adj = np.zeros(adj.shape, dtype=int)
        for row in range(adj.shape[0]):
            for col in range(adj.shape[1]):
                if adj[row, col] == 0:
                    continue
                new_adj[mapping[row], mapping[col]] = 1
        return new_adj

    def one_of_k_encoding(self, x, allowable_set):
        """ One-hot encode ``x`` against ``allowable_set``.

        Raises:
            ValueError: if ``x`` is not in the set.  (ValueError subclasses
            Exception, so callers catching the old generic Exception still
            work.)
        """
        if x not in allowable_set:
            raise ValueError(
                "input {0} not in allowable set{1}:".format(x, allowable_set)
            )
        return list(map(lambda s: x == s, allowable_set))

    def one_of_k_encoding_unk(self, x, allowable_set):
        """Maps inputs not in the allowable set to the last element."""
        if x not in allowable_set:
            x = allowable_set[-1]
        return list(map(lambda s: x == s, allowable_set))

    @property
    @abstractmethod
    def num_atoms(self):
        """ Number of atoms
        """

    @property
    @abstractmethod
    def bonds(self):
        """ Bonds
        """

    @property
    @abstractmethod
    def rdkit_mol(self):
        """ RDKit Mol object
        """

    @property
    @abstractmethod
    def atom_types(self):
        """ Atom types
        """

    @abstractmethod
    def get_adjacency_matrix(self):
        """ Get the adjacency matrix
        """

    @property
    def sorted_atoms(self):
        """ Atoms sorted by (type code, degree, original index); computed
        lazily and cached on first access. """
        try:
            return self._sorted_atoms
        except AttributeError:
            self._sortAtoms(self.rdkit_mol.GetAtoms())
            return self._sorted_atoms

    def _sortAtoms(self, atoms):
        """ Cache ``atoms`` sorted by (type code, degree, original index). """
        def key(atom):
            # Upper-case the symbol so two-letter elements ("Cl", "Br", ...)
            # hit the upper-cased keys of _atom2int (consistent with
            # atom_to_num) instead of falling into the OOV bucket.
            type_ = self._atom2int.get(atom.GetSymbol().upper(), len(self._atom2int))
            degree = atom.GetDegree()
            idx = atom.GetIdx()
            return (type_, degree, idx)
        self._sorted_atoms = sorted(atoms, key=key)

    def get_atom_features(
        self, numeric=False, sort_atoms=False, fragment_label=False, padding=None
    ):
        r""" Get the per-atom features of the molecule.

        Args:
            numeric (bool): if True, return the atom type as a number.
            sort_atoms (bool): Default is False. If True, sort the atoms by
                atom type.
            fragment_label (bool): if True, append the PubChem fragment label
                vector of each atom.
            padding (None or int): Pad the feature list to a fixed length.
                Must not be smaller than the number of atoms.

        Returns:
            list: list of tuples. Features are atom type, degree, formal
            charge, hybridization, aromaticity and chirality (plus fragment
            labels when requested).
        """
        if sort_atoms:
            atoms = self.sorted_atoms
        else:
            atoms = self.rdkit_mol.GetAtoms()
        if fragment_label:
            mfl = MolFragmentsLabel()
            frag_labels = mfl.create_labels_for(self.rdkit_mol, sparse=False)
        features = list()
        for atom in atoms:
            # Index atom_types by the atom's original index so the type stays
            # attached to the right atom even when the iteration order is the
            # sorted one.
            atom_type = self.atom_types[atom.GetIdx()]
            if numeric:
                atom_type = self.atom_to_num(atom_type)
            feature = [
                atom_type,
                atom.GetDegree(),
                atom.GetFormalCharge(),
                int(atom.GetHybridization()),
                int(atom.GetIsAromatic()),
                int(atom.GetChiralTag()),
            ]
            if fragment_label:
                feature.extend(frag_labels[:, atom.GetIdx()].tolist())
            features.append(tuple(feature))
        if padding is not None:
            if padding < len(features):
                raise ValueError(
                    "Padding number should be larger than the feature number. "
                    "Got {} < {}".format(padding, len(features))
                )
            # Pad with "unknown" atoms whose remaining features are zero.
            pad = [
                tuple([self.atom_to_num("unknown")] + [0] * (len(features[0]) - 1))
            ] * (padding - len(features))
            features.extend(pad)
        return features

    def sort_bonds(self, unsorted_bonds):
        """ Re-index bonds so their endpoints refer to the sorted atom order.

        Args:
            unsorted_bonds (list): list of bond dicts ({"connect": (i, j),
                "type": str}) in original atom indices.

        Returns:
            list: deep copies of the bonds with "connect" rewritten to the
            sorted indices.
        """
        new_idx = {old.GetIdx(): new for new, old in enumerate(self.sorted_atoms)}
        sorted_bonds = list()
        for bond in unsorted_bonds:
            start, end = bond["connect"]
            new_bond = deepcopy(bond)
            new_bond["connect"] = [new_idx[start], new_idx[end]]
            sorted_bonds.append(new_bond)
        return sorted_bonds

    def get_bond_features(self, numeric=False, sort_atoms=False):
        r""" Get the bond features/types of the molecule.

        Args:
            numeric (bool): if True, return the bond type as a number.
            sort_atoms (bool): if True, key the bonds by the sorted atom
                indices.

        Returns:
            dict: maps "i-j" (and the symmetric "j-i") connection strings to
            the bond type.
        """
        if sort_atoms:
            bonds = self.sort_bonds(self.bonds)
        else:
            bonds = self.bonds
        features = dict()
        for bond in bonds:
            type_ = bond["type"]
            if numeric:
                type_ = self.bond_to_num(type_)
            # Record both directions of the (undirected) bond.
            conn = str(bond["connect"][0]) + "-" + str(bond["connect"][1])
            conn2 = str(bond["connect"][1]) + "-" + str(bond["connect"][0])
            features[conn] = type_
            features[conn2] = type_
        return features

    @abstractmethod
    def to_graph(self):
        """ Convert molecule to graph
        """
class GraphFromRDKitMol(_BaseReader):
    """Molecular-graph view over an RDKit ``Mol`` object.

    Exposes lazily computed properties (atom/bond counts, types, Morgan
    fingerprint, ...) and converts the molecule into an adjacency matrix plus
    atom/bond feature containers via ``to_graph``.
    """
    def __init__(self, mol):
        r""" Wrap an already-constructed RDKit molecule.
        Args:
            mol (rdkit Mol object)
        """
        self._rdkit_mol = mol
    @property
    def rdkit_mol(self):
        """The wrapped RDKit Mol object."""
        return self._rdkit_mol
    # NOTE(review): property_getter comes from ..utils.tools; it presumably
    # computes and caches self._<name> via the matching _get_<name> method on
    # first access -- confirm in utils.tools.
    @property
    @property_getter
    def num_atoms(self):
        r""" Number of atoms in the molecule
        """
        return self._num_atoms
    def _get_num_atoms(self):
        return self.rdkit_mol.GetNumAtoms()
    @property
    @property_getter
    def num_bonds(self):
        """Number of bonds in the molecule."""
        return self._num_bonds
    def _get_num_bonds(self):
        return self.rdkit_mol.GetNumBonds()
    @property
    @property_getter
    def atom_names(self):
        """Atom element symbols in original atom order (e.g. "Cl")."""
        return self._atom_names
    def _get_atom_names(self):
        atoms = self.rdkit_mol.GetAtoms()
        return [atom.GetSymbol() for atom in atoms]
    @property
    @property_getter
    def atom_types(self):
        """Upper-cased atom symbols in original atom order (e.g. "CL")."""
        return self._atom_types
    def _get_atom_types(self):
        atom_types = list()
        for atom in self.rdkit_mol.GetAtoms():
            symbol = atom.GetSymbol().upper()
            atom_types.append(symbol)
        return atom_types
    @property
    @property_getter
    def fingerprint(self):
        """Morgan fingerprint bit vector (radius 2), or None without a mol."""
        return self._fingerprint
    def _get_fingerprint(self):
        if self.rdkit_mol is None:
            return None
        fingerprint = AllChem.GetMorganFingerprintAsBitVect(self.rdkit_mol, 2)
        return fingerprint
    @property
    @property_getter
    def bonds(self):
        """Bonds as dicts: {"connect": (begin_idx, end_idx), "type": str}."""
        return self._bonds
    def _get_bonds(self):
        bonds = list()
        for bond in self.rdkit_mol.GetBonds():
            b = dict()
            # "ar" for aromatic bonds, otherwise the numeric RDKit bond-type
            # code as a string (matches the _bond_types vocabulary).
            if bond.GetIsAromatic():
                type_ = "ar"
            else:
                type_ = str(int(bond.GetBondType()))
            b["connect"] = tuple([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])
            b["type"] = type_
            bonds.append(b)
        return bonds
    @property
    @property_getter
    def molecular_weight(self):
        """Exact molecular weight of the molecule."""
        return self._molecular_weight
    def _get_molecular_weight(self):
        return ExactMolWt(self.rdkit_mol)
    def get_adjacency_matrix(self, sparse=False, sort_atoms=False, padding=None):
        r""" Get the adjacency matrix of the molecular graph.
        sparse (bool): if True, return the matrix in sparse (CSR) format
        sort_atoms (bool): if True, reindex rows/columns by sorted atom order
        padding (int or None): pad the square matrix up to this size
        =======================================================================
        return (numpy.array or scipy.sparse.csr_matrix)
        """
        if padding is None:
            matrix = np.zeros((self.num_atoms, self.num_atoms), dtype=np.int8)
        else:
            if padding < self.num_atoms:
                # NOTE(review): adjacent string literals concatenate without a
                # space, yielding "...number.Got 5 < 7" -- formatting only.
                raise ValueError(
                    "Padding number should be larger than the atoms number."
                    "Got {} < {}".format(padding, self.num_atoms)
                )
            matrix = np.zeros((padding, padding), dtype=np.int8)
        for bond in self.bonds:
            # Fancy indexing sets both (i, j) and (j, i) in one assignment.
            edge = [c for c in bond["connect"]]
            matrix[edge, edge[::-1]] = 1
        if sort_atoms:
            matrix = self.rebuild_adj(matrix, [at.GetIdx() for at in self.sorted_atoms])
        if sparse:
            matrix = sp.csr_matrix(matrix)
        return matrix
    def to_graph(
        self,
        sparse=False,
        sort_atoms=False,
        fragment_label=False,
        pad_atom=None,
        pad_bond=None,
    ):
        """Bundle adjacency, numeric atom features and bond features into one
        dict.  NOTE(review): ``pad_bond`` is currently unused."""
        graph = dict()
        graph["adjacency"] = self.get_adjacency_matrix(
            sparse=sparse, sort_atoms=sort_atoms, padding=pad_atom,
        )
        graph["atom_features"] = self.get_atom_features(
            numeric=True,
            sort_atoms=sort_atoms,
            fragment_label=fragment_label,
            padding=pad_atom,
        )
        graph["bond_features"] = self.get_bond_features(
            numeric=True, sort_atoms=sort_atoms
        )
        return graph
    def similar_to(self, other, threshold=0.5):
        """True if the fingerprint similarity to ``other`` exceeds
        ``threshold`` (strictly greater)."""
        sim = DataStructs.FingerprintSimilarity(self.fingerprint, other.fingerprint)
        if sim > threshold:
            return True
        return False
| 3,387 | 8,570 | 99 |
d7ffcee15a996978d3c096439336ad48cfbfa2f9 | 7,400 | py | Python | pyfiles/AutoDetectCircle.py | Rylu12/CircleD | d275d7804acd460f4ad13b9ee9342976df900fee | [
"MIT"
] | 25 | 2020-02-27T18:34:30.000Z | 2022-03-03T01:24:33.000Z | pyfiles/AutoDetectCircle.py | Rylu12/CircleD | d275d7804acd460f4ad13b9ee9342976df900fee | [
"MIT"
] | null | null | null | pyfiles/AutoDetectCircle.py | Rylu12/CircleD | d275d7804acd460f4ad13b9ee9342976df900fee | [
"MIT"
] | 4 | 2020-03-04T00:16:50.000Z | 2020-07-01T05:19:25.000Z | import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import openpyxl
#Get pixel/distance (using ImageJ software) to output actual diameters of circles
# Default cv2.HoughCircles parameters (overridden per call by the caller/GUI).
dp = 1
accum_ratio = 1
min_dist = 5
p1 = 40
p2 = 30
# Diameter search bounds -- presumably micrometres, per the 'um' report labels.
minDiam = 1
maxDiam = 30
scalebar = 10
# Histogram defaults: plotted range and bin width.
min_range = 0
max_range = 100
intervals = 10
# Module-level state shared between the detection/reporting helpers.
rad_list =[]
detected_circles = []
dataForTable = {}
# pd.DataFrame(rad_list).to_excel('emulsions_D50_list_1.xlsx',header=False, index=False)
| 33.944954 | 124 | 0.597568 | import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import openpyxl
#Get pixel/distance (using ImageJ software) to output actual diameters of circles
# Default cv2.HoughCircles parameters (overridden per call by the caller/GUI).
dp = 1
accum_ratio = 1
min_dist = 5
p1 = 40
p2 = 30
# Diameter search bounds -- presumably micrometres, per the 'um' report labels.
minDiam = 1
maxDiam = 30
scalebar = 10
# Histogram defaults: plotted range and bin width.
min_range = 0
max_range = 100
intervals = 10
# Module-level state shared between the detection/reporting helpers.
rad_list =[]
detected_circles = []
dataForTable = {}
def clear_plt():
    """Clear the active matplotlib figure so the next plot starts fresh."""
    plt.clf()
def autoDetect(resized_img, accum_ratio, min_dist, p1, p2, minDiam, maxDiam, pixel_distance):
    """Run a Hough circle detection on ``resized_img``.

    Distance/diameter arguments are in real units and converted to pixels
    with ``pixel_distance`` (pixels per unit).  The result is stored in the
    module global ``detected_circles`` (None when nothing is found).
    """
    global result, img, table_data, rad_list, detected_circles
    # Convert to grayscale.
    img = resized_img
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Blur using 3 * 3 kernel.
    gray_blurred = cv2.blur(gray, (3, 3))
    # Convert physical-unit arguments into pixel values.
    minDist = int(min_dist*pixel_distance)
    minRadius = int(minDiam*pixel_distance/2)
    maxRadius = int(maxDiam*pixel_distance/2)
    # Clamp to at least 1 pixel.
    if minDist < 1:
        minDist = 1
    if minRadius <1:
        minRadius =1
    # NOTE(review): duplicated minRadius clamp -- this second check looks as
    # if it was meant to clamp maxRadius; confirm before changing, since a
    # zero maxRadius alters cv2.HoughCircles' search behaviour.
    if minRadius <1:
        minRadius =1
    # Apply Hough transform on the blurred image.
    detected_circles = cv2.HoughCircles(gray_blurred,
                    cv2.HOUGH_GRADIENT, dp = int(accum_ratio), minDist = minDist,
                    param1 = int(p1), param2 = int(p2), minRadius = minRadius, maxRadius = maxRadius)
def autoDetectBin(resized_img, threshold,accum_ratio, min_dist, p1, p2, minDiam, maxDiam, pixel_distance):
    """Hough circle detection on a binarized version of ``resized_img``.

    Same as autoDetect, except the grayscale image is first thresholded at
    ``threshold`` (binary, 255 max) before blurring and detection.  The
    result is stored in the module global ``detected_circles``.
    """
    global result, img, table_data, rad_list, detected_circles
    img = resized_img
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Binarize: pixels above `threshold` become 255, the rest 0.
    thres,binImg = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
    # Blur using 3 * 3 kernel.
    blurred = cv2.blur(binImg, (3, 3))
    # Convert physical-unit arguments into pixel values.
    minDist = int(min_dist*pixel_distance)
    minRadius = int(minDiam*pixel_distance/2)
    maxRadius = int(maxDiam*pixel_distance/2)
    # Clamp to at least 1 pixel.
    if minDist < 1:
        minDist = 1
    if minRadius <1:
        minRadius =1
    # NOTE(review): duplicated minRadius clamp (see autoDetect) -- probably
    # intended for maxRadius; confirm before changing.
    if minRadius <1:
        minRadius =1
    # Apply Hough transform on the blurred image.
    detected_circles = cv2.HoughCircles(blurred,
                    cv2.HOUGH_GRADIENT, dp = int(accum_ratio), minDist = minDist,
                    param1 = int(p1), param2 = int(p2), minRadius = minRadius, maxRadius = maxRadius)
def processCircles(state, resized_img, filename, pixel_distance, manual_list):
    """Annotate detected circles and build a text summary report.

    When ``state`` is falsy the auto-detection result is discarded and the
    user-supplied ``manual_list`` of diameters is used instead.  For
    auto-detected circles, draws them on the image, saves it as
    '<name>_detected<ext>', converts radii (pixels) to diameters, and returns
    a report with count, average diameter and D10/D50/D90.  Updates several
    module globals (rad_list, img, result, percentile indices, new_name).
    """
    global detected_circles, rad_list, img, result, bottom_10percentile, top_90percentile, new_name
    # Draw circles that are detected.
    img = resized_img
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    rad_list=[]
    if state == False:
        detected_circles = None
    result = '\n\n'
    try:
        if (detected_circles is None) and (len(manual_list) == 0):
            return '\nNo circles found!\n'
        elif len(manual_list) > 0 and (detected_circles is None):
            # Manual-measurement path: diameters come straight from the user.
            manual_list.sort()
            bottom_10percentile = int(len(manual_list)*0.1)
            top_90percentile = int(len(manual_list)*0.9)
            result += '# of circles found: ' + str(len(manual_list))
            rad_list = manual_list
        else:
            # Convert the circle parameters a, b and r to integers.
            detected_circles = np.uint16(np.around(detected_circles))
            for pt in detected_circles[0, :]:
                a, b, r = pt[0], pt[1], pt[2]
                # Draw the circumference of the circle.
                cv2.circle(img, (a, b), r, (0, 255, 0), 2)
                # Draw a small circle (of radius 1) to show the center.
                cv2.circle(img, (a, b), 1, (0, 0, 255), 2)
            new_name = filename[:-4] + '_detected' + filename[-4:]
            cv2.imwrite(new_name, img)
            #Loop to convert radius (pixel) values to diameter
            for x in range(detected_circles.shape[1]):
                diam = detected_circles[0,x,2]*2/pixel_distance
                rad_list.append(round(diam,1))
            rad_list.sort()
            bottom_10percentile = int(len(rad_list)*0.1)
            top_90percentile = int(len(rad_list)*0.9)
            result += '# of circles found: ' + str(detected_circles.shape[1])
        result +='\nAvg diam. = ' + "%.1f"%np.average(rad_list) + 'um'
        result +='\nD10 = '+ str(rad_list[bottom_10percentile])+'um'+'\nD50 = ' + "%.1f"%np.median(rad_list) + "um"
        result +='\nD90 = '+ str(rad_list[top_90percentile])+'um'
    except IndexError:
        # NOTE(review): best-effort -- indexing problems (e.g. empty lists)
        # are silently ignored and a partial report is returned.
        pass
    return result
def tableData():
    """Assemble the GUI table data from the module-global ``rad_list``.

    Builds ``dataForTable`` as {'recN': {'Diam_um': diameter}} (one row per
    circle) and merges count/average/D10/D50/D90 summary columns ('Col2',
    'Col3') into the first five rows.  Relies on globals set by
    processCircles(); an empty table is returned unchanged (KeyError path).
    """
    global rad_list, row_list, dataForTable, col_list, bottom_10percentile, top_90percentile, detected_circles, dataForTable
    col_list = []
    row_list = []
    # NOTE(review): `Diam_um` below in dict(Diam_um=...) is the literal
    # keyword/key name, not this variable -- the variable is effectively
    # unused.
    Diam_um = 'Diameter (um)'
    # Placeholder diameters for summary rows when fewer than 5 circles exist.
    temp_2 = ' '
    temp_3 = ' '
    temp_4 = ' '
    temp_5 = ' '
    if len(rad_list)>0:
        for items in range(len(rad_list)):
            col_list.append(dict(Diam_um = rad_list[items]))
        for rows in range(len(rad_list)):
            row_list.append('rec'+ str(rows+1))
        dataForTable = dict(zip(row_list,col_list))
    try:
        # Preserve the existing diameters of the first rows before they are
        # overwritten with the summary columns below.
        if len(dataForTable) < 2:
            temp_1 = dataForTable['rec1']['Diam_um']
        elif len(dataForTable) < 3:
            temp_1 = dataForTable['rec1']['Diam_um']
            temp_2 = dataForTable['rec2']['Diam_um']
        elif len(dataForTable) < 4:
            temp_1 = dataForTable['rec1']['Diam_um']
            temp_2 = dataForTable['rec2']['Diam_um']
            temp_3 = dataForTable['rec3']['Diam_um']
        elif len(dataForTable) < 5:
            temp_1 = dataForTable['rec1']['Diam_um']
            temp_2 = dataForTable['rec2']['Diam_um']
            temp_3 = dataForTable['rec3']['Diam_um']
            temp_4 = dataForTable['rec4']['Diam_um']
        elif len(dataForTable) >= 5:
            temp_1 = dataForTable['rec1']['Diam_um']
            temp_2 = dataForTable['rec2']['Diam_um']
            temp_3 = dataForTable['rec3']['Diam_um']
            temp_4 = dataForTable['rec4']['Diam_um']
            temp_5 = dataForTable['rec5']['Diam_um']
        dataForTable.update({'rec1':{'Diam_um': str(temp_1) , 'Col2': '# of Circles', 'Col3': str(len(rad_list))},
                            'rec2':{'Diam_um': str(temp_2),'Col2': 'Avg Diam (um)', 'Col3': "%.1f"%np.average(rad_list)},
                            'rec3':{'Diam_um': str(temp_3) ,'Col2': 'D10 (um)', 'Col3': str(rad_list[bottom_10percentile])},
                            'rec4':{'Diam_um': str(temp_4),'Col2': 'D50 (um)', 'Col3': "%.1f"%np.median(rad_list)},
                            'rec5':{'Diam_um': str(temp_5) ,'Col2': 'D90 (um)', 'Col3': str(rad_list[top_90percentile])}
                            })
    except KeyError:
        # Empty/partial table: leave dataForTable as-is.
        pass
    return dataForTable
def histoPlot(filename, min_range, max_range, intervals):
    """Plot a diameter histogram of the module-global ``rad_list`` and save
    it as '<filename base>_histogram.png' at 500 dpi."""
    global rad_list
    #Plot histogram
    plt.xlabel('Diameter (um)')
    plt.ylabel('Frequency')
    plt.title('Particle Size Distribution')
    (n, bins, patch) = plt.hist([rad_list], bins=np.arange(min_range,max_range+1,intervals), rwidth=0.9)
    plt.xticks(np.arange(min_range,max_range,intervals))
    # plt.gca().grid(which='major', axis='y')
    plt.savefig((filename[:-4] + '_histogram.png'), dpi = 500)
    plt.clf()
# pd.DataFrame(rad_list).to_excel('emulsions_D50_list_1.xlsx',header=False, index=False)
| 6,781 | 0 | 138 |
1e119287bd02cbf67397abfc87053e83d67df483 | 2,801 | py | Python | scripts/fit_whitepoint_matrices.py | 99991/foreground-estimation-evaluation | d13bb0657df502e32da18235beb984bacaa50591 | [
"MIT"
] | 1 | 2021-01-04T15:57:07.000Z | 2021-01-04T15:57:07.000Z | scripts/fit_whitepoint_matrices.py | 99991/foreground-estimation-evaluation | d13bb0657df502e32da18235beb984bacaa50591 | [
"MIT"
] | 1 | 2021-01-05T16:44:22.000Z | 2021-02-11T10:10:19.000Z | scripts/fit_whitepoint_matrices.py | pymatting/foreground-estimation-evaluation | d13bb0657df502e32da18235beb984bacaa50591 | [
"MIT"
] | null | null | null | import numpy as np
import os, json, util
if __name__ == "__main__":
fit_whitepoint_matrices(util.find_data_directory())
| 31.122222 | 84 | 0.584791 | import numpy as np
import os, json, util
def fit_whitepoint_matrices(directory, gamma=2.0):
output_path = os.path.join(directory, "whitepoint_matrices.json")
matrices = {}
print("Computing whitepoint transform matrices")
# fit matrix M to transform from have_lrgb to want_lrgb in least square sense
for index in range(1, 28):
print("image", index, "of", 27)
path = os.path.join(directory, "input_with_gt_fgd/input/GT%02d.tif" % index)
have_lrgb = util.load_image(path)
path = os.path.join(directory, "input_training_highres/GT%02d.png" % index)
want_srgb = util.load_image(path)
assert have_lrgb.shape[2] == 3
assert want_srgb.shape[2] == 3
want_lrgb = util.srgb_to_lrgb(want_srgb, gamma)
V = have_lrgb.reshape(-1, 3)
W = want_lrgb.reshape(-1, 3)
# minimize error function for M
# i.e. find 3-by-3 matrix M such that
# want_lrgb and have_lrgb are close
M = (W.T @ V) @ np.linalg.inv(V.T @ V)
# convert matrix entries to float so json.dump can handle them
matrices[index] = [[float(x) for x in row] for row in M]
# error function
error = np.mean(np.square(M @ V.T - W.T))
print("mean squared error: %f" % error)
# Remove "continue" statement to see differences between images
continue
import matplotlib.pyplot as plt
lrgb = (M @ V.T).T.reshape(want_lrgb.shape)
lrgb = np.maximum(0, lrgb)
srgb = util.lrgb_to_srgb(lrgb, gamma)
srgb = np.clip(srgb, 0, 1)
have_srgb = srgb
difference = np.abs(have_srgb - want_srgb)
nx = 2
ny = 3
plt.subplot(ny, nx, 1)
plt.title("have")
plt.imshow(have_srgb, vmin=0, vmax=1)
plt.axis("off")
plt.subplot(ny, nx, 2)
plt.title("want")
plt.imshow(want_srgb, vmin=0, vmax=1)
plt.axis("off")
plt.subplot(ny, nx, 3)
plt.title("clip(10*|difference|, 0, 1)")
plt.imshow(np.clip(10 * difference, 0, 1), cmap="gray", vmin=0, vmax=1)
plt.axis("off")
for channel, name in enumerate(["red", "green", "blue"]):
plt.subplot(ny, nx, 4 + channel)
plt.title(name + " channel histogram")
bins = np.linspace(0, 1, 256)
values = want_srgb[:, :, channel].flatten()
plt.hist(values, bins=bins, label="want", alpha=0.5)
values = have_srgb[:, :, channel].flatten()
plt.hist(values, bins=bins, label="have", alpha=0.5)
plt.legend()
plt.show()
with open(output_path, "w") as f:
json.dump(matrices, f, indent=4)
if __name__ == "__main__":
fit_whitepoint_matrices(util.find_data_directory())
| 2,651 | 0 | 23 |
23f666b829e24c0e1320d1419f4dd8b48e8097c9 | 776 | py | Python | collection/models.py | sohdas/sohdas.github.io | ea8ca4c32f07ec9855792253f92fa77d0922ab65 | [
"MIT"
] | 2 | 2018-11-06T03:28:52.000Z | 2018-11-08T03:35:28.000Z | collection/models.py | sohdas/sohdas.github.io | ea8ca4c32f07ec9855792253f92fa77d0922ab65 | [
"MIT"
] | 12 | 2018-11-27T04:45:21.000Z | 2019-03-23T00:53:56.000Z | collection/models.py | sohdas/sohdas.github.io | ea8ca4c32f07ec9855792253f92fa77d0922ab65 | [
"MIT"
] | 1 | 2020-02-01T16:13:24.000Z | 2020-02-01T16:13:24.000Z | from django.db import models
from django.conf import settings
| 38.8 | 96 | 0.713918 | from django.db import models
from django.conf import settings
class Shelf(models.Model):
    """A user-owned shelf that groups a collection of movies.

    Fields:
        user: owner of the shelf. default=1 points at the first user row --
            NOTE(review): presumably so legacy rows validate; confirm intent.
        shelf_name: display name of the shelf.
        shelf_size: item count; presumably maintained by application code --
            TODO confirm, nothing in this model updates it.
    """
    # PEP 8 (E251): no spaces around '=' in keyword arguments; field
    # definitions are otherwise unchanged, so no migration is generated.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=1)
    shelf_name = models.CharField(max_length=100)
    shelf_size = models.PositiveIntegerField(default=0)

    def __str__(self):
        """Return the shelf's display name."""
        return self.shelf_name
class Movie(models.Model):
    """A movie stored on a :class:`Shelf`.

    Fields:
        shelf: owning shelf; deleting the shelf deletes its movies (CASCADE).
        movie_name / movie_genre: display name and genre.
        release_year: year of release (defaults to 2018).
        movie_summary: free-text synopsis.
        movie_poster: poster reference (stored as text -- presumably a URL;
            TODO confirm against callers).
    """
    # PEP 8 (E251): no spaces around '=' in keyword arguments; field
    # definitions are otherwise unchanged, so no migration is generated.
    shelf = models.ForeignKey(Shelf, on_delete=models.CASCADE)
    movie_name = models.CharField(max_length=100)
    movie_genre = models.CharField(max_length=100)
    release_year = models.PositiveIntegerField(default=2018)
    movie_summary = models.TextField()
    # NOTE(review): Django convention prefers blank=True over null=True for
    # text-based fields; left as-is because changing it requires a migration.
    movie_poster = models.TextField(null=True)

    def __str__(self):
        """Return the movie's display name."""
        return self.movie_name
| 58 | 602 | 48 |
7c19be2e05b9c3b06671bdeb178f870193035964 | 319 | py | Python | pdc/pdc/serial/comm_protocol.py | sergiorb/pdc | 0337daf7fd63e5a226af12aacfd502a3aac294d9 | [
"MIT"
] | null | null | null | pdc/pdc/serial/comm_protocol.py | sergiorb/pdc | 0337daf7fd63e5a226af12aacfd502a3aac294d9 | [
"MIT"
] | null | null | null | pdc/pdc/serial/comm_protocol.py | sergiorb/pdc | 0337daf7fd63e5a226af12aacfd502a3aac294d9 | [
"MIT"
] | null | null | null | # Initial char for detecting incoming order's String.
INITIALCHAR = '$'
# Separates order string from device id.
IDENTIFIERCHART = ":"
# Device Id and function separator in order's String.
DEFUSEPARATOR = '/'
# Variable separator
VARSEPARATOR = '&'
# Final char for detecting the final order's string.
STOPCHAR = ';' | 22.785714 | 53 | 0.730408 | # Initial char for detecting incoming order's String.
# Initial character marking the start of an incoming order string.
INITIALCHAR = '$'
# Separates the order string from the device id.
IDENTIFIERCHART = ":"
# Separates the device id from the function name in an order string.
DEFUSEPARATOR = '/'
# Separates variables (arguments) within an order string.
VARSEPARATOR = '&'
# Final character marking the end of an order string.
STOPCHAR = ';' | 0 | 0 | 0 |
e2c3ae6efbddb1f0fe22e850b0bf384c5b3ddac8 | 616 | py | Python | randutils/__init__.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | 1 | 2021-08-03T17:34:31.000Z | 2021-08-03T17:34:31.000Z | randutils/__init__.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | randutils/__init__.py | JoelLefkowitz/randutils | 91bfb6a56676675edb241f11b602a46880520c72 | [
"MIT"
] | null | null | null | from .chance import by_chance # noqa
from .django import get_random_instance # noqa
from .django import get_random_instances # noqa
from .exceptions import EmptyListError # noqa
from .exceptions import NoObjectsError # noqa
from .generate import randint # noqa
from .generate import random_birthday # noqa
from .generate import random_number_str # noqa
from .generate import random_phone # noqa
from .generate import random_string # noqa
from .lists import pick_random_entry # noqa
from .lists import pop_random_entry # noqa
from .lists import randomly_filter # noqa
from .lists import scramble # noqa
| 41.066667 | 48 | 0.795455 | from .chance import by_chance # noqa
from .django import get_random_instance # noqa
from .django import get_random_instances # noqa
from .exceptions import EmptyListError # noqa
from .exceptions import NoObjectsError # noqa
from .generate import randint # noqa
from .generate import random_birthday # noqa
from .generate import random_number_str # noqa
from .generate import random_phone # noqa
from .generate import random_string # noqa
from .lists import pick_random_entry # noqa
from .lists import pop_random_entry # noqa
from .lists import randomly_filter # noqa
from .lists import scramble # noqa
| 0 | 0 | 0 |
baebb54a9b0f1bdd510ed77f643816b01fa3ea33 | 32,743 | py | Python | registrations/tests.py | praekeltfoundation/nurseconnect-registration | e8ec0a242d41bb80c75a976969dacb39b873761c | [
"BSD-3-Clause"
] | null | null | null | registrations/tests.py | praekeltfoundation/nurseconnect-registration | e8ec0a242d41bb80c75a976969dacb39b873761c | [
"BSD-3-Clause"
] | 7 | 2019-04-11T08:13:48.000Z | 2021-06-10T17:46:39.000Z | registrations/tests.py | praekeltfoundation/nurseconnect-registration | e8ec0a242d41bb80c75a976969dacb39b873761c | [
"BSD-3-Clause"
] | 1 | 2019-11-25T09:27:16.000Z | 2019-11-25T09:27:16.000Z | import json
import uuid
from datetime import datetime
from unittest import mock
from urllib.parse import urlencode
import responses
from django.contrib.messages import get_messages
from django.test import TestCase
from django.urls import reverse
from registrations.forms import RegistrationDetailsForm
from registrations.models import ReferralLink
from registrations.tasks import (
send_registration_to_openhim,
send_registration_to_rapidpro,
)
| 37.420571 | 88 | 0.554317 | import json
import uuid
from datetime import datetime
from unittest import mock
from urllib.parse import urlencode
import responses
from django.contrib.messages import get_messages
from django.test import TestCase
from django.urls import reverse
from registrations.forms import RegistrationDetailsForm
from registrations.models import ReferralLink
from registrations.tasks import (
send_registration_to_openhim,
send_registration_to_rapidpro,
)
class RegistrationDetailsTest(TestCase):
    """View/form tests for the registration-details page.

    Covers referral-link handling, MSISDN and clinic-code validation against
    the mocked RapidPro and Jembi/OpenHIM APIs, and the session state left
    behind on a successful form submission.
    """

    def test_get_referral_link(self):
        """
        A GET request with a referral link should add the MSISDN of the referrer to the
        context
        """
        referral = ReferralLink.objects.create(msisdn="+27820001001")
        url = reverse("registrations:registration-details", args=[referral.code])
        r = self.client.get(url)
        self.assertTemplateUsed(r, "registrations/registration_details.html")
        self.assertEqual(self.client.session["registered_by"], referral.msisdn)

    def test_bad_referral_link(self):
        """
        If a bad referral code is supplied, we should not alert the user, and just act
        like no code was given
        """
        url = reverse("registrations:registration-details", args=["bad-code"])
        r = self.client.get(url)
        self.assertTemplateUsed(r, "registrations/registration_details.html")
        self.assertNotIn("registered_by", self.client.session)

    def test_get_form(self):
        """
        A GET request should render the registration details form
        """
        url = reverse("registrations:registration-details")
        r = self.client.get(url)
        self.assertTemplateUsed(r, "registrations/registration_details.html")
        self.assertContains(r, '<form method="post">')

    @responses.activate
    def test_msisdn_validation(self):
        """
        The phone number field should be validated, and returned in E164 format
        """
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json",
            json={"next": None, "previous": None, "results": []},
            status=200,
            headers={"Authorization": "Token some_token"},
        )
        r = self.client.get(reverse("registrations:registration-details"))
        form = RegistrationDetailsForm({"msisdn": "0820001001"}, request=r.wsgi_request)
        form.is_valid()
        self.assertNotIn("msisdn", form.errors)
        self.assertEqual(form.clean_msisdn(), "+27820001001")
        # Cannot parse
        form = RegistrationDetailsForm({"msisdn": "foo"})
        form.is_valid()
        self.assertIn("msisdn", form.errors)
        # Not possible number
        form = RegistrationDetailsForm({"msisdn": "1234"})
        form.is_valid()
        self.assertIn("msisdn", form.errors)
        # Invalid number
        form = RegistrationDetailsForm({"msisdn": "+12001230101"})
        form.is_valid()
        self.assertIn("msisdn", form.errors)

    @responses.activate
    def test_contact_exists(self):
        """
        If a contact exists in Rapidpro for this number, then we should return
        an error message
        """
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001001"}),
            json={"next": None, "previous": None, "results": []},
            status=200,
            headers={"Authorization": "Token some_token"},
        )
        r = self.client.get(reverse("registrations:registration-details"))
        form = RegistrationDetailsForm({"msisdn": "0820001001"}, request=r.wsgi_request)
        form.is_valid()
        self.assertNotIn("msisdn", form.errors)
        self.assertIn("contact", r.wsgi_request.session)
        self.assertEqual(r.wsgi_request.session["contact"], {})
        contact_data = {
            "next": None,
            "previous": None,
            "results": [
                {
                    "uuid": "09d23a05-47fe-11e4-bfe9-b8f6b119e9ab",
                    "name": "Ben Haggerty",
                    "language": None,
                    "urns": ["tel:+27820001002"],
                    "groups": [
                        {
                            "name": "nurseconnect-sms",
                            "uuid": "5a4eb79e-1b1f-4ae3-8700-09384cca385f",
                        }
                    ],
                    "fields": {},
                    "blocked": None,
                    "stopped": None,
                    "created_on": "2015-11-11T13:05:57.457742Z",
                    "modified_on": "2015-11-11T13:05:57.576056Z",
                }
            ],
        }
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001002"}),
            json=contact_data,
            status=200,
            headers={"Authorization": "Token some_token"},
        )
        form = RegistrationDetailsForm({"msisdn": "0820001002"}, request=r.wsgi_request)
        form.is_valid()
        self.assertIn("msisdn", form.errors)
        self.assertIn("contact", r.wsgi_request.session)
        self.assertIsNotNone(r.wsgi_request.session["contact"])

    @responses.activate
    def test_get_rp_contact_error(self):
        """
        If there's an error making the HTTP request, an error message should be returned
        to the user, asking them to try again.
        """
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001002"}),
            status=500,
        )
        form = RegistrationDetailsForm({"msisdn": "0820001002"})
        with self.assertLogs(level="ERROR") as logs:
            form.is_valid()
        [error_log] = logs.output
        self.assertIn("Error connecting to RapidPro", error_log)
        self.assertIn("msisdn", form.errors)
        self.assertIn(
            "There was an error checking your details. Please try again.",
            form.errors["msisdn"],
        )

    @responses.activate
    def test_opted_out_contact_redirected_to_confirmation(self):
        """
        If a contact has already opted out, then we should redirect to an optin
        confirmation page
        """
        contact_data = {
            "next": None,
            "previous": None,
            "results": [
                {
                    "uuid": "09d23a05-47fe-11e4-bfe9-b8f6b119e9ab",
                    "name": "Ben Haggerty",
                    "language": None,
                    "urns": ["tel:+27820001003"],
                    "groups": [
                        {
                            "name": "opted-out",
                            "uuid": "5a4eb79e-1b1f-4ae3-8700-09384cca385f",
                        }
                    ],
                    "fields": {},
                    "blocked": None,
                    "stopped": None,
                    "created_on": "2015-11-11T13:05:57.457742Z",
                    "modified_on": "2015-11-11T13:05:57.576056Z",
                }
            ],
        }
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001003"}),
            json=contact_data,
            status=200,
            headers={"Authorization": "Token some_token"},
        )
        clinic_data = {
            "title": "Facility Check Nurse Connect",
            "headers": [],
            "rows": [["123457", "yGVQRg2PXNh", "Test Clinic"]],
            "width": 3,
            "height": 1,
        }
        responses.add(
            responses.GET,
            "http://testopenhim/NCfacilityCheck?"
            + urlencode({"criteria": "value:123457"}),
            json=clinic_data,
            status=200,
        )
        referral = ReferralLink.objects.create(msisdn="+27820001001")
        url = reverse("registrations:registration-details", args=[referral.code])
        r = self.client.post(
            url,
            {"msisdn": ["0820001003"], "clinic_code": ["123457"], "consent": ["True"]},
        )
        self.assertRedirects(r, reverse("registrations:confirm-optin"))
        self.assertEqual(self.client.session["clinic_name"], "Test Clinic")
        self.assertEqual(self.client.session["clinic_code"], "123457")

    @responses.activate
    def test_clinic_code_validation(self):
        """
        The clinic code should be digits and exist in DHIS2, and not be on the blacklist
        """
        clinic_data = {
            "title": "Facility Check Nurse Connect",
            "headers": [],
            "rows": [["123457", "yGVQRg2PXNh", "Test Clinic"]],
            "width": 3,
            "height": 1,
        }
        responses.add(
            responses.GET,
            "http://testopenhim/NCfacilityCheck?"
            + urlencode({"criteria": "value:123457"}),
            json=clinic_data,
            status=200,
        )
        r = self.client.get(reverse("registrations:registration-details"))
        form = RegistrationDetailsForm(
            {"clinic_code": "123457"}, request=r.wsgi_request
        )
        form.is_valid()
        self.assertNotIn("clinic_code", form.errors)
        self.assertEqual(form.clean_clinic_code(), "123457")
        # not digits
        form = RegistrationDetailsForm({"clinic_code": "foobar"})
        form.is_valid()
        self.assertIn("clinic_code", form.errors)
        # not in DHIS2
        clinic_data = {"title": "", "headers": [], "rows": [], "width": 0, "height": 0}
        responses.add(
            responses.GET,
            "http://testopenhim/NCfacilityCheck?"
            + urlencode({"criteria": "value:654321"}),
            json=clinic_data,
            status=200,
        )
        form = RegistrationDetailsForm(
            {"clinic_code": "654321"}, request=r.wsgi_request
        )
        form.is_valid()
        self.assertIn("clinic_code", form.errors)
        # in blacklist
        form = RegistrationDetailsForm(
            {"clinic_code": "123456"}, request=r.wsgi_request
        )
        form.is_valid()
        self.assertIn("clinic_code", form.errors)

    @responses.activate
    def test_check_clinic_code_error(self):
        """
        If there's an error making the HTTP request, an error message should be returned
        to the user, asking them to try again.
        """
        responses.add(responses.GET, "http://testopenhim/NCfacilityCheck", status=500)
        r = self.client.get(reverse("registrations:registration-details"))
        form = RegistrationDetailsForm(
            {"clinic_code": "123457"}, request=r.wsgi_request
        )
        form.is_valid()
        self.assertIn("clinic_code", form.errors)
        self.assertIn("jembi_api_errors", r.wsgi_request.session)
        self.assertEqual(r.wsgi_request.session["jembi_api_errors"], 1)

    @responses.activate
    def test_check_clinic_code_multiple_errors(self):
        """
        If there are multiple HTTP errors, then it should be logged so that we know
        about it
        """
        responses.add(responses.GET, "http://testopenhim/NCfacilityCheck", status=500)
        with self.assertLogs(level="ERROR") as logs:
            self.client.post(
                reverse("registrations:registration-details"), {"clinic_code": "123457"}
            )
            self.client.post(
                reverse("registrations:registration-details"), {"clinic_code": "123457"}
            )
            self.client.post(
                reverse("registrations:registration-details"), {"clinic_code": "123457"}
            )
        [error_log] = logs.output
        self.assertIn("Jembi API error limit reached", error_log)

    @responses.activate
    def test_form_success(self):
        """
        Should put the form details and clinic name in the session
        """
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json",
            json={"next": None, "previous": None, "results": []},
            status=200,
            headers={"Authorization": "Token some_token"},
        )
        clinic_data = {
            "title": "Facility Check Nurse Connect",
            "headers": [],
            "rows": [["123457", "yGVQRg2PXNh", "Test Clinic"]],
            "width": 3,
            "height": 1,
        }
        responses.add(
            responses.GET,
            "http://testopenhim/NCfacilityCheck?"
            + urlencode({"criteria": "value:123457"}),
            json=clinic_data,
            status=200,
        )
        r = self.client.post(
            reverse("registrations:registration-details"),
            {"msisdn": "0820001001", "clinic_code": "123457", "consent": ["True"]},
        )
        self.assertRedirects(r, reverse("registrations:confirm-clinic"))
        self.assertEqual(
            self.client.session["registration_details"],
            {"msisdn": "+27820001001", "clinic_code": "123457", "consent": ["True"]},
        )
        self.assertEqual(self.client.session["clinic_name"], "Test Clinic")
        self.assertEqual(self.client.session["clinic_code"], "123457")
class OptinConfirmTests(TestCase):
    """Tests for the opt-in confirmation step shown to previously opted-out users."""

    def test_redirect_on_invalid_session(self):
        """
        If there isn't a msisdn in the session, then we should redirect to the
        registration details page, as the user went to this page without first going
        through the registration details page.
        """
        r = self.client.get(reverse("registrations:confirm-optin"))
        self.assertRedirects(r, reverse("registrations:registration-details"))

    def test_goes_to_clinic_confirm_on_yes(self):
        """
        If "yes" is selected, we should redirect to the clinic confirmation page
        """
        session = self.client.session
        session["registration_details"] = {"msisdn": "+27820001001"}
        session["clinic_name"] = "Test clinic"
        session.save()
        r = self.client.post(reverse("registrations:confirm-optin"), {"yes": ["Yes"]})
        self.assertRedirects(r, reverse("registrations:confirm-clinic"))

    def test_goes_to_farewell_page_on_no(self):
        """
        If "no" is selected, we should redirect to a farewell page
        """
        session = self.client.session
        session["registration_details"] = {"msisdn": "+27820001001"}
        session.save()
        r = self.client.post(reverse("registrations:confirm-optin"), {"no": ["No"]})
        self.assertRedirects(r, reverse("registrations:reject-optin"))
class ClinicConfirmTests(TestCase):
    """Tests for the clinic-confirmation step and the registration tasks it kicks off.

    Covers channel detection via the mocked WhatsApp contact-check API, the
    payload sent to OpenHIM, and the contact create/update and flow start
    performed against the mocked RapidPro API.
    """

    def test_redirect_on_invalid_session(self):
        """
        If there isn't a clinic name in the session, then we should redirect to the
        registration details page, as the user went to this page without first going
        through the registration details page.
        """
        r = self.client.get(reverse("registrations:confirm-clinic"))
        self.assertRedirects(r, reverse("registrations:registration-details"))

    @mock.patch("registrations.views.send_registration_to_openhim")
    @mock.patch("registrations.views.send_registration_to_rapidpro")
    @mock.patch("registrations.views.RegistrationConfirmClinic.get_channel")
    def test_goes_to_end_on_yes(self, get_channel, _, _2):
        """
        If "yes" is selected, we should set the channel and redirect to the success page
        """
        get_channel.return_value = "WhatsApp"
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {
            "msisdn": "+27820001001",
            "clinic_code": "123457",
        }
        session["contact"] = {}
        session.save()
        r = self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
        self.assertEqual(self.client.session["channel"], "WhatsApp")
        self.assertRedirects(r, reverse("registrations:success"))

    def test_goes_to_homepage_no(self):
        """
        If "no" is selected, we should redirect to the registration details page, set
        the clinic code error message, and remove the clinic code from the initial
        form values
        """
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {
            "msisdn": "+27820001001",
            "clinic_code": "123457",
        }
        session.save()
        r = self.client.post(reverse("registrations:confirm-clinic"), {"no": ["No"]})
        self.assertEqual(
            self.client.session["clinic_code_error"],
            "Please re-enter your 6-digit clinic code.",
        )
        self.assertNotIn("clinic_code", self.client.session["registration_details"])
        self.assertRedirects(r, reverse("registrations:registration-details"))

    @responses.activate
    @mock.patch("registrations.views.send_registration_to_openhim")
    @mock.patch("registrations.views.send_registration_to_rapidpro")
    def test_get_channel_whatsapp(self, _, _2):
        """
        If the user has a whatsapp account, the channel should be whatsapp
        """
        responses.add(
            responses.POST,
            "https://whatsapp.praekelt.org/v1/contacts",
            json={
                "contacts": [
                    {"input": "+27820001001", "status": "valid", "wa_id": "27820001001"}
                ]
            },
        )
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {
            "msisdn": "+27820001001",
            "clinic_code": "123457",
        }
        session["contact"] = {}
        session.save()
        r = self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
        self.assertEqual(self.client.session["channel"], "WhatsApp")
        self.assertRedirects(r, reverse("registrations:success"))

    @responses.activate
    @mock.patch("registrations.views.send_registration_to_openhim")
    @mock.patch("registrations.views.send_registration_to_rapidpro")
    def test_get_channel_sms(self, _, _2):
        """
        If the user doesn't have a whatsapp account, the channel should be sms
        """
        responses.add(
            responses.POST,
            "https://whatsapp.praekelt.org/v1/contacts",
            json={"contacts": [{"input": "+27820001001", "status": "invalid"}]},
        )
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {
            "msisdn": "+27820001001",
            "clinic_code": "123457",
        }
        session["contact"] = {}
        session.save()
        r = self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
        self.assertEqual(self.client.session["channel"], "SMS")
        self.assertRedirects(r, reverse("registrations:success"))

    @responses.activate
    def test_get_channel_error(self):
        """
        If there's an error making the HTTP request, an error message should be returned
        to the user, asking them to try again.
        """
        responses.add(
            responses.POST, "https://whatsapp.praekelt.org/v1/contacts", status=500
        )
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {"msisdn": "+27820001001"}
        session.save()
        r = self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
        [message] = get_messages(r.wsgi_request)
        self.assertEqual(
            str(message),
            "There was an error creating your registration. Please try again.",
        )

    @responses.activate
    def test_get_channel_multiple_errors(self):
        """
        If there are multiple HTTP errors, then it should be logged so that we know
        about it
        """
        responses.add(
            responses.POST, "https://whatsapp.praekelt.org/v1/contacts", status=500
        )
        session = self.client.session
        session["clinic_name"] = "Test clinic"
        session["registration_details"] = {"msisdn": "+27820001001"}
        session.save()
        with self.assertLogs(level="ERROR") as logs:
            self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
            self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
            self.client.post(reverse("registrations:confirm-clinic"), {"yes": ["Yes"]})
        [error_log] = logs.output
        self.assertIn("WhatsApp API error limit reached", error_log)

    @responses.activate
    def test_correct_info_sent_to_openhim(self):
        """
        Check that the correct values for the registration are being sent to the OpenHIM
        API.
        """
        response_data = self.get_rp_responses_data()
        contact_list = {
            "next": None,
            "previous": None,
            "results": [response_data["contact_data"]],
        }
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001001"}),
            json=contact_list,
        )
        responses.add(responses.POST, "http://testopenhim/nc/subscription")
        timestamp = datetime(2019, 1, 1).timestamp()
        channel = "WhatsApp"
        msisdn = "+27820001001"
        clinic_code = "123457"
        contact_persal = "testpersal"
        contact_sanc = "testsanc"
        registered_by = "+27820001002"
        eid = str(uuid.uuid4())
        send_registration_to_openhim(
            (msisdn, "89341938-7c98-4c8e-bc9d-7cd8c9cfc468"),
            registered_by,
            channel,
            clinic_code,
            contact_persal,
            contact_sanc,
            timestamp,
            eid,
        )
        [call] = responses.calls
        self.assertEqual(
            json.loads(call.request.body),
            {
                "mha": 1,
                "swt": 7,
                "type": 7,
                "cmsisdn": "+27820001001",
                "dmsisdn": "+27820001002",
                "rmsisdn": None,
                "faccode": "123457",
                "id": "27820001001^^^ZAF^TEL",
                "dob": None,
                "persal": "testpersal",
                "sanc": "testsanc",
                "encdate": "20190101000000",
                "sid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468",
                "eid": eid,
            },
        )
        self.assertEqual(
            call.request.headers["Authorization"], "Basic UkVQTEFDRU1FOlJFUExBQ0VNRQ=="
        )

    def get_rp_responses_data(self):
        """
        Returns data to be used for resposes to RapidPro requests in multiple tests.
        """
        contact_data = {
            "uuid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468",
            "name": "Test User",
            "language": None,
            "urns": ["tel:+27820001001", "whatsapp:27820001001"],
            "groups": [],
            "fields": {
                "persal": None,
                "opt_out_date": None,
                "registered_by": "+27820001002",
                "facility_code": "123457",
                "registration_date": "2019-01-01T00:00:00.000000Z",
                "preferred_channel": "whatsapp",
                "sanc": None,
            },
            "blocked": None,
            "stopped": None,
            "created_on": "2019-01-01T00:00:00.000000Z",
            "modified_on": "2019-01-01T00:00:00.000000Z",
        }
        flows_data = {
            "next": None,
            "previous": None,
            "results": [
                {
                    "uuid": "9766a4c2-12c3-4eeb-9e39-912662918a9c",
                    "name": "Post Registration",
                    "type": "message",
                    "archived": False,
                    "labels": [],
                    "expires": 10080,
                    "runs": {
                        "active": 0,
                        "completed": 1,
                        "interrupted": 0,
                        "expired": 0,
                    },
                    "created_on": "2019-04-09T09:25:01.532016Z",
                    "modified_on": "2019-04-09T09:32:12.657544Z",
                }
            ],
        }
        flow_start_data = {
            "uuid": "09d23a05-47fe-11e4-bfe9-b8f6b119e9ab",
            "flow": {
                "uuid": "9766a4c2-12c3-4eeb-9e39-912662918a9c",
                "name": "Post Registration",
            },
            "groups": [],
            "contacts": [{"uuid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468", "name": ""}],
            "restart_participants": False,
            "status": "complete",
            "extra": {},
            "created_on": "2013-08-19T19:11:21.082Z",
            "modified_on": "2013-08-19T19:11:21.082Z",
        }
        return {
            "contact_data": contact_data,
            "flows_data": flows_data,
            "flow_start_data": flow_start_data,
        }

    @responses.activate
    def test_registration_created_for_existing_contact(self):
        """
        Check that the correct information is being sent to RapidPro to create
        the registration.
        """
        contact_list_data = {
            "next": None,
            "previous": None,
            "results": [
                {
                    "uuid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468",
                    "name": "Test User",
                    "language": None,
                    "urns": ["tel:+27820001001", "whatsapp:27820001001"],
                    "groups": [],
                    "fields": {
                        "persal": None,
                        "opt_out_date": None,
                        "registered_by": "+27820001002",
                        "facility_code": "123457",
                        "registration_date": "2019-01-01T00:00:00.000000Z",
                        "preferred_channel": "whatsapp",
                        "sanc": None,
                    },
                    "blocked": None,
                    "stopped": None,
                    "created_on": "2019-01-01T00:00:00.000000Z",
                    "modified_on": "2019-01-01T00:00:00.000000Z",
                }
            ],
        }
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001001"}),
            json=contact_list_data,
        )
        response_data = self.get_rp_responses_data()
        responses.add(
            responses.POST,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"uuid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468"}),
            json=response_data["contact_data"],
        )
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/flows.json?",
            json=response_data["flows_data"],
        )
        responses.add(
            responses.POST,
            "https://test.rapidpro/api/v2/flow_starts.json",
            json=response_data["flow_start_data"],
        )
        timestamp = datetime(2019, 1, 1).timestamp()
        channel = "WhatsApp"
        msisdn = "+27820001001"
        clinic_code = "123457"
        registered_by = "+27820001002"
        contact = {
            "uuid": "89341938-7c98-4c8e-bc9d-7cd8c9cfc468",
            "fields": {"persal": "testpersal", "sanc": "testsanc"},
        }
        contact_info = send_registration_to_rapidpro(
            contact, msisdn, registered_by, channel, clinic_code, timestamp
        )
        [rp_call_1, rp_contact_call, rp_call_3, rp_flow_start_call] = responses.calls
        self.assertEqual(
            json.loads(rp_contact_call.request.body),
            {
                "fields": {
                    "preferred_channel": "whatsapp",
                    "registered_by": "+27820001002",
                    "facility_code": "123457",
                    "registration_date": "2019-01-01T00:00:00.000000Z",
                    "reg_source": "mobi-site",
                }
            },
        )
        self.assertEqual(
            json.loads(rp_flow_start_call.request.body),
            {
                "flow": "9766a4c2-12c3-4eeb-9e39-912662918a9c",
                "contacts": ["89341938-7c98-4c8e-bc9d-7cd8c9cfc468"],
            },
        )
        self.assertEqual(contact_info, (msisdn, "89341938-7c98-4c8e-bc9d-7cd8c9cfc468"))

    @responses.activate
    def test_registration_created_for_new_contact(self):
        """
        Check that the correct information is being sent to RapidPro to create
        the registration.
        """
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/contacts.json?"
            + urlencode({"urn": "tel:+27820001001"}),
            json={"next": None, "previous": None, "results": []},
        )
        response_data = self.get_rp_responses_data()
        responses.add(
            responses.POST,
            "https://test.rapidpro/api/v2/contacts.json?",
            json=response_data["contact_data"],
        )
        responses.add(
            responses.GET,
            "https://test.rapidpro/api/v2/flows.json?",
            json=response_data["flows_data"],
        )
        responses.add(
            responses.POST,
            "https://test.rapidpro/api/v2/flow_starts.json",
            json=response_data["flow_start_data"],
        )
        responses.add(responses.POST, "http://testopenhim/nc/subscription")
        timestamp = datetime(2019, 1, 1).timestamp()
        channel = "WhatsApp"
        msisdn = "+27820001001"
        clinic_code = "123457"
        registered_by = "+27820001002"
        contact = {}
        contact_info = send_registration_to_rapidpro(
            contact, msisdn, registered_by, channel, clinic_code, timestamp
        )
        [rp_call_1, rp_contact_call, rp_call_3, rp_flow_start_call] = responses.calls
        self.assertEqual(
            json.loads(rp_contact_call.request.body),
            {
                "urns": ["tel:+27820001001", "whatsapp:27820001001"],
                "fields": {
                    "preferred_channel": "whatsapp",
                    "registered_by": "+27820001002",
                    "facility_code": "123457",
                    "registration_date": "2019-01-01T00:00:00.000000Z",
                    "reg_source": "mobi-site",
                },
            },
        )
        self.assertEqual(
            json.loads(rp_flow_start_call.request.body),
            {
                "flow": "9766a4c2-12c3-4eeb-9e39-912662918a9c",
                "contacts": ["89341938-7c98-4c8e-bc9d-7cd8c9cfc468"],
            },
        )
        self.assertEqual(contact_info, (msisdn, "89341938-7c98-4c8e-bc9d-7cd8c9cfc468"))
class RegistrationSuccessTests(TestCase):
    """Tests for the final success page: session cleanup and referral-link display."""

    def test_redirect_to_clinic_confirm(self):
        """
        If there is no channel defined, we should redirect to the clinic confirmation
        """
        r = self.client.get(reverse("registrations:success"))
        # The confirm-clinic view also redirects because there is no clinic name
        self.assertRedirects(
            r, reverse("registrations:confirm-clinic"), target_status_code=302
        )

    def test_clears_session(self):
        """
        If the channel is defined, it should place the channel in the context, and clear
        the session data
        """
        session = self.client.session
        session["channel"] = "WhatsApp"
        session["foo"] = "bar"
        session["registration_details"] = {"msisdn": "+27820001001"}
        session.save()
        r = self.client.get(reverse("registrations:success"))
        self.assertContains(r, "Thank you")
        self.assertEqual(r.context["channel"], "WhatsApp")
        self.assertEqual(sorted(self.client.session.keys()), [])

    def test_referral_link(self):
        """
        After a successful registration, it should display the user's referral link
        """
        session = self.client.session
        session["channel"] = "WhatsApp"
        session["foo"] = "bar"
        session["registration_details"] = {"msisdn": "+27820001001"}
        session.save()
        r = self.client.get(reverse("registrations:success"))
        referral = ReferralLink.objects.get(msisdn="+27820001001")
        self.assertContains(r, referral.path)
        self.assertEqual(r.context["channel"], "WhatsApp")
        self.assertEqual(sorted(self.client.session.keys()), [])
| 0 | 32,192 | 92 |
ac03bbe228ac3b2173aa9a8e83fe86907dfacbc4 | 3,070 | py | Python | Hubitat Presence from Unifi/Unifi-Presence.py | cesquib/python-scripts | bd1a56d8814debc8afcaf31ac3c8d447704f5ff1 | [
"MIT"
] | null | null | null | Hubitat Presence from Unifi/Unifi-Presence.py | cesquib/python-scripts | bd1a56d8814debc8afcaf31ac3c8d447704f5ff1 | [
"MIT"
] | null | null | null | Hubitat Presence from Unifi/Unifi-Presence.py | cesquib/python-scripts | bd1a56d8814debc8afcaf31ac3c8d447704f5ff1 | [
"MIT"
] | null | null | null | #imports
import csv
import json
import requests
import requests.utils
import requests.sessions
import urllib3
import sys
import traceback
import configparser
import logging
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
config = configparser.ConfigParser()
config.read('config.ini')
if __name__ == '__main__':
main() | 35.697674 | 171 | 0.671987 | #imports
import csv
import json
import requests
import requests.utils
import requests.sessions
import urllib3
import sys
import traceback
import configparser
import logging
from urllib3.exceptions import InsecureRequestWarning
# Suppress the warning urllib3 emits for every verify=False request made to
# the controller/hub's self-signed certificate (see the requests calls below).
urllib3.disable_warnings(InsecureRequestWarning)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
# Runtime settings (UniFi/Hubitat URLs, credentials, device-list path) are
# read from config.ini next to the script.
config = configparser.ConfigParser()
config.read('config.ini')
def main():
    """Sync Hubitat presence for each device listed in the configured CSV file.

    For every row (columns used: name, mac, deviceid) the device's wireless
    connection state on the UniFi controller is compared with its current
    Hubitat presence, and the presence is updated when they disagree.
    """
    # Open the CSV file as configured in the config.ini file; newline='' is
    # the csv-module recommended way to open input files.
    with open(config['common']['devicelist'], newline='') as csvfile:
        devices = csv.DictReader(csvfile)
        # Iterate through each row of the CSV file, performing the necessary
        # presence actions.  Only update Hubitat when the states disagree.
        for row in devices:
            logging.debug("Processing " + row['name'])
            wireless_connected = unifistatus(row['mac'])
            current_presence = getpresence(row['deviceid'])
            if wireless_connected and current_presence == 'not present':
                setpresence(row['deviceid'], 'arrived')
            elif not wireless_connected and current_presence == 'present':
                setpresence(row['deviceid'], 'departed')
def unifisession():
    """Log in to the UniFi controller and return the login response.

    The returned object's ``cookies`` carry the authentication that
    subsequent API calls (see ``unifistatus``) attach to their requests.

    Raises:
        requests.exceptions.RequestException: if the login request fails.
    """
    logging.debug("Getting session for initial logon to unifi controller ")
    url = config['unifi']['baseurl'] + "/api/login"
    auth_header = {'username': config['unifi']['user'], 'password': config['unifi']['pass']}
    unifi_session = requests.Session()
    try:
        # verify=False: the controller typically serves a self-signed cert.
        r = unifi_session.post(url, verify=False, data=json.dumps(auth_header))
    except requests.exceptions.RequestException as e:
        logging.error(e)
        # Fix: previously fell through and crashed on the unbound 'r' below.
        raise
    return r
def unifistatus(deviceMAC):
    """Return True if a client with the given MAC is connected to the UniFi site.

    Logs in, fetches the station list (/stat/sta) for the configured site and
    checks whether any connected client's MAC matches ``deviceMAC``.

    Raises:
        requests.exceptions.RequestException: if the controller is unreachable.
    """
    # Let's get our login session (its cookies authenticate the API call).
    session_unifi = unifisession()
    url = config['unifi']['baseurl'] + "/api/s/" + config['unifi']['site'] + "/stat/sta"
    logging.debug("Device MAC sent: " + deviceMAC)
    try:
        r = requests.get(url, verify=False, cookies=session_unifi.cookies)
    except requests.exceptions.RequestException as e:
        logging.error(e)
        # Fix: previously fell through and crashed on the unbound 'r' below.
        raise
    json_result = r.json()
    status = any(sd['mac'] == deviceMAC for sd in json_result['data'])
    session_unifi.cookies.clear()
    return status
def setpresence(deviceID, status):
    """Send a presence command ('arrived'/'departed') to Hubitat's Maker API.

    Returns True once the request has been issued (preserving the original
    contract); raises the requests exception on transport failure instead of
    crashing on an unbound response variable.
    """
    url = (config['hubitat']['baseurl'] + '/' + config['hubitat']['maker_api'] +
           '/devices/' + deviceID + '/' + status +
           '?access_token=' + config['hubitat']['maker_token'])
    try:
        r = requests.get(url, verify=False)
    except requests.exceptions.RequestException as e:
        # Bug fix: `r` was unbound after a failed request; re-raise.
        logging.error(e)
        raise
    # Use the configured logger rather than a bare print().
    logging.debug(r.status_code)
    return True
def getpresence(deviceID):
    """Return Hubitat's current presence value for deviceID.

    Reads the device via the Maker API and returns the first attribute's
    currentValue (e.g. 'present' / 'not present').

    Raises:
        requests.exceptions.RequestException: if the device query fails.
    """
    url = (config['hubitat']['baseurl'] + '/' + config['hubitat']['maker_api'] +
           '/devices/' + deviceID + '?access_token=' + config['hubitat']['maker_token'])
    try:
        r = requests.get(url, verify=False)
    except requests.exceptions.RequestException as e:
        # Bug fix: `r` was unbound after a failed request; re-raise.
        logging.error(e)
        raise
    json_result = r.json()
    # Assumes the presence attribute is first in the attributes list --
    # TODO confirm against the Maker API device payload.
    presence = json_result['attributes'][0]['currentValue']
    return presence
if __name__ == '__main__':
    # Script entry point: run one presence-sync pass over the device list.
    main()
4bccd7428316015be16c38509ae34fe303dc319d | 1,812 | py | Python | bip32utils/Base58.py | lyndsysimon/bip32utils | 56f5a56d1c54e648f35b670a87efabbca08fffae | [
"MIT"
] | 40 | 2017-09-05T21:34:05.000Z | 2022-03-22T01:03:24.000Z | bip32utils/Base58.py | deployed/bip32utils | 85c46714b580978da396d7984c466ec14e15b675 | [
"MIT"
] | 1 | 2019-08-13T11:55:32.000Z | 2019-08-13T12:07:05.000Z | bip32utils/Base58.py | deployed/bip32utils | 85c46714b580978da396d7984c466ec14e15b675 | [
"MIT"
] | 20 | 2017-05-27T19:27:49.000Z | 2022-02-05T10:04:34.000Z | #!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
from hashlib import sha256
__base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__base58_radix = len(__base58_alphabet)
def __string_to_int(data):
    "Convert string of bytes to a Python integer, most significant byte first"
    val = 0
    # Horner's scheme over the bytes, MSB first: equivalent to summing
    # ord(byte) * 256**position over the reversed string.
    for c in data:
        val = val * 256 + ord(c)
    return val
def encode(data):
    "Encode string into Bitcoin base58"
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Interpret the input as one big-endian integer.
    num = 0
    for ch in data:
        num = num * 256 + ord(ch)
    # Repeated divmod by 58 yields the digits, least significant first.
    digits = []
    while num >= 58:
        num, rem = divmod(num, 58)
        digits.append(alphabet[rem])
    if num:
        digits.append(alphabet[num])
    enc = ''.join(reversed(digits))
    # Each leading NUL byte maps to one leading '1' (the zero digit).
    pad = len(data) - len(data.lstrip('\0'))
    return alphabet[0] * pad + enc
def check_encode(raw):
    "Encode raw string into Bitcoin base58 with a 4-byte double-SHA256 checksum"
    digest = sha256(raw).digest()
    checksum = sha256(digest).digest()[:4]
    return encode(raw + checksum)
def decode(data):
    "Decode Bitcoin base58 format to string"
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Fold the base58 digits into one integer, most significant first
    # (Horner's scheme; str.find semantics preserved for unknown chars).
    num = 0
    for ch in data:
        num = num * 58 + alphabet.find(ch)
    # Peel off bytes, least significant first, then reverse.
    chars = []
    while num >= 256:
        num, rem = divmod(num, 256)
        chars.append(chr(rem))
    if num:
        chars.append(chr(num))
    return ''.join(reversed(chars))
def check_decode(enc):
    "Decode string from Bitcoin base58 and verify the trailing 4-byte checksum"
    decoded = decode(enc)
    payload, checksum = decoded[:-4], decoded[-4:]
    expected = sha256(sha256(payload).digest()).digest()[:4]
    if checksum != expected:
        raise ValueError("base58 decoding checksum error")
    return payload
if __name__ == '__main__':
    # Self-test: sanity-check the alphabet size, then round-trip a sample
    # sentence through the checksummed encoder/decoder.
    assert(__base58_radix == 58)
    data = 'now is the time for all good men to come to the aid of their country'
    enc = check_encode(data)
    assert(check_decode(enc) == data)
| 25.521127 | 81 | 0.639625 | #!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
from hashlib import sha256
__base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__base58_radix = len(__base58_alphabet)
def __string_to_int(data):
    "Convert string of bytes to a Python integer, most significant byte first"
    val = 0
    # Horner's scheme over the bytes, MSB first: equivalent to summing
    # ord(byte) * 256**position over the reversed string.
    for c in data:
        val = val * 256 + ord(c)
    return val
def encode(data):
    "Encode string into Bitcoin base58"
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Interpret the input as one big-endian integer.
    num = 0
    for ch in data:
        num = num * 256 + ord(ch)
    # Repeated divmod by 58 yields the digits, least significant first.
    digits = []
    while num >= 58:
        num, rem = divmod(num, 58)
        digits.append(alphabet[rem])
    if num:
        digits.append(alphabet[num])
    enc = ''.join(reversed(digits))
    # Each leading NUL byte maps to one leading '1' (the zero digit).
    pad = len(data) - len(data.lstrip('\0'))
    return alphabet[0] * pad + enc
def check_encode(raw):
    "Encode raw string into Bitcoin base58 with a 4-byte double-SHA256 checksum"
    digest = sha256(raw).digest()
    checksum = sha256(digest).digest()[:4]
    return encode(raw + checksum)
def decode(data):
    "Decode Bitcoin base58 format to string"
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    # Fold the base58 digits into one integer, most significant first
    # (Horner's scheme; str.find semantics preserved for unknown chars).
    num = 0
    for ch in data:
        num = num * 58 + alphabet.find(ch)
    # Peel off bytes, least significant first, then reverse.
    chars = []
    while num >= 256:
        num, rem = divmod(num, 256)
        chars.append(chr(rem))
    if num:
        chars.append(chr(num))
    return ''.join(reversed(chars))
def check_decode(enc):
    "Decode string from Bitcoin base58 and verify the trailing 4-byte checksum"
    decoded = decode(enc)
    payload, checksum = decoded[:-4], decoded[-4:]
    expected = sha256(sha256(payload).digest()).digest()[:4]
    if checksum != expected:
        raise ValueError("base58 decoding checksum error")
    return payload
if __name__ == '__main__':
    # Self-test: sanity-check the alphabet size, then round-trip a sample
    # sentence through the checksummed encoder/decoder.
    assert(__base58_radix == 58)
    data = 'now is the time for all good men to come to the aid of their country'
    enc = check_encode(data)
    assert(check_decode(enc) == data)
| 0 | 0 | 0 |
e68b75a7178eff6526f15122e0220fdc0f0ab014 | 2,078 | py | Python | tests/test_iterators.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | tests/test_iterators.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | tests/test_iterators.py | johnnoone/aiodisque | afb6851ac907783a69b4b2e5c09456ae48a1faba | [
"MIT"
] | null | null | null | import pytest
from aiodisque import Disque, Job
from aiodisque.iterators import JobsIterator
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| 28.861111 | 80 | 0.622714 | import pytest
from aiodisque import Disque, Job
from aiodisque.iterators import JobsIterator
@pytest.mark.asyncio
async def test_queues(node, event_loop):
    """getjob_iter yields every queued job exactly once, one job at a time."""
    client = Disque(node.port, loop=event_loop)
    # addjob returns the new job id; collect the ids as the expected set.
    expected = set()
    for i in range(0, 256):
        res = await client.addjob('q', 'job-%s' % i, 5000, replicate=1, retry=0)
        expected.add(res)
    # nohang=True makes the async iterator stop once the queue is drained.
    it = client.getjob_iter('q', nohang=True)
    results = set()
    async for job in it:
        results.add(job.id)
    assert results == expected
    assert isinstance(it, JobsIterator)
@pytest.mark.asyncio
async def test_queues_count(node, event_loop):
    """With count=N, getjob_iter yields batches of jobs rather than single jobs."""
    client = Disque(node.port, loop=event_loop)
    expected = set()
    for i in range(0, 256):
        res = await client.addjob('q', 'job-%s' % i, 5000, replicate=1, retry=0)
        expected.add(res)
    # count=2 -> each iteration produces an iterable of up to 2 jobs.
    it = client.getjob_iter('q', nohang=True, count=2)
    results = set()
    async for jobs in it:
        results.update(job.id for job in jobs)
    assert results == expected
    assert isinstance(it, JobsIterator)
@pytest.mark.asyncio
async def test_queues_padding(node, event_loop):
    """padding=True pads short batches with None so tuples always unpack.

    4 jobs with count=3: first batch is 3 real jobs, second is 1 job + 2 Nones.
    """
    client = Disque(node.port, loop=event_loop)
    for i in range(0, 4):
        await client.addjob('q', 'job-%s' % i, 5000, replicate=1, retry=0)
    count = 0
    it = client.getjob_iter('q', nohang=True, count=3, padding=True)
    async for j1, j2, j3 in it:
        if count == 0:
            assert isinstance(j1, Job)
            assert isinstance(j2, Job)
            assert isinstance(j3, Job)
        elif count == 1:
            assert isinstance(j1, Job)
            assert j2 is None
            assert j3 is None
        else:
            break
        count += 1
@pytest.mark.asyncio
async def test_queues_padding_missing(node, event_loop):
    """Without padding, a short batch cannot be tuple-unpacked.

    Only 2 jobs are queued, so unpacking a count=3 batch into three names
    raises ValueError.
    """
    client = Disque(node.port, loop=event_loop)
    for i in range(0, 2):
        await client.addjob('q', 'job-%s' % i, 5000, replicate=1, retry=0)
    with pytest.raises(ValueError):
        it = client.getjob_iter('q', nohang=True, count=3)
        async for j1, j2, j3 in it:
            pass
| 1,805 | 0 | 88 |
ffe22a8de5457916618077063b2da92df6b4ce0b | 6,009 | py | Python | Yatube/hw05_final/posts/tests/test_views.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 3 | 2020-11-18T05:16:30.000Z | 2021-03-08T06:36:01.000Z | Yatube/hw05_final/posts/tests/test_views.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | null | null | null | Yatube/hw05_final/posts/tests/test_views.py | abi83/YaPractice | 1c3a5670ee2f872d4f872623a392755318b893b5 | [
"MIT"
] | 1 | 2021-01-20T12:41:48.000Z | 2021-01-20T12:41:48.000Z | import os
from shutil import rmtree
from uuid import uuid1
from django.conf import settings
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, Client
from django.urls import reverse
from posts.models import Post, Group, User, Comment
| 38.273885 | 79 | 0.587951 | import os
from shutil import rmtree
from uuid import uuid1
from django.conf import settings
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, Client
from django.urls import reverse
from posts.models import Post, Group, User, Comment
class ViewsTests(TestCase):
    """Integration tests for the posts app views: post visibility on listing
    pages, image rendering, index-page caching, and the login redirect for
    anonymous commenting."""
    @classmethod
    def setUpClass(cls):
        """
        Makes an unauthorised client.
        Creates 'first_post', 'first_group' and 'first_user' fixtures with
        randomised (uuid1) values to avoid collisions.
        MEDIA_ROOT is redirected to a temp folder so uploaded test files can
        be removed in tearDownClass.
        """
        super().setUpClass()
        test_view_media_root = os.path.join(settings.MEDIA_ROOT,
                                            'test_temp_files')
        try:
            os.mkdir(test_view_media_root)
        except FileExistsError:
            pass
        settings.MEDIA_ROOT = test_view_media_root
        cls.unauthorized_client = Client()
        cls.first_user = User.objects.create_user(
            username=str(uuid1()),
            first_name=str(uuid1()),
            last_name=str(uuid1()),
        )
        cls.first_group = Group.objects.create(
            title=str(uuid1()),
            slug='first_slug',
            description=str(uuid1()),
        )
        cls.first_post = Post.objects.create(
            text=str(uuid1()),
            group=cls.first_group,
            author=cls.first_user,
        )
        # Tiny valid GIF byte payload used as the post image.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04'
            b'\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02'
            b'\x02\x4c\x01\x00\x3b'
        )
        cls.first_post.image = SimpleUploadedFile(
            name=str(uuid1()) + '.gif',
            content=small_gif,
            content_type='image/gif')
        cls.first_post.save()
        # All pages that should display first_post's content.
        cls.post_check_urls = [
            reverse('posts'),
            reverse('post', args=[cls.first_post.author, cls.first_post.pk]),
            reverse('group-posts', args=[cls.first_group.slug]),
            reverse('profile', args=[cls.first_user.username]),
        ]
    @classmethod
    def tearDownClass(cls):
        """
        Cleaning up temporary files after tests.
        Sets MEDIA_ROOT back to its default location.
        """
        super().tearDownClass()
        rmtree(settings.MEDIA_ROOT)
        settings.MEDIA_ROOT = os.path.join(settings.BASE_DIR, 'media')
    def test_post_view_on_all_pages(self):
        """
        Checking if First Post content is available
        at posts page, author profile page, simple post and group pages
        """
        cache.clear()
        for url in self.post_check_urls:
            with self.subTest(url=url):
                response = self.unauthorized_client.get(url)
                # Detail pages expose context['post'], list pages context['posts'].
                self.assertTrue(
                    (self.first_post == response.context['post']) or
                    (self.first_post in response.context['posts']),
                    f'Page {url} dosnt contains post text')
    def test_img_tag_on_all_pages(self):
        """
        Checks <img> tag with "card-img" class in list of pages
        """
        cache.clear()
        for url in self.post_check_urls:
            with self.subTest(url=url):
                response = self.unauthorized_client.get(url)
                self.assertContains(response, '<img class="card-img"')
    def test_cache_index_page(self):
        """
        Checking correct cache work on index page
        """
        response_one = self.unauthorized_client.get(reverse('posts'))
        # A new post must NOT appear until the cache is cleared.
        Post.objects.create(text='Cache check', author=self.first_user)
        response_two = self.unauthorized_client.get(reverse('posts'))
        cache.clear()
        response_three = self.unauthorized_client.get(reverse('posts'))
        self.assertEqual(response_one.content, response_two.content,
                         'Cache doesnt work')
        self.assertNotEqual(response_two.content, response_three.content,
                            'Couldnt clean the cache')
    def test_groups_page(self):
        """
        Testing first_group appears on groups page
        """
        response = self.unauthorized_client.get(reverse('groups'))
        self.assertContains(response, self.first_group.title)
        self.assertContains(response, self.first_group.description)
    def test_authors_page(self):
        """
        Testing first_user appears on authors page
        """
        response = self.unauthorized_client.get(reverse('authors'))
        self.assertContains(response, self.first_user.first_name)
        self.assertContains(response, self.first_user.last_name)
    def test_unauthorised_user_new_comment_redirect(self):
        """
        Checking post and get method requests for the add-comment page
        with an unauthorised client: both must redirect to login and must
        not create a comment.
        """
        target_url = reverse('login')+'?next='+reverse(
            'add-comment', args=[self.first_post.author.username,
                                 self.first_post.pk])
        comments_count = Comment.objects.all().count()
        responses = {
            'get': self.unauthorized_client.get(
                reverse('add-comment', args=[
                    self.first_post.author.username,
                    self.first_post.pk]),
                follow=False),
            'post': self.unauthorized_client.post(
                reverse('add-comment', args=[
                    self.first_post.author.username,
                    self.first_post.pk]),
                {'text': 'Test unauthorized user new comment'},
                follow=False)}
        for response in responses.values():
            with self.subTest(response=response):
                self.assertRedirects(
                    response,
                    target_url,
                    status_code=302,
                    target_status_code=200,
                    msg_prefix=f'Redirect for {response} fails')
        # No comment should have been created by either request.
        self.assertEqual(comments_count, Comment.objects.all().count())
2d9a451ecbb99e4ca2b66ca8136964aa76800625 | 1,291 | py | Python | P1.Outliers/Ex1. Code.py | khaledxmust/Statistical-Projects | 2aa832a13f9d9ee9e21db7ea12b151b092baa86a | [
"MIT"
] | null | null | null | P1.Outliers/Ex1. Code.py | khaledxmust/Statistical-Projects | 2aa832a13f9d9ee9e21db7ea12b151b092baa86a | [
"MIT"
] | null | null | null | P1.Outliers/Ex1. Code.py | khaledxmust/Statistical-Projects | 2aa832a13f9d9ee9e21db7ea12b151b092baa86a | [
"MIT"
] | null | null | null | import numpy as np
# Loads a single-column dataset and counts mild and extreme outliers using
# Tukey's fences (1.5*IQR inner fence, 3*IQR outer fence around the quartiles).
import pandas as pd
import matplotlib.pyplot as plt
data = np.loadtxt('Data1.txt')
dataset = pd.DataFrame({'No.':data[:]})
dataset.sort_values('No.',inplace=True)
dataset.hist(bins=50) # Exploring data
plt.show()
dataset.boxplot(vert=False)
plt.show()
Q1=np.percentile(dataset, [25]) # Calculating Quartiles
Q2=np.percentile(dataset, [50])
Q3=np.percentile(dataset, [75])
Iqr=np.percentile(dataset, [75])-np.percentile(dataset, [25])
print("1st quartile:",Q1,"\n2nd quartile:",Q2,"\n3rd quartile:",Q3)
print("Inter-quartile range:",Iqr)
x1= 1.5 * Iqr # Calculating Boundary for Outlier
x2= 3 * Iqr # Calculating Boundary for Extreme Outlier
w1= Q1 - x1 #Setting Outlier Whisker (inner fences)
w2= Q3 + x1
Ew1= Q1 - x2 # Setting Extreme Outlier Whisker (outer fences)
Ew2= Q3 + x2
o =[] # Outliers points
Eo=[] # Extreme Outlier points
# Mild outliers lie between the inner and outer fences; extreme outliers
# lie beyond the outer fence.
# NOTE(review): points exactly equal to Ew1/Ew2 satisfy both the mild and
# the extreme condition and are counted in BOTH lists -- confirm whether
# that double-count is intended.
for i in range(len(dataset)):
    if dataset['No.'][i] >= w2 and dataset['No.'][i] <= Ew2:
        o.append(dataset['No.'][i])
    if dataset['No.'][i] <= w1 and dataset['No.'][i] >= Ew1:
        o.append(dataset['No.'][i])
    if dataset['No.'][i] >= Ew2 or dataset['No.'][i] <= Ew1 :
        Eo.append(dataset['No.'][i])
print("Outlier points: ", len(o))
print("Extreme Outlier points: ", len(Eo))
| 30.738095 | 68 | 0.62897 | import numpy as np
# Loads a single-column dataset and counts mild and extreme outliers using
# Tukey's fences (1.5*IQR inner fence, 3*IQR outer fence around the quartiles).
import pandas as pd
import matplotlib.pyplot as plt
data = np.loadtxt('Data1.txt')
dataset = pd.DataFrame({'No.':data[:]})
dataset.sort_values('No.',inplace=True)
dataset.hist(bins=50) # Exploring data
plt.show()
dataset.boxplot(vert=False)
plt.show()
Q1=np.percentile(dataset, [25]) # Calculating Quartiles
Q2=np.percentile(dataset, [50])
Q3=np.percentile(dataset, [75])
Iqr=np.percentile(dataset, [75])-np.percentile(dataset, [25])
print("1st quartile:",Q1,"\n2nd quartile:",Q2,"\n3rd quartile:",Q3)
print("Inter-quartile range:",Iqr)
x1= 1.5 * Iqr # Calculating Boundary for Outlier
x2= 3 * Iqr # Calculating Boundary for Extreme Outlier
w1= Q1 - x1 #Setting Outlier Whisker (inner fences)
w2= Q3 + x1
Ew1= Q1 - x2 # Setting Extreme Outlier Whisker (outer fences)
Ew2= Q3 + x2
o =[] # Outliers points
Eo=[] # Extreme Outlier points
# Mild outliers lie between the inner and outer fences; extreme outliers
# lie beyond the outer fence.
# NOTE(review): points exactly equal to Ew1/Ew2 satisfy both the mild and
# the extreme condition and are counted in BOTH lists -- confirm whether
# that double-count is intended.
for i in range(len(dataset)):
    if dataset['No.'][i] >= w2 and dataset['No.'][i] <= Ew2:
        o.append(dataset['No.'][i])
    if dataset['No.'][i] <= w1 and dataset['No.'][i] >= Ew1:
        o.append(dataset['No.'][i])
    if dataset['No.'][i] >= Ew2 or dataset['No.'][i] <= Ew1 :
        Eo.append(dataset['No.'][i])
print("Outlier points: ", len(o))
print("Extreme Outlier points: ", len(Eo))
| 0 | 0 | 0 |
9ce6a950623dfbf3e3a314b5f2f838f2509e8d15 | 1,455 | py | Python | utilities/point_manipulation.py | vibinash/vision | 7d775d6a877412c963965ecca2eea71ee2def007 | [
"MIT"
] | null | null | null | utilities/point_manipulation.py | vibinash/vision | 7d775d6a877412c963965ecca2eea71ee2def007 | [
"MIT"
] | null | null | null | utilities/point_manipulation.py | vibinash/vision | 7d775d6a877412c963965ecca2eea71ee2def007 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
| 30.3125 | 67 | 0.588316 | import cv2
import numpy as np
def order_points(pts):
    """Order 4 corner points as (top-left, top-right, bottom-right, bottom-left).

    Returns a (4, 2) float32 array in that fixed order.
    """
    ordered = np.zeros((4, 2), dtype='float32')
    # The top-left point has the smallest x+y sum, the bottom-right the largest.
    sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(sums)]
    ordered[2] = pts[np.argmax(sums)]
    # The top-right has the smallest y-x difference, the bottom-left the largest.
    diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(diffs)]
    ordered[3] = pts[np.argmax(diffs)]
    return ordered
def transform_edge_points(image, pts):
    """Warp the quadrilateral defined by `pts` in `image` to a top-down view.

    pts: 4 unordered corner points. The output rectangle's size is taken from
    the longest opposing edges of the ordered quad.
    """
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # calculate the max height of the new image (left and right edge lengths)
    heightA = int(np.sqrt((tl[1] - bl[1])**2 + (tl[0] - bl[0])**2))
    heightB = int(np.sqrt((tr[1] - br[1])**2 + (tr[0] - br[0])**2))
    max_height = max(heightA, heightB)
    # calculate the max width of the new image (top and bottom edge lengths)
    widthA = int(np.sqrt((tl[1] - tr[1])**2 + (tl[0] - tr[0])**2))
    widthB = int(np.sqrt((bl[1] - br[1])**2 + (bl[0] - br[0])**2))
    max_width = max(widthA, widthB)
    # Destination rectangle, in the same (tl, tr, br, bl) order as `rect`.
    dst = np.array([
        [0, 0],                           # top-left
        [max_width - 1, 0],               # top-right
        [max_width - 1, max_height - 1],  # bottom-right
        # Bug fix: the bottom-left y-coordinate was `max_width - 1`,
        # which skewed the warp for non-square outputs.
        [0, max_height - 1]], dtype='float32'
    )
    # compute the perspective transform matrix and apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (max_width, max_height))
    return warped
| 1,379 | 0 | 46 |
7c671ba8bfb5fabe18275bc55ac690769709901e | 1,416 | py | Python | ROAR/planning_module/local_planner/local_planner.py | RyanC1681/RCAI1122 | c9683110b58c255a7a78d880ff73df7ff2329405 | [
"Apache-2.0"
] | 18 | 2020-10-16T00:38:55.000Z | 2022-03-03T06:01:49.000Z | ROAR/planning_module/local_planner/local_planner.py | Jaish567/ROAR | 75b0bc819abbe676f518070da3fa8043422c7cb7 | [
"Apache-2.0"
] | 20 | 2020-07-23T03:50:50.000Z | 2021-11-09T04:00:26.000Z | ROAR/planning_module/local_planner/local_planner.py | Jaish567/ROAR | 75b0bc819abbe676f518070da3fa8043422c7cb7 | [
"Apache-2.0"
] | 140 | 2019-11-20T22:46:02.000Z | 2022-03-29T13:26:17.000Z | from abc import abstractmethod
from ROAR.planning_module.abstract_planner import AbstractPlanner
from ROAR.control_module.controller import Controller
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.mission_planner import MissionPlanner
from typing import Optional
from ROAR.utilities_module.vehicle_models import VehicleControl
from collections import deque
| 32.930233 | 82 | 0.68291 | from abc import abstractmethod
from ROAR.planning_module.abstract_planner import AbstractPlanner
from ROAR.control_module.controller import Controller
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.mission_planner import MissionPlanner
from typing import Optional
from ROAR.utilities_module.vehicle_models import VehicleControl
from collections import deque
class LocalPlanner(AbstractPlanner):
    """Base class for local planners.

    Wires together the controller, behaviour planner and mission planner for
    an agent, and keeps a queue of upcoming waypoints. Subclasses override
    is_done() and run_in_series().
    """
    def __init__(
            self,
            agent,
            controller: Optional[Controller] = None,
            behavior_planner: Optional[BehaviorPlanner] = None,
            mission_planner: Optional[MissionPlanner] = None,
            **kwargs
    ):
        """
        Create a local planner for `agent`; any sub-planner left as None is
        replaced with a default instance built from the same agent.
        """
        super().__init__(agent=agent, **kwargs)
        self.controller = (
            Controller(agent=agent) if controller is None else controller
        )
        self.behavior_planner = (
            BehaviorPlanner(agent=agent)
            if behavior_planner is None
            else behavior_planner
        )
        self.mission_planner = (
            MissionPlanner(agent=agent)
            if mission_planner is None
            else mission_planner
        )
        # Upcoming waypoints for the agent to follow.
        self.way_points_queue = deque()
    @abstractmethod
    def is_done(self):
        """Return True when this planner has finished its route."""
        return False
    @abstractmethod
    def run_in_series(self) -> VehicleControl:
        """Produce the next VehicleControl command for the agent."""
        return VehicleControl()
| 821 | 135 | 23 |
97da7aec4eb5fdc0db5ef2ecdb8ecc9a5b223165 | 15,172 | py | Python | preprocess_recovered_hormuud_messages.py | AfricasVoices/Project-RVI-Election | 78c88e98584e89330bb286ca01c32c1ae03c88eb | [
"MIT"
] | null | null | null | preprocess_recovered_hormuud_messages.py | AfricasVoices/Project-RVI-Election | 78c88e98584e89330bb286ca01c32c1ae03c88eb | [
"MIT"
] | 2 | 2022-03-07T10:03:20.000Z | 2022-03-15T11:45:32.000Z | preprocess_recovered_hormuud_messages.py | AfricasVoices/Project-RVI-Election | 78c88e98584e89330bb286ca01c32c1ae03c88eb | [
"MIT"
] | null | null | null | import argparse
import csv
import re
from datetime import datetime, timedelta
from decimal import Decimal
import pytz
from core_data_modules.logging import Logger
from dateutil.parser import isoparse
from rapid_pro_tools.rapid_pro_client import RapidProClient
from storage.google_cloud import google_cloud_utils
log = Logger(__name__)
TARGET_SHORTCODE = "378"
if __name__ == "__main__":
    # --- Command-line configuration ---
    parser = argparse.ArgumentParser(
        description="Uses Rapid Pro's message logs to filter a Hormuud recovery csv for incoming messages on this "
                    "short code that aren't in Rapid Pro. Attempts to identify messages that have already been "
                    "received in Rapid Pro by (i) looking for exact text matches, then (ii) looking for matches after "
                    "applying Excel's data-mangling algorithms, then (iii) matching by timestamp. "
                    "Matches made by method (iii) are exported for manual review")
    parser.add_argument("google_cloud_credentials_file_path", metavar="google-cloud-credentials-file-path",
                        help="Path to a Google Cloud service account credentials file to use to access the "
                             "credentials bucket")
    parser.add_argument("rapid_pro_domain", metavar="rapid-pro-domain",
                        help="URL of the Rapid Pro server to download data from")
    parser.add_argument("rapid_pro_token_file_url", metavar="rapid-pro-token-file-url",
                        help="GS URL of a text file containing the authorisation token for the Rapid Pro server")
    parser.add_argument("start_date", metavar="start-date",
                        help="Timestamp to filter both datasets by (inclusive), as an ISO8601 str")
    parser.add_argument("end_date", metavar="end-date",
                        help="Timestamp to filter both datasets by (exclusive), as an ISO8601 str")
    parser.add_argument("hormuud_csv_input_path", metavar="hormuud-csv-input-path",
                        help="Path to a CSV file issued by Hormuud to recover messages from")
    parser.add_argument("timestamp_matches_log_output_csv_path", metavar="timestamp-matches-log-output-csv-path",
                        help="File to log the matches made between the Rapid Pro and recovery datasets by timestamp, "
                             "for manual review and approval")
    parser.add_argument("output_csv_path", metavar="output-csv-path",
                        help="File to write the filtered, recovered data to, in a format ready for de-identification "
                             "and integration into the pipeline")
    args = parser.parse_args()
    google_cloud_credentials_file_path = args.google_cloud_credentials_file_path
    rapid_pro_domain = args.rapid_pro_domain
    rapid_pro_token_file_url = args.rapid_pro_token_file_url
    start_date = isoparse(args.start_date)
    end_date = isoparse(args.end_date)
    hormuud_csv_input_path = args.hormuud_csv_input_path
    timestamp_matches_log_output_csv_path = args.timestamp_matches_log_output_csv_path
    output_csv_path = args.output_csv_path
    # Get messages from Rapid Pro and from the recovery csv
    rapid_pro_messages = get_incoming_hormuud_messages_from_rapid_pro(
        google_cloud_credentials_file_path, rapid_pro_domain, rapid_pro_token_file_url,
        created_after_inclusive=start_date,
        created_before_exclusive=end_date,
    )
    all_rapid_pro_messages = rapid_pro_messages
    recovered_messages = get_incoming_hormuud_messages_from_recovery_csv(
        hormuud_csv_input_path, received_after_inclusive=start_date, received_before_exclusive=end_date
    )
    # Group the messages by the sender's urn, and store in container dicts where we can write the best matching Rapid
    # Pro message to when we find it.
    recovered_lut = dict() # of urn -> list of recovered message dict
    recovered_messages.sort(key=lambda msg: msg["timestamp"])
    for msg in recovered_messages:
        urn = msg["Sender"]
        if urn not in recovered_lut:
            recovered_lut[urn] = []
        recovered_lut[urn].append({
            "recovered_message": msg,
            "rapid_pro_message": None
        })
    # --- Stage (i): exact text matching ---
    # Search the recovered messages for exact text matches to each of the Rapid Pro messages.
    # A Rapid Pro message matches a message in the recovery csv if:
    #  (i) the recovery csv message has no match yet,
    #  (ii) the text exactly matches, and
    #  (iii) the time at Hormuud differs from the time at Rapid Pro by < 5 minutes (experimental analysis of this
    #        dataset showed the mean lag to be roughly 3-4 mins, with >99.99% of messages received within 4 minutes)
    log.info(f"Attempting to match the Rapid Pro messages with the recovered messages...")
    rapid_pro_messages.sort(key=lambda msg: msg.sent_on)
    unmatched_messages = []
    skipped_messages = []
    for rapid_pro_msg in rapid_pro_messages:
        rapid_pro_text = rapid_pro_msg.text
        if rapid_pro_msg.urn not in recovered_lut:
            log.warning(f"URN {rapid_pro_msg.urn} not found in the recovered_lut")
            skipped_messages.append(rapid_pro_msg)
            continue
        for recovery_item in recovered_lut[rapid_pro_msg.urn]:
            if recovery_item["rapid_pro_message"] is None and \
                    recovery_item["recovered_message"]["Message"] == rapid_pro_text and \
                    rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
                recovery_item["rapid_pro_message"] = rapid_pro_msg
                break
        else:
            unmatched_messages.append(rapid_pro_msg)
    log.info(f"Attempted to perform exact matches for {len(rapid_pro_messages)} Rapid Pro messages: "
             f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
             f"{len(skipped_messages)} messages skipped due to their urns not being present in the recovery csv, "
             f"{len(unmatched_messages)} unmatched messages remain")
    # --- Stage (ii): matching after simulating Excel's data mangling ---
    # Attempt to find matches after simulating Excel-mangling of some of the data.
    rapid_pro_messages = unmatched_messages
    unmatched_messages = []
    for rapid_pro_msg in rapid_pro_messages:
        rapid_pro_text = rapid_pro_msg.text
        rapid_pro_text = rapid_pro_text.replace("\n", " ") # newlines -> spaces
        if re.compile("^\\s*[0-9][0-9]*\\s*$").match(rapid_pro_text):
            rapid_pro_text = rapid_pro_text.strip() # numbers with whitespace -> just the number
            if rapid_pro_text.startswith("0"):
                rapid_pro_text = rapid_pro_text[1:] # replace leading 0
            if Decimal(rapid_pro_text) > 1000000000:
                rapid_pro_text = f"{Decimal(rapid_pro_text):.14E}" # big numbers -> scientific notation
        if re.compile("^\".*\"$").match(rapid_pro_text):
            rapid_pro_text = rapid_pro_text.replace("\"", "") # strictly quoted text -> just the text
        rapid_pro_text = rapid_pro_text.encode("ascii", "replace").decode("ascii") # non-ascii characters -> '?'
        for recovery_item in recovered_lut[rapid_pro_msg.urn]:
            if recovery_item["rapid_pro_message"] is None and \
                    recovery_item["recovered_message"]["Message"] == rapid_pro_text and \
                    rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
                recovery_item["rapid_pro_message"] = rapid_pro_msg
                break
        else:
            unmatched_messages.append(rapid_pro_msg)
    log.info(f"Attempted to perform Excel-mangled matches for {len(rapid_pro_messages)} Rapid Pro messages: "
             f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
             f"{len(unmatched_messages)} unmatched messages remain")
    # --- Stage (iii): timestamp-only matching, logged for manual review ---
    # Finally, search by timestamp, and export these to a log file for manual review.
    # This covers all sorts of weird edge cases, mostly around Hormuud/Excel's handling of special characters.
    rapid_pro_messages = unmatched_messages
    unmatched_messages = []
    with open(timestamp_matches_log_output_csv_path, "w") as f:
        writer = csv.DictWriter(f, fieldnames=["Rapid Pro", "Hormuud Recovery"])
        writer.writeheader()
        for rapid_pro_msg in rapid_pro_messages:
            for recovery_item in recovered_lut[rapid_pro_msg.urn]:
                if recovery_item["rapid_pro_message"] is None and \
                        rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
                    writer.writerow({
                        "Rapid Pro": rapid_pro_msg.text,
                        "Hormuud Recovery": recovery_item["recovered_message"]["Message"]
                    })
                    recovery_item["rapid_pro_message"] = rapid_pro_msg
                    break
            else:
                unmatched_messages.append(rapid_pro_msg)
    log.info(f"Attempted to perform timestamp matching for {len(rapid_pro_messages)} Rapid Pro messages: "
             f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
             f"{len(unmatched_messages)} unmatched messages remain")
    log.info(f"Wrote the timestamp-based matches to {timestamp_matches_log_output_csv_path} for manual verification. "
             f"Please check these carefully")
    # Abort if any Rapid Pro message still has no recovered counterpart.
    if len(unmatched_messages) > 0:
        log.error(f"{len(unmatched_messages)} unmatched messages remain after attempting all automated matching "
                  f"techniques")
        print(unmatched_messages[0].serialize())
        exit(1)
    # Get the recovered messages that don't have a matching message from Rapid Pro
    unmatched_recovered_messages = []
    matched_recovered_messages = []
    for urn in recovered_lut:
        for recovery_item in recovered_lut[urn]:
            if recovery_item["rapid_pro_message"] is None:
                unmatched_recovered_messages.append(recovery_item["recovered_message"])
            else:
                matched_recovered_messages.append(recovery_item["recovered_message"])
    log.info(f"Found {len(unmatched_recovered_messages)} recovered messages that had no match in Rapid Pro "
             f"(and {len(matched_recovered_messages)} that did have a match)")
    # Sanity check: unmatched = recovered - rapid_pro + skipped, or the
    # matching above went wrong somewhere.
    expected_unmatched_messages_count = len(recovered_messages) - len(all_rapid_pro_messages) + len(skipped_messages)
    log.info(f"Total expected unmatched messages was {expected_unmatched_messages_count}")
    if expected_unmatched_messages_count != len(unmatched_recovered_messages):
        log.error("Number of unmatched messages != expected number of unmatched messages")
        exit(1)
    # Export to a csv that can be processed by de_identify_csv.py
    log.info(f"Exporting unmatched recovered messages to {output_csv_path}")
    with open(output_csv_path, "w") as f:
        writer = csv.DictWriter(f, fieldnames=["Sender", "Receiver", "Message", "ReceivedOn"])
        writer.writeheader()
        for msg in unmatched_recovered_messages:
            writer.writerow({
                "Sender": msg["Sender"],
                "Receiver": msg["Receiver"],
                "Message": msg["Message"],
                "ReceivedOn": msg["ReceivedOn"]
            })
| 54.971014 | 124 | 0.680925 | import argparse
import csv
import re
from datetime import datetime, timedelta
from decimal import Decimal
import pytz
from core_data_modules.logging import Logger
from dateutil.parser import isoparse
from rapid_pro_tools.rapid_pro_client import RapidProClient
from storage.google_cloud import google_cloud_utils
log = Logger(__name__)
TARGET_SHORTCODE = "378"
def get_incoming_hormuud_messages_from_rapid_pro(google_cloud_credentials_file_path, rapid_pro_domain,
                                                 rapid_pro_token_file_url,
                                                 created_after_inclusive=None, created_before_exclusive=None):
    """
    Downloads Rapid Pro messages in the given time window and returns only
    the incoming ones sent from URNs with Hormuud's prefixes
    (tel:+25261 / tel:+25268).

    The Rapid Pro auth token is fetched from the given GS URL using the
    given Google Cloud service account credentials file.
    """
    log.info("Downloading Rapid Pro access token...")
    rapid_pro_token = google_cloud_utils.download_blob_to_string(
        google_cloud_credentials_file_path, rapid_pro_token_file_url).strip()
    rapid_pro = RapidProClient(rapid_pro_domain, rapid_pro_token)
    all_messages = rapid_pro.get_raw_messages(
        created_after_inclusive=created_after_inclusive,
        created_before_exclusive=created_before_exclusive,
        ignore_archives=True
    )
    log.info(f"Downloaded {len(all_messages)} messages")
    log.info(f"Filtering for messages from URNs on Hormuud's networks")
    hormuud_messages = [msg for msg in all_messages if msg.urn.startswith("tel:+25261") or msg.urn.startswith("tel:+25268")]
    log.info(f"Filtered for messages from URNs on Hormuud's networks: {len(hormuud_messages)} messages remain")
    log.info(f"Filtering for incoming messages")
    incoming_hormuud_messages = [msg for msg in hormuud_messages if msg.direction == "in"]
    log.info(f"Filtered for incoming messages: {len(incoming_hormuud_messages)} remain")
    return incoming_hormuud_messages
def get_incoming_hormuud_messages_from_recovery_csv(csv_path,
                                                    received_after_inclusive=None, received_before_exclusive=None):
    """Loads Hormuud's recovery csv and returns the incoming messages for the target
    short code, with sender URNs normalised and a localised 'timestamp' field added.

    :param csv_path: Path to the recovery csv issued by Hormuud.
    :param received_after_inclusive: If set, drop messages received before this time.
    :param received_before_exclusive: If set, drop messages received at/after this time.
    :return: Recovered message dicts, each mutated to carry 'Sender' as a tel URN and 'timestamp'.
    """
    def parse_received_on(raw):
        # Convert times with a try/catch because there are two possible formats due to
        # the omission of ms when ms == 000.
        tz = pytz.timezone("Africa/Mogadishu")
        try:
            return tz.localize(datetime.strptime(raw, "%d/%m/%Y %H:%M:%S.%f"))
        except ValueError:
            return tz.localize(datetime.strptime(raw, "%d/%m/%Y %H:%M:%S"))

    log.info(f"Loading recovered messages from Hormuud csv at {csv_path}...")
    with open(csv_path) as f:
        all_recovered_messages = list(csv.DictReader(f))
    log.info(f"Loaded {len(all_recovered_messages)} messages")

    log.info(f"Filtering for messages sent to the target short code {TARGET_SHORTCODE}...")
    incoming_recovered_messages = [msg for msg in all_recovered_messages if msg["Receiver"] == TARGET_SHORTCODE]
    log.info(f"Filtered for messages sent to the target short code {TARGET_SHORTCODE}: "
             f"{len(incoming_recovered_messages)} recovered messages remain")

    log.info(f"Standardising fieldnames")
    for msg in incoming_recovered_messages:
        msg["Sender"] = "tel:+" + msg["Sender"]
        msg["timestamp"] = parse_received_on(msg["ReceivedOn"])

    if received_after_inclusive is not None:
        log.info(f"Filtering out messages sent before {received_after_inclusive}...")
        incoming_recovered_messages = [msg for msg in incoming_recovered_messages
                                       if msg["timestamp"] >= received_after_inclusive]
        log.info(f"Filtered out messages sent before {received_after_inclusive}: "
                 f"{len(incoming_recovered_messages)} messages remain")

    if received_before_exclusive is not None:
        log.info(f"Filtering out messages sent after {received_before_exclusive}...")
        incoming_recovered_messages = [msg for msg in incoming_recovered_messages
                                       if msg["timestamp"] < received_before_exclusive]
        log.info(f"Filtered out messages sent after {received_before_exclusive}: "
                 f"{len(incoming_recovered_messages)} messages remain")

    return incoming_recovered_messages
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Uses Rapid Pro's message logs to filter a Hormuud recovery csv for incoming messages on this "
"short code that aren't in Rapid Pro. Attempts to identify messages that have already been "
"received in Rapid Pro by (i) looking for exact text matches, then (ii) looking for matches after "
"applying Excel's data-mangling algorithms, then (iii) matching by timestamp. "
"Matches made by method (iii) are exported for manual review")
parser.add_argument("google_cloud_credentials_file_path", metavar="google-cloud-credentials-file-path",
help="Path to a Google Cloud service account credentials file to use to access the "
"credentials bucket")
parser.add_argument("rapid_pro_domain", metavar="rapid-pro-domain",
help="URL of the Rapid Pro server to download data from")
parser.add_argument("rapid_pro_token_file_url", metavar="rapid-pro-token-file-url",
help="GS URL of a text file containing the authorisation token for the Rapid Pro server")
parser.add_argument("start_date", metavar="start-date",
help="Timestamp to filter both datasets by (inclusive), as an ISO8601 str")
parser.add_argument("end_date", metavar="end-date",
help="Timestamp to filter both datasets by (exclusive), as an ISO8601 str")
parser.add_argument("hormuud_csv_input_path", metavar="hormuud-csv-input-path",
help="Path to a CSV file issued by Hormuud to recover messages from")
parser.add_argument("timestamp_matches_log_output_csv_path", metavar="timestamp-matches-log-output-csv-path",
help="File to log the matches made between the Rapid Pro and recovery datasets by timestamp, "
"for manual review and approval")
parser.add_argument("output_csv_path", metavar="output-csv-path",
help="File to write the filtered, recovered data to, in a format ready for de-identification "
"and integration into the pipeline")
args = parser.parse_args()
google_cloud_credentials_file_path = args.google_cloud_credentials_file_path
rapid_pro_domain = args.rapid_pro_domain
rapid_pro_token_file_url = args.rapid_pro_token_file_url
start_date = isoparse(args.start_date)
end_date = isoparse(args.end_date)
hormuud_csv_input_path = args.hormuud_csv_input_path
timestamp_matches_log_output_csv_path = args.timestamp_matches_log_output_csv_path
output_csv_path = args.output_csv_path
# Get messages from Rapid Pro and from the recovery csv
rapid_pro_messages = get_incoming_hormuud_messages_from_rapid_pro(
google_cloud_credentials_file_path, rapid_pro_domain, rapid_pro_token_file_url,
created_after_inclusive=start_date,
created_before_exclusive=end_date,
)
all_rapid_pro_messages = rapid_pro_messages
recovered_messages = get_incoming_hormuud_messages_from_recovery_csv(
hormuud_csv_input_path, received_after_inclusive=start_date, received_before_exclusive=end_date
)
# Group the messages by the sender's urn, and store in container dicts where we can write the best matching Rapid
# Pro message to when we find it.
recovered_lut = dict() # of urn -> list of recovered message dict
recovered_messages.sort(key=lambda msg: msg["timestamp"])
for msg in recovered_messages:
urn = msg["Sender"]
if urn not in recovered_lut:
recovered_lut[urn] = []
recovered_lut[urn].append({
"recovered_message": msg,
"rapid_pro_message": None
})
# Search the recovered messages for exact text matches to each of the Rapid Pro messages.
# A Rapid Pro message matches a message in the recovery csv if:
# (i) the recovery csv message has no match yet,
# (ii) the text exactly matches, and
# (iii) the time at Hormuud differs from the time at Rapid Pro by < 5 minutes (experimental analysis of this
# dataset showed the mean lag to be roughly 3-4 mins, with >99.99% of messages received within 4 minutes)
log.info(f"Attempting to match the Rapid Pro messages with the recovered messages...")
rapid_pro_messages.sort(key=lambda msg: msg.sent_on)
unmatched_messages = []
skipped_messages = []
for rapid_pro_msg in rapid_pro_messages:
rapid_pro_text = rapid_pro_msg.text
if rapid_pro_msg.urn not in recovered_lut:
log.warning(f"URN {rapid_pro_msg.urn} not found in the recovered_lut")
skipped_messages.append(rapid_pro_msg)
continue
for recovery_item in recovered_lut[rapid_pro_msg.urn]:
if recovery_item["rapid_pro_message"] is None and \
recovery_item["recovered_message"]["Message"] == rapid_pro_text and \
rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
recovery_item["rapid_pro_message"] = rapid_pro_msg
break
else:
unmatched_messages.append(rapid_pro_msg)
log.info(f"Attempted to perform exact matches for {len(rapid_pro_messages)} Rapid Pro messages: "
f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
f"{len(skipped_messages)} messages skipped due to their urns not being present in the recovery csv, "
f"{len(unmatched_messages)} unmatched messages remain")
# Attempt to find matches after simulating Excel-mangling of some of the data.
rapid_pro_messages = unmatched_messages
unmatched_messages = []
for rapid_pro_msg in rapid_pro_messages:
rapid_pro_text = rapid_pro_msg.text
rapid_pro_text = rapid_pro_text.replace("\n", " ") # newlines -> spaces
if re.compile("^\\s*[0-9][0-9]*\\s*$").match(rapid_pro_text):
rapid_pro_text = rapid_pro_text.strip() # numbers with whitespace -> just the number
if rapid_pro_text.startswith("0"):
rapid_pro_text = rapid_pro_text[1:] # replace leading 0
if Decimal(rapid_pro_text) > 1000000000:
rapid_pro_text = f"{Decimal(rapid_pro_text):.14E}" # big numbers -> scientific notation
if re.compile("^\".*\"$").match(rapid_pro_text):
rapid_pro_text = rapid_pro_text.replace("\"", "") # strictly quoted text -> just the text
rapid_pro_text = rapid_pro_text.encode("ascii", "replace").decode("ascii") # non-ascii characters -> '?'
for recovery_item in recovered_lut[rapid_pro_msg.urn]:
if recovery_item["rapid_pro_message"] is None and \
recovery_item["recovered_message"]["Message"] == rapid_pro_text and \
rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
recovery_item["rapid_pro_message"] = rapid_pro_msg
break
else:
unmatched_messages.append(rapid_pro_msg)
log.info(f"Attempted to perform Excel-mangled matches for {len(rapid_pro_messages)} Rapid Pro messages: "
f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
f"{len(unmatched_messages)} unmatched messages remain")
# Finally, search by timestamp, and export these to a log file for manual review.
# This covers all sorts of weird edge cases, mostly around Hormuud/Excel's handling of special characters.
rapid_pro_messages = unmatched_messages
unmatched_messages = []
with open(timestamp_matches_log_output_csv_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=["Rapid Pro", "Hormuud Recovery"])
writer.writeheader()
for rapid_pro_msg in rapid_pro_messages:
for recovery_item in recovered_lut[rapid_pro_msg.urn]:
if recovery_item["rapid_pro_message"] is None and \
rapid_pro_msg.sent_on - recovery_item["recovered_message"]["timestamp"] < timedelta(minutes=5):
writer.writerow({
"Rapid Pro": rapid_pro_msg.text,
"Hormuud Recovery": recovery_item["recovered_message"]["Message"]
})
recovery_item["rapid_pro_message"] = rapid_pro_msg
break
else:
unmatched_messages.append(rapid_pro_msg)
log.info(f"Attempted to perform timestamp matching for {len(rapid_pro_messages)} Rapid Pro messages: "
f"{len(rapid_pro_messages) - len(unmatched_messages)} matched successfully, "
f"{len(unmatched_messages)} unmatched messages remain")
log.info(f"Wrote the timestamp-based matches to {timestamp_matches_log_output_csv_path} for manual verification. "
f"Please check these carefully")
if len(unmatched_messages) > 0:
log.error(f"{len(unmatched_messages)} unmatched messages remain after attempting all automated matching "
f"techniques")
print(unmatched_messages[0].serialize())
exit(1)
# Get the recovered messages that don't have a matching message from Rapid Pro
unmatched_recovered_messages = []
matched_recovered_messages = []
for urn in recovered_lut:
for recovery_item in recovered_lut[urn]:
if recovery_item["rapid_pro_message"] is None:
unmatched_recovered_messages.append(recovery_item["recovered_message"])
else:
matched_recovered_messages.append(recovery_item["recovered_message"])
log.info(f"Found {len(unmatched_recovered_messages)} recovered messages that had no match in Rapid Pro "
f"(and {len(matched_recovered_messages)} that did have a match)")
expected_unmatched_messages_count = len(recovered_messages) - len(all_rapid_pro_messages) + len(skipped_messages)
log.info(f"Total expected unmatched messages was {expected_unmatched_messages_count}")
if expected_unmatched_messages_count != len(unmatched_recovered_messages):
log.error("Number of unmatched messages != expected number of unmatched messages")
exit(1)
# Export to a csv that can be processed by de_identify_csv.py
log.info(f"Exporting unmatched recovered messages to {output_csv_path}")
with open(output_csv_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=["Sender", "Receiver", "Message", "ReceivedOn"])
writer.writeheader()
for msg in unmatched_recovered_messages:
writer.writerow({
"Sender": msg["Sender"],
"Receiver": msg["Receiver"],
"Message": msg["Message"],
"ReceivedOn": msg["ReceivedOn"]
})
| 3,788 | 0 | 46 |
f44356cc7275597e8fb3e5cb12dc91edf393188f | 11,478 | py | Python | platform/polycommon/tests/test_conf/test_option_service.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | platform/polycommon/tests/test_conf/test_option_service.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | platform/polycommon/tests/test_conf/test_option_service.py | erexer/polyaxon | be14dae1ed56d568983388736bcdaf27a7baa4a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import TestCase
from django.conf import settings
from polyaxon import types
from polycommon.conf.exceptions import ConfException
from polycommon.conf.service import ConfService
from polycommon.options.option import Option, OptionScope, OptionStores
from polycommon.options.option_manager import OptionManager
| 33.858407 | 88 | 0.688796 | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import TestCase
from django.conf import settings
from polyaxon import types
from polycommon.conf.exceptions import ConfException
from polycommon.conf.service import ConfService
from polycommon.options.option import Option, OptionScope, OptionStores
from polycommon.options.option_manager import OptionManager
class DummySettingsService(ConfService):
    """ConfService test double for the SETTINGS store that records every key requested."""

    def __init__(self):
        # Idiomatic empty set (was `set([])`); holds the keys that tests have looked up.
        self.options = set()
        super().__init__()

    def get(self, key, check_cache=True, to_dict=False):
        """Record `key` as accessed, then delegate to the real lookup."""
        self.options.add(key)
        return super().get(key, check_cache=check_cache, to_dict=to_dict)
class DummyEnvService(ConfService):
    """ConfService test double for the ENV store that records every key requested."""

    def __init__(self):
        # Idiomatic empty set (was `set([])`); holds the keys that tests have looked up.
        self.options = set()
        super().__init__()

    def get(self, key, check_cache=True, to_dict=False):
        """Record `key` as accessed, then delegate to the real lookup."""
        self.options.add(key)
        return super().get(key, check_cache=check_cache, to_dict=to_dict)
class DummySettingsOption(Option):
    """Optional global str option backed by the SETTINGS store, with no default."""
    key = "FOO_BAR"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.SETTINGS
    typing = types.STR
    default = None
    options = None
class DummyOptionalDefaultSettingsOption(Option):
    """Optional SETTINGS option with a declared default ("default_settings").

    Shares key FOO_BAR2 with DummyNonOptionalSettingsOption so tests can contrast
    optional vs non-optional behaviour on the same key.
    """
    key = "FOO_BAR2"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.SETTINGS
    typing = types.STR
    default = "default_settings"
    options = None
class DummyNonOptionalSettingsOption(Option):
    """Non-optional SETTINGS option (is_optional=False): reads raise when no value is set."""
    key = "FOO_BAR2"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = False
    is_list = False
    store = OptionStores.SETTINGS
    typing = types.STR
    default = None
    options = None
class DummyEnvOption(Option):
    """Optional global str option backed by the ENV store, with no default."""
    key = "FOO_BAR"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.ENV
    typing = types.STR
    default = None
    options = None
    cache_ttl = 0  # caching disabled so each get re-reads the environment (tests mutate this)
class DummyOptionalDefaultEnvOption(Option):
    """Optional ENV option with a declared default ("default_env"); shares key FOO_BAR2."""
    key = "FOO_BAR2"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.ENV
    typing = types.STR
    default = "default_env"
    options = None
class DummyNonOptionalEnvOption(Option):
    """Non-optional ENV option (is_optional=False): reads raise when no value is set."""
    key = "FOO_BAR2"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = False
    is_list = False
    store = OptionStores.ENV
    typing = types.STR
    default = None
    options = None
class DummyBoolEnvOption(Option):
    """Optional bool-typed ENV option defaulting to True; used to test env bool parsing."""
    key = "BOOL_KEY"
    scope = OptionScope.GLOBAL
    is_secret = False
    is_optional = True
    is_list = False
    store = OptionStores.ENV
    typing = types.BOOL
    default = True
    options = None
class TestConfService(TestCase):
    """Exercises ConfService get/set/delete through the SETTINGS- and ENV-backed test doubles."""
    def setUp(self):
        """Builds a fresh service + OptionManager pair per store for every test."""
        super().setUp()
        self.settings_service = DummySettingsService()
        self.env_service = DummyEnvService()
        self.settings_service.option_manager = OptionManager()
        self.env_service.option_manager = OptionManager()
        self.settings_service.setup()
        self.env_service.setup()
    def test_can_handle(self):
        """can_handle is True only for string keys that have been subscribed."""
        # Test handles only str event types
        assert self.settings_service.can_handle(key=1) is False
        # The service's manager did not subscribe to the event yet
        assert self.settings_service.can_handle(key=DummySettingsOption.key) is False
        # Subscribe to the event
        self.settings_service.option_manager.subscribe(DummySettingsOption)
        assert self.settings_service.can_handle(key=DummySettingsOption.key) is True
    def test_non_optional_settings(self):
        """Reading a non-optional settings option with no value raises, subscribed or not."""
        with self.assertRaises(ConfException):
            self.settings_service.get(key=DummyNonOptionalSettingsOption.key)
        # Subscribe to the event
        self.settings_service.option_manager.subscribe(DummyNonOptionalSettingsOption)
        with self.assertRaises(ConfException):
            self.settings_service.get(key=DummyNonOptionalSettingsOption.key)
    def test_non_optional_env(self):
        """Reading a non-optional env option with no value raises, subscribed or not."""
        with self.assertRaises(ConfException):
            self.env_service.get(key=DummyNonOptionalEnvOption.key)
        # Subscribe to the event
        self.env_service.option_manager.subscribe(DummyNonOptionalEnvOption)
        with self.assertRaises(ConfException):
            self.env_service.get(key=DummyNonOptionalEnvOption.key)
    def test_optional_with_default_settings(self):
        """An optional settings option falls back to its declared default once subscribed."""
        with self.assertRaises(ConfException):
            self.settings_service.get(key=DummyOptionalDefaultSettingsOption.key)
        # Subscribe to the event
        self.settings_service.option_manager.subscribe(
            DummyOptionalDefaultSettingsOption
        )
        assert (
            self.settings_service.get(key=DummyOptionalDefaultSettingsOption.key)
            == "default_settings"
        )
    def test_optional_with_default_env(self):
        """An optional env option falls back to its declared default once subscribed."""
        with self.assertRaises(ConfException):
            self.env_service.get(key=DummyOptionalDefaultEnvOption.key)
        # Subscribe to the event
        self.env_service.option_manager.subscribe(DummyOptionalDefaultEnvOption)
        assert (
            self.env_service.get(key=DummyOptionalDefaultEnvOption.key) == "default_env"
        )
    def test_get_from_settings(self):
        """Values are read live from django.conf.settings, plain or as an option dict."""
        settings.FOO_BAR = None
        # The service's manager did not subscribe to the event yet
        with self.assertRaises(ConfException):
            self.settings_service.get(key=DummySettingsOption.key)
        # Subscribe
        self.settings_service.option_manager.subscribe(DummySettingsOption)
        # No entry in settings
        assert self.settings_service.get(key=DummySettingsOption.key) is None
        # Update settings
        settings.FOO_BAR = "foo"
        assert self.settings_service.get(key=DummySettingsOption.key) == "foo"
        # Get as option
        option_dict = DummySettingsOption.to_dict(value="foo")
        assert option_dict["value"] == "foo"
        assert (
            self.settings_service.get(key=DummySettingsOption.key, to_dict=True)
            == option_dict
        )
        assert len(self.settings_service.options) == 1
        option_key = self.settings_service.options.pop()
        assert option_key == DummySettingsOption.key
    def test_get_from_env(self):
        """Values come from os.environ (settings changes are ignored), incl. bool parsing."""
        # The service's manager did not subscribe to the event yet
        with self.assertRaises(ConfException):
            self.env_service.get(key=DummyEnvOption.key)
        # Subscribe
        self.env_service.option_manager.subscribe(DummyEnvOption)
        # No entry in env
        assert self.env_service.get(key=DummyEnvOption.key) is None
        # Update settings does not change anything
        settings.FOO_BAR = "foo"
        assert self.env_service.get(key=DummyEnvOption.key) is None
        # Update env
        os.environ[DummyEnvOption.key] = "foo"
        assert self.env_service.get(key=DummyEnvOption.key) == "foo"
        # Get as option
        option_dict = DummyEnvOption.to_dict(value="foo")
        assert option_dict["value"] == "foo"
        assert self.env_service.get(key=DummyEnvOption.key, to_dict=True) == option_dict
        assert len(self.env_service.options) == 1
        option_key = self.env_service.options.pop()
        assert option_key == DummyEnvOption.key
        # Get bool options
        self.env_service.option_manager.subscribe(DummyBoolEnvOption)
        option_dict = DummyBoolEnvOption.to_dict(value=True)
        assert option_dict["value"] is True
        assert (
            self.env_service.get(key=DummyBoolEnvOption.key, to_dict=True)
            == option_dict
        )
        option_dict = DummyBoolEnvOption.to_dict(value=False)
        assert option_dict["value"] is False
        os.environ[DummyBoolEnvOption.key] = "false"
        assert (
            self.env_service.get(key=DummyBoolEnvOption.key, to_dict=True)
            == option_dict
        )
    def test_option_caching(self):
        """cache_ttl == 0 disables caching; a positive ttl serves stale values until delete/bypass."""
        os.environ.pop(DummyEnvOption.key, None)
        # Subscribe
        self.env_service.option_manager.subscribe(DummyEnvOption)
        # No entry in env
        assert self.env_service.get(key=DummyEnvOption.key) is None
        # Update env
        os.environ[DummyEnvOption.key] = "foo"
        assert self.env_service.get(key=DummyEnvOption.key) == "foo"
        # Cache is 0, changing the value should be reflected automatically
        os.environ[DummyEnvOption.key] = "bar"
        assert self.env_service.get(key=DummyEnvOption.key) == "bar"
        # Update caching ttl
        DummyEnvOption.cache_ttl = 10
        assert self.env_service.get(key=DummyEnvOption.key) == "bar"
        os.environ[DummyEnvOption.key] = "foo"
        assert self.env_service.get(key=DummyEnvOption.key) == "bar"
        assert self.env_service.get(key=DummyEnvOption.key, check_cache=False) == "foo"
        # Delete remove from cache
        DummyEnvOption.cache_ttl = 0
        self.env_service.delete(key=DummyEnvOption.key)
        assert self.env_service.get(key=DummyEnvOption.key) is None
        # Set new value
        os.environ[DummyEnvOption.key] = "foo"
        # Update caching ttl
        DummyEnvOption.cache_ttl = 0
        assert self.env_service.get(key=DummyEnvOption.key) == "foo"
        os.environ[DummyEnvOption.key] = "bar"
        assert self.env_service.get(key=DummyEnvOption.key) == "bar"
        os.environ[DummyEnvOption.key] = "foo"
        assert self.env_service.get(key=DummyEnvOption.key) == "foo"
    def test_setting_none_value_raises(self):
        """set(None) is rejected by both stores."""
        with self.assertRaises(ConfException):
            self.settings_service.set(key="SOME_NEW_KEY", value=None)
        with self.assertRaises(ConfException):
            self.env_service.set(key="SOME_NEW_KEY", value=None)
    def test_setting_unknown_key_raises(self):
        """set() on an unsubscribed key is rejected by both stores."""
        with self.assertRaises(ConfException):
            self.settings_service.set(key="SOME_NEW_KEY", value="foo_bar")
        with self.assertRaises(ConfException):
            self.env_service.set(key="SOME_NEW_KEY", value="foo_bar")
    def test_cannot_set_keys_on_settings_backend(self):
        """The settings backend rejects set() even for subscribed keys."""
        with self.assertRaises(ConfException):
            self.settings_service.set(key=DummySettingsOption.key, value="foo_bar")
        # Subscribe
        self.settings_service.option_manager.subscribe(DummySettingsOption)
        with self.assertRaises(ConfException):
            self.settings_service.set(key=DummySettingsOption.key, value="foo_bar")
    def test_cannot_delete_keys_on_settings_backend(self):
        """The settings backend rejects delete() even for subscribed keys."""
        with self.assertRaises(ConfException):
            self.settings_service.delete(key=DummySettingsOption.key)
        # Subscribe
        self.settings_service.option_manager.subscribe(DummySettingsOption)
        with self.assertRaises(ConfException):
            self.settings_service.delete(key=DummySettingsOption.key)
| 8,164 | 1,677 | 686 |
bad371a5670df77a6cc1c85725b2410b85c36255 | 210 | py | Python | tests/conftest.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | 4 | 2020-03-01T14:01:48.000Z | 2021-02-23T19:33:36.000Z | tests/conftest.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | 1 | 2021-05-09T15:47:17.000Z | 2021-05-09T16:12:03.000Z | tests/conftest.py | ynikitenko/lena | d0fbae47f21007685edbd4e77bc91413421bebd1 | [
"Apache-2.0"
] | null | null | null | try:
import ROOT
except ImportError:
collect_ignore_glob = ["*/root/*"]
# otherwise will have problems either with tox,
# or when executing pytest directly
collect_ignore_glob += ["root/*"]
| 26.25 | 51 | 0.680952 | try:
import ROOT
except ImportError:
collect_ignore_glob = ["*/root/*"]
# otherwise will have problems either with tox,
# or when executing pytest directly
collect_ignore_glob += ["root/*"]
| 0 | 0 | 0 |
40cab5c47f0af4516f0ed2cdf6a7aaed78088be1 | 1,013 | py | Python | leave/migrations/0005_auto_20211121_0757.py | PriyanshBordia/LNMIIT-Leave-Management | 279464f4c3e3103d4edadc161f5efa027bca9bbd | [
"MIT"
] | 1 | 2022-03-06T19:39:10.000Z | 2022-03-06T19:39:10.000Z | leave/migrations/0005_auto_20211121_0757.py | PriyanshBordia/LNMIIT-Leave-Management-System | 279464f4c3e3103d4edadc161f5efa027bca9bbd | [
"MIT"
] | null | null | null | leave/migrations/0005_auto_20211121_0757.py | PriyanshBordia/LNMIIT-Leave-Management-System | 279464f4c3e3103d4edadc161f5efa027bca9bbd | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-21 07:57
from django.db import migrations, models
| 34.931034 | 305 | 0.615992 | # Generated by Django 3.2.9 on 2021-11-21 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Person.department (choice field), makes Application.rescheduled_date nullable,
    and changes the Person.office_no default."""
    dependencies = [
        ('leave', '0004_rename_reschedules_date_application_rescheduled_date'),
    ]
    operations = [
        migrations.AddField(
            model_name='person',
            name='department',
            field=models.CharField(choices=[('CSE', 'Computer Science and Engineering'), ('ECE', 'Electronics and Communication Engineering'), ('ME', 'Mechanical-Mechatronics Engineering'), ('HSS', 'Humanities and Social Sciences'), ('MH', 'Mathematics'), ('PH', 'Physics')], default='CSE', max_length=3),
        ),
        migrations.AlterField(
            model_name='application',
            name='rescheduled_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='office_no',
            # NOTE(review): default is the *string* '00' on an IntegerField; default=0 was
            # likely intended — confirm before touching migration history.
            field=models.IntegerField(default='00'),
        ),
    ]
| 0 | 899 | 23 |
f9d790a69517924bbd6d8395aea26be9e72e920b | 9,475 | py | Python | humpday/objectives/classic.py | MDCHAMP/humpday | 45e2cea95ae951d991ebc6c1e98314cc8c726f25 | [
"MIT"
] | 53 | 2021-02-13T01:17:02.000Z | 2022-03-16T10:07:29.000Z | humpday/objectives/classic.py | MDCHAMP/humpday | 45e2cea95ae951d991ebc6c1e98314cc8c726f25 | [
"MIT"
] | 16 | 2021-02-13T17:42:06.000Z | 2022-03-06T10:08:50.000Z | humpday/objectives/classic.py | MDCHAMP/humpday | 45e2cea95ae951d991ebc6c1e98314cc8c726f25 | [
"MIT"
] | 12 | 2020-12-09T03:16:22.000Z | 2022-02-23T09:34:00.000Z | import numpy as np
import math
from humpday.objectives.deapobjectives import schwefel, schaffer, bohachevsky, griewank, rastrigin, shekel, rosenbrock
# Some test objective functions to help guide optimizer choices
# -------------------------------------------------------------
#
# We'll use DEAP's set of groovy benchmarks, and landscapes, swarmpackagepy also
#
# See pretty pictures at https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks
# Some hardness assessment is at https://github.com/nathanrooy/landscapes#available-functions-from-single_objective but we'll do our own
## Basis of tricky functions
import datetime
DAY = datetime.datetime.today().day
OFFSET = DAY / 50
POWER = 1 + (DAY % 3) / 3.0
SHIFT = DAY / 100
def smoosh(ui):
""" Distort the interval to avoid obvious minima and avoid memorization """
ui_rotate = ui + SHIFT % 1.0
ui_shift = ui_rotate + SHIFT
xi = ui_shift ** POWER
low = SHIFT ** POWER
high = (1 + SHIFT) ** POWER
yi = (xi - low) / (high - low)
return yi ** POWER
## Combinations
DEAP_OBJECTIVES = [schwefel_on_cube, rastrigin_on_cube, griewank_on_cube,
bohachevsky_on_cube, rosenbrock_on_cube, shaffer_on_cube, shekel_on_cube,
deap_combo1_on_cube, deap_combo2_on_cube, deap_combo3_on_cube]
# By hand...
def rosenbrock_modified_on_cube(u: [float]) -> float:
""" https://en.wikipedia.org/wiki/Rosenbrock_function """
u_scaled = [4 * ui - 2 for ui in u]
if len(u) == 1:
return (0.25 - u_scaled[0]) ** 2
else:
return 5 + 0.001 * np.sum(
[100 * (ui_plus - ui * ui) + (1 - ui) * (1 - ui) for ui, ui_plus in zip(u_scaled[1:], u_scaled)])
# According to http://infinity77.net/global_optimization/test_functions.html#test-functions-index
# there are some really hard ones
# See https://github.com/andyfaff/ampgo/blob/master/%20ampgo%20--username%20andrea.gavana%40gmail.com/go_benchmark.py
# See also https://arxiv.org/pdf/1308.4008v1.pdf
def damavandi_on_cube(u: [float]) -> float:
""" A trivial multi-dimensional extension of Damavandi's function """
return 0.01 * damavandi2(u[0], u[1]) - 0.46
def damavandi2(u1, u2) -> float:
""" Pretty evil function this one """
# http://infinity77.net/global_optimization/test_functions_nd_D.html#go_benchmark.Damavandi
x1 = u1 / 14.
x2 = u2 / 14.
numerator = math.sin(math.pi * (x1 - 2.0)) * math.sin(math.pi * (x2 - 2.0))
denumerator = (math.pi ** 2) * (x1 - 2.0) * (x2 - 2.0)
factor1 = 1.0 - (abs(numerator / denumerator)) ** 5.0
factor2 = 2 + (x1 - 7.0) ** 2.0 + 2 * (x2 - 7.0) ** 2.0
return factor1 * factor2
# Landscapes
from landscapes.single_objective import styblinski_tang, zakharov, salomon, rotated_hyper_ellipsoid, qing, michalewicz
LANDSCAPES_OBJECTIVES = [styblinski_tang_on_cube, zakharov_on_cube, salomon_on_cube, rotated_hyper_ellipsoid_on_cube,
qing_on_cube, michaelewicz_on_cube, landscapes_combo1_on_cube, landscapes_combo2_on_cube,
landscapes_combo3_on_cube]
# Some copied from peabox
# https://github.com/stromatolith/peabox/blob/master/peabox/peabox_testfuncs.py
# as that isn't deployed to PyPI as far as I can determine
# Adapted from https://github.com/SISDevelop/SwarmPackagePy/blob/master/SwarmPackagePy/testFunctions.py
SWARM_OBJECTIVES = [cross_on_cube, powers_on_cube,
booth_on_cube, matyas_on_cube, drop_wave_on_cube]
A_CLASSIC_OBJECTIVE = rastrigin_on_cube # Just pick one for testing
MISC_OBJECTIVES = [paviani_on_cube, damavandi_on_cube, rosenbrock_modified_on_cube, ackley_on_cube]
CLASSIC_OBJECTIVES = DEAP_OBJECTIVES + LANDSCAPES_OBJECTIVES + MISC_OBJECTIVES + SWARM_OBJECTIVES
if __name__ == "__main__":
for objective in CLASSIC_OBJECTIVES:
objective(u=[0.0, 0.5, 1.0])
objective(u=[0.0, 0.5, 0.0, 0.0, 1.0])
print(len(CLASSIC_OBJECTIVES))
| 34.9631 | 136 | 0.640633 | import numpy as np
import math
from humpday.objectives.deapobjectives import schwefel, schaffer, bohachevsky, griewank, rastrigin, shekel, rosenbrock
# Some test objective functions to help guide optimizer choices
# -------------------------------------------------------------
#
# We'll use DEAP's set of groovy benchmarks, and landscapes, swarmpackagepy also
#
# See pretty pictures at https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks
# Some hardness assessment is at https://github.com/nathanrooy/landscapes#available-functions-from-single_objective but we'll do our own
## Basis of tricky functions
import datetime
DAY = datetime.datetime.today().day
OFFSET = DAY / 50
POWER = 1 + (DAY % 3) / 3.0
SHIFT = DAY / 100
def smoosh(ui):
""" Distort the interval to avoid obvious minima and avoid memorization """
ui_rotate = ui + SHIFT % 1.0
ui_shift = ui_rotate + SHIFT
xi = ui_shift ** POWER
low = SHIFT ** POWER
high = (1 + SHIFT) ** POWER
yi = (xi - low) / (high - low)
return yi ** POWER
def schwefel_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.schwefel
u_squished = [1000 * (smoosh(ui) - 0.5) for ui in u]
try:
return 0.001 * schwefel(u_squished)[0] / 0.71063
except Exception as e:
raise Exception(e)
def griewank_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.griewank
u_squished = [1200 * (ui ** 1.1 - 0.5) for ui in u]
return griewank(u_squished)[0] / 0.532075
def rastrigin_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.rastrigin
u_squished = [10.24 * (ui ** 1.1 - 0.5) for ui in u]
return 0.01 * rastrigin(u_squished)[0] / 0.059697
def bohachevsky_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.bohachevsky
u_squished = [10 * (ui ** 1.1 - 0.5) for ui in u]
return 1.0 + bohachevsky(u_squished)[0]
def rosenbrock_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.rosenbrock
u_squished = [200 * (ui ** 1.1 - 0.5) for ui in u]
return 1 + 0.1 * rosenbrock(u_squished)[0] / 0.008949
def shaffer_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.schaffer
u_squished = [200 * (ui ** 1.1 - 0.5) for ui in u]
return 0.01 * schaffer(u_squished)[0] / (0.1042133 * 0.71809)
def shekel_on_cube(u: [float]) -> float:
# https://deap.readthedocs.io/en/master/api/benchmarks.html#deap.benchmarks.schaffer
n_dim = len(u)
NUMMAX = 15
A = 10 * np.random.rand(NUMMAX, n_dim)
C = np.random.rand(NUMMAX)
u_squished = [800 * (smoosh(ui) - 0.5) for ui in u]
return 1.2298 - shekel(u_squished, A, C)[0]
## Combinations
def deap_combo1_on_cube(u: [float]) -> float:
    """Normalized blend of the Schwefel, Griewank and Shekel objectives."""
    total = schwefel_on_cube(u) + griewank_on_cube(u) + shekel_on_cube(u)
    return 0.3 * total / 1.883
def deap_combo2_on_cube(u: [float]) -> float:
    """Shifted average of the Schaffer and Shekel objectives."""
    average = 0.5 * (shaffer_on_cube(u) + shekel_on_cube(u))
    return average - 0.1075
def deap_combo3_on_cube(u: [float]) -> float:
    """Normalized blend of the Rosenbrock, Bohachevsky and Shekel objectives."""
    total = rosenbrock_on_cube(u) + bohachevsky_on_cube(u) + shekel_on_cube(u)
    return 0.5 * total / 1.88
# Registry of all DEAP-derived objectives; each maps a unit-cube point to a float.
DEAP_OBJECTIVES = [schwefel_on_cube, rastrigin_on_cube, griewank_on_cube,
                   bohachevsky_on_cube, rosenbrock_on_cube, shaffer_on_cube, shekel_on_cube,
                   deap_combo1_on_cube, deap_combo2_on_cube, deap_combo3_on_cube]
# By hand...
def rosenbrock_modified_on_cube(u: [float]) -> float:
    """ https://en.wikipedia.org/wiki/Rosenbrock_function """
    # NOTE(review): the first summand is 100*(x_next - x**2) *without* the
    # square the canonical Rosenbrock function applies, and zip(u_scaled[1:],
    # u_scaled) binds `ui` to the *later* coordinate and `ui_plus` to the
    # earlier one. Both look like bugs relative to the cited definition, but
    # the 5 + 0.001 scaling may have been calibrated against this exact form —
    # confirm before changing.
    u_scaled = [4 * ui - 2 for ui in u]
    if len(u) == 1:
        # 1-D degenerate case: simple quadratic with minimum at u_scaled = 0.25.
        return (0.25 - u_scaled[0]) ** 2
    else:
        return 5 + 0.001 * np.sum(
            [100 * (ui_plus - ui * ui) + (1 - ui) * (1 - ui) for ui, ui_plus in zip(u_scaled[1:], u_scaled)])
# According to http://infinity77.net/global_optimization/test_functions.html#test-functions-index
# there are some really hard ones
# See https://github.com/andyfaff/ampgo/blob/master/%20ampgo%20--username%20andrea.gavana%40gmail.com/go_benchmark.py
# See also https://arxiv.org/pdf/1308.4008v1.pdf
def damavandi_on_cube(u: [float]) -> float:
    """ A trivial multi-dimensional extension of Damavandi's function """
    # Only the first two coordinates are evaluated; any higher dimensions of u
    # are ignored. The 0.01 / -0.46 offset rescales the value range.
    return 0.01 * damavandi2(u[0], u[1]) - 0.46
def damavandi2(u1, u2) -> float:
    """Damavandi's deceptive 2-D test function, evaluated at (u1/14, u2/14)."""
    # http://infinity77.net/global_optimization/test_functions_nd_D.html#go_benchmark.Damavandi
    x1, x2 = u1 / 14., u2 / 14.
    dx1, dx2 = x1 - 2.0, x2 - 2.0
    # sinc-like term: products of sines over the matching polynomial.
    ratio = (math.sin(math.pi * dx1) * math.sin(math.pi * dx2)) / ((math.pi ** 2) * dx1 * dx2)
    deceptive_factor = 1.0 - (abs(ratio)) ** 5.0
    bowl_factor = 2 + (x1 - 7.0) ** 2.0 + 2 * (x2 - 7.0) ** 2.0
    return deceptive_factor * bowl_factor
def paviani_on_cube(u: [float]) -> float:
    """Paviani benchmark mapped onto the unit cube with a fixed normalization."""
    # http://infinity77.net/global_optimization/test_functions_nd_P.html#go_benchmark.Paviani
    x = np.array([2.001 + 5.996 * smoosh(ui) for ui in u])

    def clipped_log(arr):
        # Guard against log of non-positive values at the domain edge.
        return np.log(np.maximum(arr, np.array([1e-6] * len(arr))))

    log_terms = clipped_log(x - 2) ** 2.0 + clipped_log(10.0 - x) ** 2.0
    return float(np.sum(log_terms) - np.prod(x) ** 0.2) / 8.6456
# Landscapes
from landscapes.single_objective import styblinski_tang, zakharov, salomon, rotated_hyper_ellipsoid, qing, michalewicz
def styblinski_tang_on_cube(u: [float]) -> float:
    """Styblinski-Tang benchmark mapped onto the unit cube, shifted positive."""
    scaled = [10 * (smoosh(coord) - 0.5) for coord in u]
    return 3.3499 + 0.01 * styblinski_tang(scaled)
def zakharov_on_cube(u: [float]) -> float:
    """Zakharov benchmark mapped onto the unit cube with a fixed normalization."""
    scaled = [15 * smoosh(coord) - 10 for coord in u]
    return 0.01 * zakharov(scaled) / 0.3462
def salomon_on_cube(u: [float]) -> float:
    """Salomon benchmark mapped onto the unit cube with a fixed normalization."""
    scaled = [200 * smoosh(coord) - 100 for coord in u]
    return salomon(scaled) / 3.09999
def rotated_hyper_ellipsoid_on_cube(u: [float]) -> float:
    """Rotated hyper-ellipsoid benchmark mapped onto the unit cube."""
    scaled = [2 * 65.536 * smoosh(coord) - 65.536 for coord in u]
    return 0.1 * rotated_hyper_ellipsoid(scaled)
def qing_on_cube(u: [float]) -> float:
    """Qing benchmark mapped onto the unit cube with a fixed normalization."""
    scaled = [1000 * smoosh(coord) - 500 for coord in u]
    return qing(scaled) / 0.01805
def michaelewicz_on_cube(u: [float]) -> float:
    """Michalewicz benchmark (steepness m=20) mapped onto the unit cube."""
    scaled = [4 * smoosh(coord) - 2 for coord in u]
    return 1.4439 + 0.1 * michalewicz(scaled, m=20)
def landscapes_combo1_on_cube(u: [float]) -> float:
    """Normalized blend of the Qing and Michalewicz objectives."""
    blend = qing_on_cube(u) + michaelewicz_on_cube(u)
    return blend / (1.5744 * 1.4688)
def landscapes_combo2_on_cube(u: [float]) -> float:
    """Normalized blend of the rotated hyper-ellipsoid and Salomon objectives."""
    blend = rotated_hyper_ellipsoid_on_cube(u) + salomon_on_cube(u)
    return blend / (6.7555 * 0.82)
def landscapes_combo3_on_cube(u: [float]) -> float:
    """Shifted, normalized blend of the Zakharov and Styblinski-Tang objectives."""
    blend = zakharov_on_cube(u) + styblinski_tang_on_cube(u)
    return (2 + blend) / 4.4329
# Registry of the landscapes-derived objectives and their blends.
LANDSCAPES_OBJECTIVES = [styblinski_tang_on_cube, zakharov_on_cube, salomon_on_cube, rotated_hyper_ellipsoid_on_cube,
                         qing_on_cube, michaelewicz_on_cube, landscapes_combo1_on_cube, landscapes_combo2_on_cube,
                         landscapes_combo3_on_cube]
# Some copied from peabox
# https://github.com/stromatolith/peabox/blob/master/peabox/peabox_testfuncs.py
# as that isn't deployed to PyPI as far as I can determine
def ackley_on_cube(u: [float]) -> float:
    """Ackley benchmark rescaled from the unit cube to [-32.768, 32.768]^n.

    :param u: point on the unit cube [0, 1]^n
    :return: Ackley value divided by 20 so the range is roughly [0, 1]
    """
    # allow parameter range -32.768<=x(i)<=32.768, global minimum at x=(0,0,...,0)
    rescaled_u = [2 * 32.768 * smoosh(ui) - 32.768 for ui in u]
    # np.asfarray was removed in NumPy 2.0; np.asarray(..., dtype=float) is the
    # documented equivalent.
    x = np.asarray(rescaled_u, dtype=float)
    ndim = len(x)
    a = 20.
    b = 0.2
    c = 2. * math.pi
    return (-a * np.exp(-b * np.sqrt(1. / ndim * np.sum(x ** 2))) - np.exp(
        1. / ndim * np.sum(np.cos(c * x))) + a + np.exp(1.)) / 20.0
# Adapted from https://github.com/SISDevelop/SwarmPackagePy/blob/master/SwarmPackagePy/testFunctions.py
def cross_on_cube(u):
    """Cross-in-tray-style benchmark mapped onto the unit cube (rounded to 7 dp)."""
    x = [5 * smoosh(ui) - 2.5 for ui in u]
    radius = math.sqrt(sum([xi ** 2 for xi in x]))
    envelope = math.exp(abs(100 - radius / math.pi))
    inner = abs(math.sin(x[0]) * math.sin(x[1]) * envelope)
    return round(-0.0001 * (inner + 1) ** 0.1, 7)
def powers_on_cube(u):
    """Sum-of-different-powers benchmark mapped onto the unit cube."""
    x = [5 * smoosh(ui) - 2.5 for ui in u]
    return sum(abs(xi) ** (i + 2) for i, xi in enumerate(x))
def booth_on_cube(u):
    # NOTE(review): this body is byte-identical to powers_on_cube and does not
    # implement Booth's function ((x + 2y - 7)^2 + (2x + y - 5)^2) — it looks
    # like a copy-paste slip. Confirm whether the duplicate objective is
    # intended before fixing, since downstream results may depend on it.
    x = [5 * smoosh(ui) - 2.5 for ui in u]
    return sum([abs(x[i]) ** (i + 2) for i in range(len(x))])
def matyas_on_cube(u):
    """Matyas benchmark (uses the first two coordinates) on the unit cube."""
    x = [3 * smoosh(ui) - 1.5 for ui in u]
    squared_sum = sum(xi ** 2 for xi in x)
    return 0.26 * squared_sum - 0.48 * x[0] * x[1]
def drop_wave_on_cube(u):
    """Drop-wave benchmark mapped onto the unit cube."""
    x = [3 * smoosh(ui) - 1.5 for ui in u]
    squared_sum = sum(xi ** 2 for xi in x)
    numerator = 1 + math.cos(12 * math.sqrt(squared_sum))
    return -numerator / (0.5 * squared_sum + 2)
# Objective registries grouped by source library, plus one canonical pick and
# the combined list used by the smoke test below.
SWARM_OBJECTIVES = [cross_on_cube, powers_on_cube,
                    booth_on_cube, matyas_on_cube, drop_wave_on_cube]
A_CLASSIC_OBJECTIVE = rastrigin_on_cube  # Just pick one for testing
MISC_OBJECTIVES = [paviani_on_cube, damavandi_on_cube, rosenbrock_modified_on_cube, ackley_on_cube]
CLASSIC_OBJECTIVES = DEAP_OBJECTIVES + LANDSCAPES_OBJECTIVES + MISC_OBJECTIVES + SWARM_OBJECTIVES
if __name__ == "__main__":
    # Smoke-check every registered objective at two arbitrary cube points
    # (3-D and 5-D) to make sure nothing raises, then report the count.
    for objective_fn in CLASSIC_OBJECTIVES:
        objective_fn(u=[0.0, 0.5, 1.0])
        objective_fn(u=[0.0, 0.5, 0.0, 0.0, 1.0])
    print(len(CLASSIC_OBJECTIVES))
| 4,902 | 0 | 598 |
0e63b5b14e70e5be11aca2384a303bb7c76120bf | 1,964 | py | Python | packages/routines/database_saver.py | robmanganelly/PyJournal | dcf0e6e69a62ad5c6019b099104ae64880825814 | [
"MIT"
] | 1 | 2021-02-02T03:58:56.000Z | 2021-02-02T03:58:56.000Z | packages/routines/database_saver.py | rlothbrock/PyJournal | e44bca524c46364a6931375d8ac3ab8b90f71ad2 | [
"MIT"
] | null | null | null | packages/routines/database_saver.py | rlothbrock/PyJournal | e44bca524c46364a6931375d8ac3ab8b90f71ad2 | [
"MIT"
] | null | null | null | import datetime
import os
import shutil
from packages.dialogs.auxiliar_dialogs import selfCloseInterface
| 38.509804 | 89 | 0.513238 | import datetime
import os
import shutil
from packages.dialogs.auxiliar_dialogs import selfCloseInterface
def database_saver_routine(self, silent=False):
    """Copy the currently connected database file into '../saved databases/<name>/'.

    The backup file name embeds a timestamp so successive saves never collide.

    :param self: UI object exposing ``status['connected_to']`` (db file name)
    :param silent: when True, skip the confirmation dialog on success
    """
    database_name = self.status.get("connected_to").split('.')[0]
    # e.g. '20210202-035856-123456' — the same shape the previous
    # str()/replace() chain produced, without the fragile string surgery.
    saving_date = datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    suffix = 'Saved-'
    # Ensure '../saved databases/<database_name>/' exists (both levels).
    saving_dir = os.path.join(os.pardir, 'saved databases', database_name)
    os.makedirs(saving_dir, exist_ok=True)
    try:
        src = os.path.join(os.curdir, 'databases', '{}.db'.format(database_name))
        dst = os.path.join(
            os.pardir,
            'saved databases',
            database_name,
            '{}-{}-{}'.format(suffix, database_name, saving_date)
        )
        shutil.copy(src, dst)
        if not silent:
            selfCloseInterface(
                'Database {} guardada en {}'.format(database_name, dst),
                title='Base de Datos Guardada')
    except FileNotFoundError as fileError:
        # Source .db missing: report and show the failure dialog.
        print('error: %s' % fileError)
        selfCloseInterface('Fallo a la hora de guardar la base de datos',
                           title='Salva Fallida', alert_level=2)
    return
4bf1ae882642f3041e24b1449475aac20972541d | 4,388 | py | Python | SIM/cvbridge_build_ws/src/ros_enet/src/detector.py | dlfdn9392/autonomous_driving_car_project | dc07a9e949be4bbb37c8726357ee596f74eec3da | [
"MIT"
] | 3 | 2022-02-12T08:51:37.000Z | 2022-03-21T04:30:08.000Z | SIM/cvbridge_build_ws/src/ros_enet/src/detector.py | dlfdn9392/autonomous_driving_car_project | dc07a9e949be4bbb37c8726357ee596f74eec3da | [
"MIT"
] | null | null | null | SIM/cvbridge_build_ws/src/ros_enet/src/detector.py | dlfdn9392/autonomous_driving_car_project | dc07a9e949be4bbb37c8726357ee596f74eec3da | [
"MIT"
] | 2 | 2021-10-09T08:26:19.000Z | 2022-03-09T12:44:00.000Z | #!/usr/bin/env python3
#### ros import
import rospy
import std_msgs.msg
from rospkg import RosPack
from std_msgs.msg import UInt8
from std_msgs.msg import Float32MultiArray #c
from sensor_msgs.msg import Image
from geometry_msgs.msg import Polygon, Point32
import cv2
from cv_bridge import CvBridge, CvBridgeError
# python import
import os
import argparse
import time
import math
package = RosPack()
img_size = (480, 360)
if __name__ == "__main__":
# Initialize node
rospy.init_node("detector_manager_node")
dm = DetectorManager()
| 38.156522 | 134 | 0.645624 | #!/usr/bin/env python3
#### ros import
import rospy
import std_msgs.msg
from rospkg import RosPack
from std_msgs.msg import UInt8
from std_msgs.msg import Float32MultiArray #c
from sensor_msgs.msg import Image
from geometry_msgs.msg import Polygon, Point32
import cv2
from cv_bridge import CvBridge, CvBridgeError
# python import
import os
import argparse
import time
import math
package = RosPack()
img_size = (480, 360)
class DetectorManager():
    """ROS node: subscribes to a semantic-segmentation image stream, extracts
    drivable-area ray distances via morphology + ray casting, and publishes a
    debug image plus a Float32MultiArray of features for a DDPG agent.

    NOTE(review): this class uses ``np.*`` and ``transform_img`` but neither a
    ``numpy`` import nor a ``transform_img`` definition is visible in this
    file — confirm they are provided elsewhere or add the imports.
    """

    def __init__(self):
        # Load image parameter and confidence threshold
        self.image_topic = rospy.get_param('~image_topic', '/carla/ego_vehicle/camera/semantic_segmentation/front/image_segmentation')
        # Load publisher topics
        self.published_image_topic = rospy.get_param('~detections_image_topic')
        self.gpu_id = rospy.get_param('~gpu_id', 0)
        self.publish_image = rospy.get_param('~publish_image')
        # Load CvBridge
        self.bridge = CvBridge()
        # Define subscribers
        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.imageCb, queue_size = 1, buff_size = 2**24)
        self.pub_viz_ = rospy.Publisher(self.published_image_topic, Image, queue_size=10)
        self.pub_input_ = rospy.Publisher('/TFF/ddpg_camera_input', Float32MultiArray, queue_size=10) #c
        # Only input_msg1 is ever populated/published; 2-5 appear unused. TODO confirm.
        self.input_msg1 = Float32MultiArray()
        self.input_msg2 = Float32MultiArray()
        self.input_msg3 = Float32MultiArray()
        self.input_msg4 = Float32MultiArray()
        self.input_msg5 = Float32MultiArray()
        rospy.loginfo("Launched node for object detection")
        # Blocks here processing callbacks until the node is shut down.
        rospy.spin()

    def visualize(self, img):
        # Binarize the segmentation image, clean it with a morphological
        # open (erode then dilate), then cast five rays from the bottom
        # center to measure free-space distances.
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, bin_img = cv2.threshold(img_gray, 91, 255, cv2.THRESH_BINARY)
        kernel = np.ones((3, 3), np.uint8)
        erosion_img = cv2.erode(bin_img, kernel, iterations=3)
        dilation_img = cv2.dilate(erosion_img, kernel, iterations=3)
        line_img = np.zeros_like(img_gray)
        # Five rays with different per-step (row, col) increments.
        input1 = self.custom_draw_line(dilation_img, line_img, -1, -2, 1)
        input2 = self.custom_draw_line(dilation_img, line_img, -3, -2, 2)
        input3 = self.custom_draw_line(dilation_img, line_img, -3, 0, 3)
        input4 = self.custom_draw_line(dilation_img, line_img, -3, 2, 4)
        input5 = self.custom_draw_line(dilation_img, line_img, -1, 2, 5)
        # Average ray-end column and an aggregate goal point from rays 1,2,4,5.
        x_average = (input1[0] + input2[0] + input3[0] + input4[0] + input5[0]) // 5
        x_goal_draw, y_goal_draw = (input1[0]+input2[0]+input4[0]+input5[0])//4, (input1[1]+input2[1]+input4[1]+input5[1])//4
        y_goal, x_goal = (input1[0]+input2[0]+input4[0]+input5[0]-1440)//4, (input1[1]+input2[1]+input4[1]+input5[1]-960)//4
        theta = math.atan2(-y_goal, x_goal)
        # 57.3 ~ degrees per radian; goal_yaw is computed but not used here.
        goal_yaw = -(theta*57.3-90)
        # Flatten all five [row, col, dist] triples into one feature message.
        self.input_msg1.data = input1 + input2 + input3 + input4 + input5
        # Overlay the rays on the cleaned mask and draw debug markers.
        img = cv2.addWeighted(src1=line_img, alpha=1., src2=dilation_img, beta=0.3, gamma=0.)
        img[line_img.shape[0]-40:line_img.shape[0], x_average-10:x_average+10] = 150
        img = cv2.line(img, (y_goal_draw, x_goal_draw), (240, 360), 245, 3)
        return img

    def custom_draw_line(self, dilation_img, line_img, m, n, idx):
        # Cast a ray from the bottom-center pixel, stepping (m rows, n cols)
        # per iteration, until a non-zero (occupied) pixel is hit or 120
        # steps elapse. Draws the traversed ray and returns
        # [end_row, end_col, euclidean_distance].
        a, b = line_img.shape[0], line_img.shape[1]//2
        for i in range(0,120):
            if dilation_img[(line_img.shape[0]-1) +m*i][line_img.shape[1]//2 +n*i] == 0:
                a, b = ((line_img.shape[0]-1) +m*i, line_img.shape[1]//2 +n*i)
            else:
                line_img = cv2.line(line_img, (line_img.shape[1]//2, line_img.shape[0]), (b, a), 245, 3)
                break
        start_point= np.array((line_img.shape[1]//2, line_img.shape[0]))
        detect_point = np.array((b, a))
        dist = np.linalg.norm(start_point - detect_point)
        return [a, b, dist]

    def imageCb(self,frame):
        # ROS image callback: convert, preprocess, compute features, publish.
        frame = self.bridge.imgmsg_to_cv2(frame, "mono8")
        # loop_start is recorded but never read — presumably leftover timing code.
        loop_start = time.time()
        # transform_img is not defined in this file — TODO confirm its origin.
        frame = transform_img({'img': frame})['img']
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = self.visualize(img)
        image_msg = self.bridge.cv2_to_imgmsg(img, "mono8")
        self.pub_viz_.publish(image_msg)
        self.pub_input_.publish(self.input_msg1) #c
if __name__ == "__main__":
    # Initialize node
    rospy.init_node("detector_manager_node")
    # The constructor calls rospy.spin(), so this blocks until shutdown.
    dm = DetectorManager()
| 3,702 | 3 | 131 |
9c59cbd4c8733988e53305f4267a10fe2ea2df39 | 324 | py | Python | juegotruco/juegotruco/urls.py | germanferrero/truco | b073f1cbb6c44b00a3b6651e7dda0f3a419a9710 | [
"MIT"
] | null | null | null | juegotruco/juegotruco/urls.py | germanferrero/truco | b073f1cbb6c44b00a3b6651e7dda0f3a419a9710 | [
"MIT"
] | null | null | null | juegotruco/juegotruco/urls.py | germanferrero/truco | b073f1cbb6c44b00a3b6651e7dda0f3a419a9710 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^truco/', include('truco.urls', namespace="truco")),
url(r'^usuarios/', include('usuarios.urls', namespace="usuarios")),
)
| 27 | 71 | 0.685185 | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register ModelAdmin classes from each installed app's admin module.
admin.autodiscover()
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on upgrade, replace with a plain list of url()/path() entries.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^truco/', include('truco.urls', namespace="truco")),
    url(r'^usuarios/', include('usuarios.urls', namespace="usuarios")),
)
| 0 | 0 | 0 |
783a5ff947b88f8438dd0c957fdf50140a48a0dd | 1,277 | py | Python | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_02.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 3 | 2021-12-30T21:56:58.000Z | 2022-02-20T11:19:12.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_02.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 10 | 2021-11-13T21:18:33.000Z | 2022-03-11T23:11:23.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_02.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 2 | 2022-02-06T11:24:43.000Z | 2022-02-09T20:13:40.000Z | #!/usr/bin/env python3
"""
Description:
Usage:
$> roslaunch turtle_nodes.launch
$> ./executive_step_02.py
Output:
[INFO] : State machine starting in initial state 'RESET' with userdata:
[]
[INFO] : State machine transitioning 'RESET':'succeeded'-->'SPAWN'
[INFO] : State machine terminating 'SPAWN':'succeeded':'succeeded'
"""
import rospy
import threading
import smach
from smach import StateMachine, ServiceState, SimpleActionState
import std_srvs.srv
import turtlesim.srv
if __name__ == '__main__':
main()
| 24.09434 | 81 | 0.643696 | #!/usr/bin/env python3
"""
Description:
Usage:
$> roslaunch turtle_nodes.launch
$> ./executive_step_02.py
Output:
[INFO] : State machine starting in initial state 'RESET' with userdata:
[]
[INFO] : State machine transitioning 'RESET':'succeeded'-->'SPAWN'
[INFO] : State machine terminating 'SPAWN':'succeeded':'succeeded'
"""
import rospy
import threading
import smach
from smach import StateMachine, ServiceState, SimpleActionState
import std_srvs.srv
import turtlesim.srv
def main():
rospy.init_node('smach_usecase_step_02')
# Create a SMACH state machine
sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
# Open the container
with sm0:
# Reset turtlesim
StateMachine.add('RESET',
ServiceState('reset', std_srvs.srv.Empty),
{'succeeded':'SPAWN'})
# Create a second turtle
StateMachine.add('SPAWN',
ServiceState('spawn', turtlesim.srv.Spawn,
request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')))
# Execute SMACH tree
outcome = sm0.execute()
# Signal ROS shutdown (kill threads in background)
rospy.signal_shutdown('All done.')
if __name__ == '__main__':
main()
| 696 | 0 | 23 |
311f9e2d29caacb6a49a3c6660fb6c0e9015e8f5 | 4,068 | py | Python | predict.py | fzbio/GILoop | c4845a9f5c5bf8654640f823786f4e4dd6576169 | [
"MIT"
] | 1 | 2022-03-07T19:16:25.000Z | 2022-03-07T19:16:25.000Z | predict.py | fzbio/GILoop | c4845a9f5c5bf8654640f823786f4e4dd6576169 | [
"MIT"
] | null | null | null | predict.py | fzbio/GILoop | c4845a9f5c5bf8654640f823786f4e4dd6576169 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
from hickit.reader import get_headers, get_chrom_sizes
import tensorflow as tf
import json
import tensorflow_addons as tfa
from utils import *
import gc
from sklearn.metrics import f1_score, average_precision_score
def run_output_predictions(run_id, model_stage, threshold, target_dataset_name, target_assembly, chroms, output_path, mode):
"""
:param run_id: String - The string that specifies the run of experiment
:param model_stage: String - can only be 'GNN', 'CNN', or 'Finetune'
:param threshold: Float - The probability threshold
:param target_dataset_name: String - The name of dataset you want to predict on
:param chroms: List - Chromosome list we want to predict on. e.g. ['1', '2', 'X']
:param target_assembly: String - 'hg19' or 'hg38'
:param output_path: String - The path to the output file
:param mode: String - 'test' or 'realworld'. Test mode means the target cell line has the ground truth ChIA-PET
data and the program will calculate the PRAUC for it. 'realworld' mode does not print PRAUC because the target
dataset does not have label.
:return: Pandas dataframe contains the genome-wide annotations
"""
dataset_dir = os.path.join('dataset', target_dataset_name)
model_path = os.path.join('models', run_id + '_' + model_stage)
chrom_size_path = '{}.chrom.sizes'.format(target_assembly)
extra_config_path = os.path.join('configs', '{}_extra_settings.json'.format(run_id))
with open(extra_config_path) as fp:
saved_upper_bound = json.load(fp)['graph_upper_bound']
pred_dfs = []
ys = []
y_preds = []
for chrom in chroms:
model = tf.keras.models.load_model(model_path)
indicator_path = os.path.join(dataset_dir, 'indicators.{}.csv'.format(chrom))
identical_path = os.path.join(dataset_dir, 'graph_identical.{}.npy'.format(chrom))
images, graphs, y, features = read_data_with_motif([chrom], dataset_dir, IMAGE_SIZE)
graphs = normalise_graphs(scale_hic(graphs, saved_upper_bound))
test_y_pred = np.asarray(model.predict([images, features, graphs])[1])
ys.append(y.flatten())
y_preds.append(test_y_pred.flatten())
chrom_proba, chrom_gt = get_chrom_proba(
chrom,
get_chrom_sizes(chrom_size_path),
10000,
test_y_pred,
y,
indicator_path,
identical_path,
IMAGE_SIZE
)
current_df = get_chrom_pred_df(
chrom, chrom_proba, threshold,
get_headers([chrom], get_chrom_sizes(chrom_size_path), 10000),
)
pred_dfs.append(current_df)
del model
gc.collect()
tf.keras.backend.clear_session()
if mode == 'test':
print('PRAUC on the target cell line is {}'.format(
average_precision_score(np.concatenate(ys), np.concatenate(y_preds))
))
full_pred_df = pd.concat(pred_dfs)
full_pred_df.to_csv(output_path, sep='\t', index=False, header=False)
return full_pred_df
if __name__ == '__main__':
run_output_predictions(
'gm12878_ctcf_50', # Specify the ID of a pre-trained model
'Finetune', # Specify using which stage of the model to make prediction
0.48, # Set the probability threshold
'hela_100', # Specify the name of the dataset you want to predict on
'hg38', # The genome assembly of the target dataset
['1'], # Annotate on which Chromosomes
'predictions/hela_test.bedpe', # The output file path
'test' # Test mode means the target dataset has label; 'realworld' mode
# means the target cell line does not have label
)
| 47.302326 | 124 | 0.622173 | import pandas as pd
import numpy as np
import os
from hickit.reader import get_headers, get_chrom_sizes
import tensorflow as tf
import json
import tensorflow_addons as tfa
from utils import *
import gc
from sklearn.metrics import f1_score, average_precision_score
def run_output_predictions(run_id, model_stage, threshold, target_dataset_name, target_assembly, chroms, output_path, mode):
    """
    :param run_id: String - The string that specifies the run of experiment
    :param model_stage: String - can only be 'GNN', 'CNN', or 'Finetune'
    :param threshold: Float - The probability threshold
    :param target_dataset_name: String - The name of dataset you want to predict on
    :param chroms: List - Chromosome list we want to predict on. e.g. ['1', '2', 'X']
    :param target_assembly: String - 'hg19' or 'hg38'
    :param output_path: String - The path to the output file
    :param mode: String - 'test' or 'realworld'. Test mode means the target cell line has the ground truth ChIA-PET
    data and the program will calculate the PRAUC for it. 'realworld' mode does not print PRAUC because the target
    dataset does not have label.
    :return: Pandas dataframe contains the genome-wide annotations
    """
    dataset_dir = os.path.join('dataset', target_dataset_name)
    model_path = os.path.join('models', run_id + '_' + model_stage)
    chrom_size_path = '{}.chrom.sizes'.format(target_assembly)
    extra_config_path = os.path.join('configs', '{}_extra_settings.json'.format(run_id))
    # Graph-scaling upper bound saved at training time; reused here so the
    # target data is normalized the same way the model was trained.
    with open(extra_config_path) as fp:
        saved_upper_bound = json.load(fp)['graph_upper_bound']
    pred_dfs = []
    ys = []
    y_preds = []
    for chrom in chroms:
        # Reload a fresh model per chromosome; clear_session() below keeps
        # TF graph/GPU memory bounded across iterations.
        model = tf.keras.models.load_model(model_path)
        indicator_path = os.path.join(dataset_dir, 'indicators.{}.csv'.format(chrom))
        identical_path = os.path.join(dataset_dir, 'graph_identical.{}.npy'.format(chrom))
        # IMAGE_SIZE comes from the star-import of utils.
        images, graphs, y, features = read_data_with_motif([chrom], dataset_dir, IMAGE_SIZE)
        graphs = normalise_graphs(scale_hic(graphs, saved_upper_bound))
        # The model's second output head holds the per-patch probabilities.
        test_y_pred = np.asarray(model.predict([images, features, graphs])[1])
        ys.append(y.flatten())
        y_preds.append(test_y_pred.flatten())
        # chrom_gt is returned but not used below. TODO confirm it can be dropped.
        chrom_proba, chrom_gt = get_chrom_proba(
            chrom,
            get_chrom_sizes(chrom_size_path),
            10000,
            test_y_pred,
            y,
            indicator_path,
            identical_path,
            IMAGE_SIZE
        )
        # Apply the probability threshold and convert to a BEDPE-style table.
        current_df = get_chrom_pred_df(
            chrom, chrom_proba, threshold,
            get_headers([chrom], get_chrom_sizes(chrom_size_path), 10000),
        )
        pred_dfs.append(current_df)
        del model
        gc.collect()
        tf.keras.backend.clear_session()
    if mode == 'test':
        # Only meaningful when the target dataset ships ground-truth labels.
        print('PRAUC on the target cell line is {}'.format(
            average_precision_score(np.concatenate(ys), np.concatenate(y_preds))
        ))
    full_pred_df = pd.concat(pred_dfs)
    full_pred_df.to_csv(output_path, sep='\t', index=False, header=False)
    return full_pred_df
if __name__ == '__main__':
    # Example invocation — edit these arguments for your own run.
    run_output_predictions(
        'gm12878_ctcf_50',  # Specify the ID of a pre-trained model
        'Finetune',  # Specify using which stage of the model to make prediction
        0.48,  # Set the probability threshold
        'hela_100',  # Specify the name of the dataset you want to predict on
        'hg38',  # The genome assembly of the target dataset
        ['1'],  # Annotate on which Chromosomes
        'predictions/hela_test.bedpe',  # The output file path
        'test'  # Test mode means the target dataset has label; 'realworld' mode
                # means the target cell line does not have label
    )
| 0 | 0 | 0 |
d158e24c09e1ef821cbfa90a4530d19a3e56e9e9 | 7,941 | py | Python | tests/test__pipeline.py | kevinmooreiii/old-elstruct | c1aa3dd0c34626e6887e1c903de2a9b977ef4163 | [
"Apache-2.0"
] | null | null | null | tests/test__pipeline.py | kevinmooreiii/old-elstruct | c1aa3dd0c34626e6887e1c903de2a9b977ef4163 | [
"Apache-2.0"
] | null | null | null | tests/test__pipeline.py | kevinmooreiii/old-elstruct | c1aa3dd0c34626e6887e1c903de2a9b977ef4163 | [
"Apache-2.0"
] | null | null | null | """ test elstruct writer/run/reader pipelines
"""
import warnings
import tempfile
import numpy
import automol
import elstruct
# Shell runner per electronic-structure program. A value of None means no
# local executable is available, so the pipeline only writes the input file
# without executing it (see the `script_str is not None` guards below).
SCRIPT_DCT = {
    'cfour2': None,
    'gaussian09': None,
    'gaussian16': None,
    'molpro2015': None,
    'mrcc2018': None,
    'nwchem6': None,
    'orca4': None,
    'psi4': "#!/usr/bin/env bash\n"
            "psi4 -i run.inp -o run.out >> stdout.log &> stderr.log",
}
def test__energy():
    """ test the energy pipeline
    """
    basis = '6-31g'
    # Bent water geometry as (element, xyz) tuples.
    geom = (('O', (0.0, 0.0, -0.110)),
            ('H', (0.0, -1.635, 0.876)),
            ('H', (-0.0, 1.635, 0.876)))
    # Paired (multiplicity, charge): neutral singlet and cation doublet.
    mult_vals = [1, 2]
    charge_vals = [0, 1]
    # Sweep every program x method x spin-state x orbital-restriction combo.
    for prog in elstruct.writer.programs():
        for method in elstruct.program_methods(prog):
            for mult, charge in zip(mult_vals, charge_vals):
                for orb_restricted in (
                        elstruct.program_method_orbital_restrictions(
                            prog, method, singlet=(mult == 1))):
                    # error/error_kwargs force an SCF non-convergence retry
                    # path with a tiny MAXITER to exercise error handling.
                    vals = _test_pipeline(
                        script_str=SCRIPT_DCT[prog],
                        writer=elstruct.writer.energy,
                        readers=(
                            elstruct.reader.energy_(prog, method),
                        ),
                        args=(geom, charge, mult, method, basis, prog),
                        kwargs={'orb_restricted': orb_restricted},
                        error=elstruct.Error.SCF_NOCONV,
                        error_kwargs={'scf_options': [
                            elstruct.option.specify(
                                elstruct.Option.Scf.MAXITER_, 2)
                        ]},
                    )
                    print(vals)
def test__gradient():
    """ test the gradient pipeline
    """
    basis = 'sto-3g'
    # Bent water geometry as (element, xyz) tuples.
    geom = (('O', (0.0, 0.0, -0.110)),
            ('H', (0.0, -1.635, 0.876)),
            ('H', (-0.0, 1.635, 0.876)))
    # Paired (multiplicity, charge): neutral singlet and cation doublet.
    mult_vals = [1, 2]
    charge_vals = [0, 1]
    for prog in elstruct.writer.gradient_programs():
        # All non-DFT methods plus one randomly sampled DFT method, to keep
        # the sweep affordable while still covering the DFT code path.
        methods = list(elstruct.program_nondft_methods(prog))
        dft_methods = list(elstruct.program_dft_methods(prog))
        if dft_methods:
            methods.append(numpy.random.choice(dft_methods))
        for method in methods:
            for mult, charge in zip(mult_vals, charge_vals):
                for orb_restricted in (
                        elstruct.program_method_orbital_restrictions(
                            prog, method, singlet=(mult == 1))):
                    # Round-trip: write input, optionally run, read energy + gradient.
                    vals = _test_pipeline(
                        script_str=SCRIPT_DCT[prog],
                        writer=elstruct.writer.gradient,
                        readers=(
                            elstruct.reader.energy_(prog, method),
                            elstruct.reader.gradient_(prog),
                        ),
                        args=(geom, charge, mult, method, basis, prog),
                        kwargs={'orb_restricted': orb_restricted},
                    )
                    print(vals)
def test__hessian():
    """ test the hessian pipeline
    """
    basis = 'sto-3g'
    # Bent water geometry as (element, xyz) tuples.
    geom = (('O', (0.0, 0.0, -0.110)),
            ('H', (0.0, -1.635, 0.876)),
            ('H', (-0.0, 1.635, 0.876)))
    # Paired (multiplicity, charge): neutral singlet and cation doublet.
    mult_vals = [1, 2]
    charge_vals = [0, 1]
    for prog in elstruct.writer.hessian_programs():
        # Same method-sampling strategy as test__gradient: all non-DFT
        # methods plus one random DFT method.
        methods = list(elstruct.program_nondft_methods(prog))
        dft_methods = list(elstruct.program_dft_methods(prog))
        if dft_methods:
            methods.append(numpy.random.choice(dft_methods))
        for method in methods:
            for mult, charge in zip(mult_vals, charge_vals):
                for orb_restricted in (
                        elstruct.program_method_orbital_restrictions(
                            prog, method, singlet=(mult == 1))):
                    # Round-trip: write input, optionally run, read energy + hessian.
                    vals = _test_pipeline(
                        script_str=SCRIPT_DCT[prog],
                        writer=elstruct.writer.hessian,
                        readers=(
                            elstruct.reader.energy_(prog, method),
                            elstruct.reader.hessian_(prog),
                        ),
                        args=(geom, charge, mult, method, basis, prog),
                        kwargs={'orb_restricted': orb_restricted},
                    )
                    print(vals)
def test__optimization():
    """ test elstruct optimization writes and reads
    """
    method = 'hf'
    basis = 'sto-3g'
    # Methanol Z-matrix: (connectivity, coordinate-name) table plus the
    # dict of initial internal-coordinate values (distances/angles).
    geom = ((('C', (None, None, None), (None, None, None)),
             ('O', (0, None, None), ('R1', None, None)),
             ('H', (0, 1, None), ('R2', 'A2', None)),
             ('H', (0, 1, 2), ('R3', 'A3', 'D3')),
             ('H', (0, 1, 2), ('R4', 'A4', 'D4')),
             ('H', (1, 0, 2), ('R5', 'A5', 'D5'))),
            {'R1': 2.6, 'R2': 2.0, 'A2': 1.9,
             'R3': 2.0, 'A3': 1.9, 'D3': 2.1,
             'R4': 2.0, 'A4': 1.9, 'D4': 4.1,
             'R5': 1.8, 'A5': 1.8, 'D5': 5.2})
    mult = 1
    charge = 0
    orb_restricted = True
    # These internals are held fixed during the optimization; their reference
    # values are checked against the optimized Z-matrix afterwards.
    frozen_coordinates = ('R5', 'A5', 'D3')
    ref_frozen_values = (1.8, 1.8, 2.1)
    for prog in elstruct.writer.optimization_programs():
        script_str = SCRIPT_DCT[prog]
        # MRCC2018 does not support constrained optimizations
        if prog != 'mrcc2018':
            opt_kwargs = {'orb_restricted': orb_restricted,
                          'frozen_coordinates': frozen_coordinates}
        else:
            opt_kwargs = {'orb_restricted': orb_restricted}
        # error/error_kwargs exercise the optimization non-convergence retry
        # path with a tiny MAXITER.
        vals = _test_pipeline(
            script_str=script_str,
            writer=elstruct.writer.optimization,
            readers=(
                elstruct.reader.energy_(prog, method),
                elstruct.reader.opt_geometry_(prog),
                elstruct.reader.opt_zmatrix_(prog),
            ),
            args=(geom, charge, mult, method, basis, prog),
            kwargs=opt_kwargs,
            error=elstruct.Error.OPT_NOCONV,
            error_kwargs={'job_options': [
                elstruct.option.specify(
                    elstruct.Option.Opt.MAXITER_, 2)
            ]},
        )
        print(vals)
        if script_str is not None:
            # check that the frozen coordinates didn't change
            zma = vals[-1]
            val_dct = automol.zmatrix.values(zma)
            frozen_values = tuple(
                map(val_dct.__getitem__, frozen_coordinates))
            assert numpy.allclose(
                frozen_values, ref_frozen_values, rtol=1e-4)
if __name__ == '__main__':
    # Run every pipeline smoke test in sequence.
    for pipeline_case in (test__energy, test__gradient,
                          test__hessian, test__optimization):
        pipeline_case()
| 34.982379 | 71 | 0.501574 | """ test elstruct writer/run/reader pipelines
"""
import warnings
import tempfile
import numpy
import automol
import elstruct
SCRIPT_DCT = {
'cfour2': None,
'gaussian09': None,
'gaussian16': None,
'molpro2015': None,
'mrcc2018': None,
'nwchem6': None,
'orca4': None,
'psi4': "#!/usr/bin/env bash\n"
"psi4 -i run.inp -o run.out >> stdout.log &> stderr.log",
}
def test__energy():
""" test the energy pipeline
"""
basis = '6-31g'
geom = (('O', (0.0, 0.0, -0.110)),
('H', (0.0, -1.635, 0.876)),
('H', (-0.0, 1.635, 0.876)))
mult_vals = [1, 2]
charge_vals = [0, 1]
for prog in elstruct.writer.programs():
for method in elstruct.program_methods(prog):
for mult, charge in zip(mult_vals, charge_vals):
for orb_restricted in (
elstruct.program_method_orbital_restrictions(
prog, method, singlet=(mult == 1))):
vals = _test_pipeline(
script_str=SCRIPT_DCT[prog],
writer=elstruct.writer.energy,
readers=(
elstruct.reader.energy_(prog, method),
),
args=(geom, charge, mult, method, basis, prog),
kwargs={'orb_restricted': orb_restricted},
error=elstruct.Error.SCF_NOCONV,
error_kwargs={'scf_options': [
elstruct.option.specify(
elstruct.Option.Scf.MAXITER_, 2)
]},
)
print(vals)
def test__gradient():
""" test the gradient pipeline
"""
basis = 'sto-3g'
geom = (('O', (0.0, 0.0, -0.110)),
('H', (0.0, -1.635, 0.876)),
('H', (-0.0, 1.635, 0.876)))
mult_vals = [1, 2]
charge_vals = [0, 1]
for prog in elstruct.writer.gradient_programs():
methods = list(elstruct.program_nondft_methods(prog))
dft_methods = list(elstruct.program_dft_methods(prog))
if dft_methods:
methods.append(numpy.random.choice(dft_methods))
for method in methods:
for mult, charge in zip(mult_vals, charge_vals):
for orb_restricted in (
elstruct.program_method_orbital_restrictions(
prog, method, singlet=(mult == 1))):
vals = _test_pipeline(
script_str=SCRIPT_DCT[prog],
writer=elstruct.writer.gradient,
readers=(
elstruct.reader.energy_(prog, method),
elstruct.reader.gradient_(prog),
),
args=(geom, charge, mult, method, basis, prog),
kwargs={'orb_restricted': orb_restricted},
)
print(vals)
def test__hessian():
""" test the hessian pipeline
"""
basis = 'sto-3g'
geom = (('O', (0.0, 0.0, -0.110)),
('H', (0.0, -1.635, 0.876)),
('H', (-0.0, 1.635, 0.876)))
mult_vals = [1, 2]
charge_vals = [0, 1]
for prog in elstruct.writer.hessian_programs():
methods = list(elstruct.program_nondft_methods(prog))
dft_methods = list(elstruct.program_dft_methods(prog))
if dft_methods:
methods.append(numpy.random.choice(dft_methods))
for method in methods:
for mult, charge in zip(mult_vals, charge_vals):
for orb_restricted in (
elstruct.program_method_orbital_restrictions(
prog, method, singlet=(mult == 1))):
vals = _test_pipeline(
script_str=SCRIPT_DCT[prog],
writer=elstruct.writer.hessian,
readers=(
elstruct.reader.energy_(prog, method),
elstruct.reader.hessian_(prog),
),
args=(geom, charge, mult, method, basis, prog),
kwargs={'orb_restricted': orb_restricted},
)
print(vals)
def test__optimization():
    """ test elstruct optimization writes and reads
    """
    method = 'hf'
    basis = 'sto-3g'
    # Methanol Z-matrix: per-atom (symbol, key references, coordinate names)
    # followed by the dictionary of initial coordinate values.
    geom = ((('C', (None, None, None), (None, None, None)),
             ('O', (0, None, None), ('R1', None, None)),
             ('H', (0, 1, None), ('R2', 'A2', None)),
             ('H', (0, 1, 2), ('R3', 'A3', 'D3')),
             ('H', (0, 1, 2), ('R4', 'A4', 'D4')),
             ('H', (1, 0, 2), ('R5', 'A5', 'D5'))),
            {'R1': 2.6, 'R2': 2.0, 'A2': 1.9,
             'R3': 2.0, 'A3': 1.9, 'D3': 2.1,
             'R4': 2.0, 'A4': 1.9, 'D4': 4.1,
             'R5': 1.8, 'A5': 1.8, 'D5': 5.2})
    mult = 1
    charge = 0
    orb_restricted = True
    # These coordinates are held fixed; their initial values must survive
    # the optimization unchanged (checked below).
    frozen_coordinates = ('R5', 'A5', 'D3')
    ref_frozen_values = (1.8, 1.8, 2.1)
    for prog in elstruct.writer.optimization_programs():
        script_str = SCRIPT_DCT[prog]
        # MRCC2018 does not support constrained optimizations
        if prog != 'mrcc2018':
            opt_kwargs = {'orb_restricted': orb_restricted,
                          'frozen_coordinates': frozen_coordinates}
        else:
            opt_kwargs = {'orb_restricted': orb_restricted}
        # Write/run/read; additionally force a non-convergence error by
        # capping the optimizer at 2 iterations and assert it is detected.
        vals = _test_pipeline(
            script_str=script_str,
            writer=elstruct.writer.optimization,
            readers=(
                elstruct.reader.energy_(prog, method),
                elstruct.reader.opt_geometry_(prog),
                elstruct.reader.opt_zmatrix_(prog),
            ),
            args=(geom, charge, mult, method, basis, prog),
            kwargs=opt_kwargs,
            error=elstruct.Error.OPT_NOCONV,
            error_kwargs={'job_options': [
                elstruct.option.specify(
                    elstruct.Option.Opt.MAXITER_, 2)
            ]},
        )
        print(vals)
        if script_str is not None:
            # check that the frozen coordinates didn't change
            zma = vals[-1]
            val_dct = automol.zmatrix.values(zma)
            frozen_values = tuple(
                map(val_dct.__getitem__, frozen_coordinates))
            assert numpy.allclose(
                frozen_values, ref_frozen_values, rtol=1e-4)
def _test_pipeline(script_str, writer, readers,
                   args, kwargs, error=None, error_kwargs=None):
    """Exercise the write/run/read cycle for one electronic-structure job.

    :param script_str: shell script used to run the program, or None to only
        test input-file generation (nothing is executed in that case)
    :param writer: elstruct input-writer function
    :param readers: reader functions applied, in order, to the output string
    :param args: positional writer arguments; args[-1] is the program name
    :param kwargs: keyword arguments for the writer
    :param error: if not None, re-run with ``error_kwargs`` (options intended
        to provoke this elstruct error) and assert the error is detected
    :param error_kwargs: extra writer kwargs used to provoke the error
    :return: list of values read from the successful run (empty when not run)
    """
    read_vals = []
    prog = args[-1]
    # for programs with no run test, ensure input file generated
    _ = writer(*args, **kwargs)
    if script_str is not None:
        script_str = SCRIPT_DCT[prog]
        run_dir = tempfile.mkdtemp()
        _, out_str = elstruct.run.direct(
            writer, script_str, run_dir, *args, **kwargs)
        assert elstruct.reader.has_normal_exit_message(prog, out_str)
        for reader in readers:
            val = reader(out_str)
            read_vals.append(val)
        if error is not None:
            run_dir = tempfile.mkdtemp()
            # Sanity check: the successful output must not already contain
            # the error we are about to provoke.
            assert not elstruct.reader.has_error_message(prog, error,
                                                         out_str)
            err_kwargs = kwargs.copy()
            err_kwargs.update(error_kwargs)
            # Silence warnings emitted by the deliberately failing run.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                _, err_out_str = elstruct.run.direct(
                    writer, script_str, run_dir, *args, **err_kwargs)
            assert elstruct.reader.has_error_message(prog, error,
                                                     err_out_str)
    return read_vals
# Run the full pipeline test suite when executed as a script.
if __name__ == '__main__':
    test__energy()
    test__gradient()
    test__hessian()
    test__optimization()
| 1,268 | 0 | 23 |
5c0c594de1d8ed9bde7a2cc16d5e17047639c00f | 564 | py | Python | setup.py | Savahi/tnn | 21cd0c0e1827b159ccfb8668495b25f3c9486c75 | [
"MIT"
] | null | null | null | setup.py | Savahi/tnn | 21cd0c0e1827b159ccfb8668495b25f3c9486c75 | [
"MIT"
] | null | null | null | setup.py | Savahi/tnn | 21cd0c0e1827b159ccfb8668495b25f3c9486c75 | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata for the tnn distribution.
setup(
    name = 'tnn',
    version = '0.0.4',
    description = 'Tensorflow Neural Network Framework for Algorithmic Traders',
    url = 'http://github.com/Savahi/tnn',
    author = 'Savahi',
    author_email = 'sh@tradingene.ru',
    license = 'MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
    ],
    packages = ['tnn'],
    keywords = 'neural network tensorflow algorithmic trading stock exchange',
    # Fix: 'datetime', 'shelve' and 'os' are standard-library modules, not
    # PyPI distributions -- listing them made `pip install` fail (there is
    # no installable 'os' package).  Only real PyPI dependencies remain.
    install_requires = ['tensorflow', 'numpy', 'taft'],
    zip_safe = False )
| 26.857143 | 80 | 0.675532 | from setuptools import setup
# Package metadata for the tnn distribution.
setup(
    name = 'tnn',
    version = '0.0.4',
    description = 'Tensorflow Neural Network Framework for Algorithmic Traders',
    url = 'http://github.com/Savahi/tnn',
    author = 'Savahi',
    author_email = 'sh@tradingene.ru',
    license = 'MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
    ],
    packages = ['tnn'],
    keywords = 'neural network tensorflow algorithmic trading stock exchange',
    # Fix: 'datetime', 'shelve' and 'os' are standard-library modules, not
    # PyPI distributions -- listing them made `pip install` fail (there is
    # no installable 'os' package).  Only real PyPI dependencies remain.
    install_requires = ['tensorflow', 'numpy', 'taft'],
    zip_safe = False )
| 0 | 0 | 0 |
2c1743b80d8ca24e2b651a50e4b89ff286c10af4 | 9,099 | py | Python | analysis/Austin/project_functions.py | data301-2020-winter2/course-project-group_1030 | f051b7c55e91ded54b0854083c2a750b13f31cc6 | [
"MIT"
] | 1 | 2021-02-16T01:22:07.000Z | 2021-02-16T01:22:07.000Z | analysis/Austin/project_functions.py | data301-2020-winter2/course-project-group_1030 | f051b7c55e91ded54b0854083c2a750b13f31cc6 | [
"MIT"
] | 1 | 2021-03-23T07:48:23.000Z | 2021-03-29T23:50:06.000Z | analysis/Austin/project_functions.py | data301-2020-winter2/course-project-group_1030 | f051b7c55e91ded54b0854083c2a750b13f31cc6 | [
"MIT"
] | 1 | 2021-02-16T01:31:17.000Z | 2021-02-16T01:31:17.000Z | import pandas as pd
import numpy as np | 51.117978 | 391 | 0.639301 | import pandas as pd
import numpy as np
def load_and_process(csv):
    """Load the Steam games CSV and return a cleaned DataFrame.

    Steps: drop non-game rows, drop columns not used by the analysis,
    rename the remaining key columns, and add a ``RevenueMillions`` column.

    :param csv: path to the raw Steam games CSV file
    :return: cleaned pandas DataFrame
    """
    data = pd.read_csv(csv)
    # Keep only rows that are actual games.
    df1 = pd.DataFrame(data[data['GenreIsNonGame'] != True])
    # Columns not needed for the analysis.  Dropping them in a single call
    # replaces the original 58-link ``.drop(..., axis=1)`` chain; a missing
    # column still raises KeyError, exactly as before.
    unwanted = [
        'DemoCount', 'Reviews', 'Website', 'HeaderImage', 'DRMNotice',
        'DLCCount', 'DeveloperCount', 'LegalNotice', 'ExtUserAcctNotice',
        'MovieCount', 'RequiredAge', 'PublisherCount', 'ScreenshotCount',
        'Background', 'AboutText', 'ShortDescrip', 'DetailedDescrip',
        'SupportEmail', 'SupportURL', 'SupportedLanguages', 'PriceCurrency',
        'LinuxMinReqsText', 'LinuxRecReqsText', 'PCRecReqsText',
        'PCMinReqsText', 'MacMinReqsText', 'MacRecReqsText', 'PackageCount',
        'SteamSpyOwnersVariance', 'SteamSpyPlayersVariance',
        'AchievementCount', 'AchievementHighlightedCount',
        'ControllerSupport', 'SteamSpyPlayersEstimate', 'FreeVerAvail',
        'PurchaseAvail', 'SubscriptionAvail', 'PlatformWindows',
        'PlatformLinux', 'PlatformMac', 'PCReqsHaveMin', 'PCReqsHaveRec',
        'LinuxReqsHaveMin', 'LinuxReqsHaveRec', 'MacReqsHaveMin',
        'MacReqsHaveRec', 'CategorySinglePlayer', 'CategoryMultiplayer',
        'CategoryCoop', 'CategoryMMO', 'CategoryIncludeSrcSDK',
        'CategoryIncludeLevelEditor', 'CategoryVRSupport', 'GenreIsNonGame',
        'QueryName', 'QueryID', 'ResponseID', 'IsFree',
    ]
    df1 = df1.drop(columns=unwanted)
    # Rename the key columns to friendlier analysis names.
    df2 = df1.rename(columns={
        "Metacritic": "Rating",
        "SteamSpyOwners": "Owners",
        "RecommendationCount": "Recommendations",
        "ResponseName": "Games",
    })
    # NOTE: revenue is computed from the *unfiltered* frame ``data``; pandas
    # aligns the Series on the index, so only surviving rows receive values.
    # This matches the original behaviour exactly.
    df3 = df2.assign(
        RevenueMillions=data.SteamSpyOwners * data.PriceFinal / 1000000)
    return df3
def Column_var_sort(df, col, up_down):
    """Sort *df* by column *col*.

    :param df: DataFrame to sort
    :param col: column label to sort on
    :param up_down: passed through as ``ascending`` (True = smallest first)
    :return: a new, sorted DataFrame
    """
    ordered = df.sort_values(by=col, ascending=up_down)
    return ordered
def Rating_Sort(df, val):
    """Return the rows of *df* whose 'Rating' is strictly greater than *val*."""
    above_threshold = df['Rating'] > val
    return df[above_threshold]
def Split_Genre(df, col):
    """Return only the rows of *df* whose flag column *col* equals True."""
    flag_mask = df[col] == True  # noqa: E712 -- keep the original equality test
    return df.loc[flag_mask]
def plotOwners(df):
    """Print per-genre Steam owner totals and draw them as a bar chart.

    :param df: DataFrame from ``load_and_process`` (needs the boolean
        ``GenreIs*`` flag columns and an ``Owners`` column).
    """
    # (console label, bar-chart label, genre flag column).  Console labels
    # reproduce the original spacing exactly so printed output is unchanged.
    # This table replaces twelve copy-pasted filter/sum/print stanzas.
    genres = [
        (' Indie : ', 'Indie', 'GenreIsIndie'),
        (' Action : ', 'Action', 'GenreIsAction'),
        (' Casual : ', 'Casual', 'GenreIsCasual'),
        (' Adventure : ', 'Adventure', 'GenreIsAdventure'),
        (' Strategy: ', 'Strategy', 'GenreIsStrategy'),
        ('RPG : ', 'RPG', 'GenreIsRPG'),
        (' Simulation : ', 'Simulation', 'GenreIsSimulation'),
        (' Early Access : ', 'Early Access', 'GenreIsEarlyAccess'),
        (' Free To Play : ', 'Free to Play', 'GenreIsFreeToPlay'),
        (' Sports : ', 'Sports', 'GenreIsSports'),
        (' Racing : ', 'Racing', 'GenreIsRacing'),
        (' Massively Multiplayer : ', 'Massively Multiplayer',
         'GenreIsMassivelyMultiplayer'),
    ]
    totals = [df["Owners"].sum()]
    print_args = ['Total Games : ', totals[0]]
    for label, _, flag in genres:
        subtotal = df[df[flag] == True]["Owners"].sum()  # noqa: E712
        totals.append(subtotal)
        print_args.extend([label, subtotal])
    # print(*args) inserts a single space between arguments, matching the
    # original multi-argument print call byte for byte.
    print(*print_args)
    ap = {"Genre": ["Total"] + [bar for _, bar, _ in genres],
          "Owners": totals}
    dataFrame = pd.DataFrame(data=ap)
    dataFrame.plot.bar(x="Genre", y="Owners")
def Genrecount(df):
    """Print the number of games per genre and draw the counts as a bar chart.

    :param df: DataFrame from ``load_and_process`` (needs the boolean
        ``GenreIs*`` flag columns and a ``Games`` column).
    """
    # (console label, bar-chart label, genre flag column).  Console labels
    # reproduce the original spacing exactly so printed output is unchanged.
    # This table replaces twelve copy-pasted filter/count/print stanzas.
    genres = [
        (' Indie Games : ', 'Indie', 'GenreIsIndie'),
        (' Action Games : ', 'Action', 'GenreIsAction'),
        (' Casual Games : ', 'Casual', 'GenreIsCasual'),
        (' Adventure Games : ', 'Adventure', 'GenreIsAdventure'),
        (' Strategy Games: ', 'Strategy', 'GenreIsStrategy'),
        ('RPG : ', 'RPG', 'GenreIsRPG'),
        (' Simulation Games : ', 'Simulation', 'GenreIsSimulation'),
        (' Early Access : ', 'Early Access', 'GenreIsEarlyAccess'),
        (' Free To Play : ', 'Free to Play', 'GenreIsFreeToPlay'),
        (' Sports : ', 'Sports', 'GenreIsSports'),
        (' Racing : ', 'Racing', 'GenreIsRacing'),
        (' Massively Multiplayer : ', 'Massively Multiplayer',
         'GenreIsMassivelyMultiplayer'),
    ]
    counts = [df['Games'].count()]
    print_args = ['Total Games : ', counts[0]]
    for label, _, flag in genres:
        genre_count = df[df[flag] == True]['Games'].count()  # noqa: E712
        counts.append(genre_count)
        print_args.extend([label, genre_count])
    # print(*args) inserts a single space between arguments, matching the
    # original multi-argument print call byte for byte.
    print(*print_args)
    ap = {"Genre": ["Total"] + [bar for _, bar, _ in genres],
          "Games": counts}
    dataFrame = pd.DataFrame(data=ap)
    dataFrame.plot.bar(x="Genre", y="Games")
def plotRevenue(df):
    """Print per-genre revenue (in millions) and draw it as a bar chart.

    :param df: DataFrame from ``load_and_process`` (needs the boolean
        ``GenreIs*`` flag columns and a ``RevenueMillions`` column).
    """
    # (console label, bar-chart label, genre flag column).  Console labels
    # reproduce the original spacing exactly so printed output is unchanged.
    # This table replaces twelve copy-pasted filter/sum/print stanzas.
    genres = [
        (' Indie : ', 'Indie', 'GenreIsIndie'),
        (' Action : ', 'Action', 'GenreIsAction'),
        (' Casual : ', 'Casual', 'GenreIsCasual'),
        (' Adventure : ', 'Adventure', 'GenreIsAdventure'),
        (' Strategy : ', 'Strategy', 'GenreIsStrategy'),
        ('RPG : ', 'RPG', 'GenreIsRPG'),
        (' Simulation : ', 'Simulation', 'GenreIsSimulation'),
        (' Early Access : ', 'Early Access', 'GenreIsEarlyAccess'),
        (' Free To Play : ', 'Free to Play', 'GenreIsFreeToPlay'),
        (' Sports : ', 'Sports', 'GenreIsSports'),
        (' Racing : ', 'Racing', 'GenreIsRacing'),
        (' Massively Multiplayer : ', 'Massively Multiplayer',
         'GenreIsMassivelyMultiplayer'),
    ]
    totals = [df["RevenueMillions"].sum()]
    print_args = ['Total : ', totals[0]]
    for label, _, flag in genres:
        subtotal = df[df[flag] == True]["RevenueMillions"].sum()  # noqa: E712
        totals.append(subtotal)
        print_args.extend([label, subtotal])
    # print(*args) inserts a single space between arguments, matching the
    # original multi-argument print call byte for byte.
    print(*print_args)
    # NOTE: the value column has always been named "Owners" here even though
    # it holds revenue; kept as-is so the chart output matches the original.
    ap = {"Genre": ["Total"] + [bar for _, bar, _ in genres],
          "Owners": totals}
    dataFrame = pd.DataFrame(data=ap)
    dataFrame.plot.bar(x="Genre", y="Owners")
def genreratingplot(data1, genre):
    """Histogram of Metacritic ratings (>0 only) for one genre flag column.

    :param data1: DataFrame with a 'Rating' column and boolean *genre* column
    :param genre: name of a boolean ``GenreIs*`` flag column
    :return: the seaborn FacetGrid produced by ``displot``
    """
    # NOTE(review): ``sns`` (seaborn) is not imported in the visible part of
    # this module -- confirm the real file imports it, otherwise this raises
    # NameError at call time.
    # Unrated titles carry Rating == 0; exclude them from the histogram.
    dfrated = data1.loc[lambda x: x['Rating']>0]
    genresplit = dfrated[dfrated[genre] == True]
    genreplot = sns.displot(x="Rating", data=genresplit, bins = 20).set(title=("Rating Histogram for "+genre))
    return genreplot
1a220f455056de8d29d4fdc05194bbd2b99d0167 | 3,083 | py | Python | src/syncremote/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | 1 | 2016-01-18T08:19:22.000Z | 2016-01-18T08:19:22.000Z | src/syncremote/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | src/syncremote/models.py | litedesk/litedesk-webserver-provision | 1576b9d3e5e2e64d1136d276767c2710cfb1938f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django.db import models
| 33.150538 | 93 | 0.695102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from django.db import models
class Synchronizable(models.Model):
    """Abstract Django model mixin for objects mirrored on a remote system.

    Subclasses declare SYNCHRONIZABLE_ATTRIBUTES_MAP (local field name ->
    remote attribute name) and implement get_remote(), pull(), push() and
    get_remote_last_modified() to define how data moves in each direction.
    """
    # Maps local attribute names to the corresponding remote attribute names.
    SYNCHRONIZABLE_ATTRIBUTES_MAP = {}
    # Timestamps: last remote fetch, last remote write, last local edit.
    last_remote_read = models.DateTimeField(null=True, editable=False)
    last_remote_save = models.DateTimeField(null=True, editable=False)
    last_modified = models.DateTimeField(auto_now=True, editable=False)
    @property
    def last_sync(self):
        """Latest of the two remote timestamps, or None if never synced."""
        if self.last_remote_read is not None and self.last_remote_save is not None:
            return max(self.last_remote_read, self.last_remote_save)
        if self.last_remote_save is not None:
            return self.last_remote_save
        if self.last_remote_read is not None:
            return self.last_remote_read
        return None
    def _needs_pull(self, remote_object):
        # Pull when we never read the remote, or it changed after our last read.
        if self.last_remote_read is None: return True
        return self.last_remote_read < self.__class__.get_remote_last_modified(remote_object)
    def _needs_push(self, remote_object):
        # Push when we never saved remotely, or local edits are newer.
        if self.last_remote_save is None: return True
        return self.last_modified > self.__class__.get_remote_last_modified(remote_object)
    @property
    def _has_remote_save(self):
        # True once this object has been written to the remote at least once.
        return self.last_remote_save is not None
    def sync(self, force_push=False, force_pull=False):
        """Reconcile local and remote state, pulling and/or pushing as needed."""
        remote = self.get_remote()
        changed = (self._get_changed_attributes(remote_object=remote) != [])
        needs_pull = changed and self._needs_pull(remote)
        needs_push = changed and self._needs_push(remote)
        if force_pull or needs_pull:
            self.pull()
            self.last_remote_read = datetime.datetime.now()
        if force_push or needs_push:
            self.push()
            self.last_remote_save = datetime.datetime.now()
    def _get_changed_attributes(self, remote_object=None):
        """Return local attribute names whose values differ from the remote's."""
        remote = remote_object or self.get_remote()
        if remote is None: return self.SYNCHRONIZABLE_ATTRIBUTES_MAP.keys()
        return [
            local_attr
            for local_attr, remote_attr in self.SYNCHRONIZABLE_ATTRIBUTES_MAP.items()
            if getattr(self, local_attr) != getattr(remote, remote_attr)
        ]
    def get_remote(self):
        """Return the remote counterpart of this object (subclass hook)."""
        raise NotImplementedError
    def pull(self):
        """Copy remote state into this object (subclass hook)."""
        raise NotImplementedError
    def push(self):
        """Write local state to the remote system (subclass hook)."""
        raise NotImplementedError
    @classmethod
    def get_remote_last_modified(cls, remote_object):
        """Return the remote object's last-modified datetime (subclass hook)."""
        raise NotImplementedError
    @classmethod
    def load(cls, remote_object, **kw):
        """Create a local instance from a remote object (subclass hook)."""
        raise NotImplementedError
    @classmethod
    def merge(cls, local_object, remote_object, **extra_fields):
        """Overwrite local mapped fields from the remote unless local is newer."""
        remote_last_modified = local_object.get_remote_last_modified(remote_object)
        if local_object.last_modified > remote_last_modified: return
        for local_attr, remote_attr in local_object.SYNCHRONIZABLE_ATTRIBUTES_MAP.items():
            remote_value = getattr(remote_object, remote_attr)
            setattr(local_object, local_attr, remote_value)
        local_object.save(**extra_fields)
    class Meta:
        # No table of its own; concrete subclasses provide the storage.
        abstract = True
| 2,255 | 712 | 23 |
e5c011e71d450157209e1e36d22ef161e3f0381f | 108 | py | Python | api/posts_communities/apps.py | Juangr1803/Foro-AgrodatAI | a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b | [
"MIT"
] | 1 | 2021-04-19T16:13:39.000Z | 2021-04-19T16:13:39.000Z | api/posts_communities/apps.py | Juangr1803/Foro-AgrodatAI | a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b | [
"MIT"
] | null | null | null | api/posts_communities/apps.py | Juangr1803/Foro-AgrodatAI | a8f23afd32d2ec60d25a03c97f5f353fd0ef5e0b | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 18 | 40 | 0.796296 | from django.apps import AppConfig
class PostsCommunitiesConfig(AppConfig):
    """Django application configuration for the posts_communities app."""
    name = 'posts_communities'
| 0 | 50 | 23 |
24874a1570e70a14061bdb66cf777b1ffcd69f23 | 1,873 | py | Python | test/tests/transformation_test.py | dabraude/Pyliza | 66944c9b3d5ef75b39847fe1c460b6f1648042cd | [
"CC0-1.0"
] | null | null | null | test/tests/transformation_test.py | dabraude/Pyliza | 66944c9b3d5ef75b39847fe1c460b6f1648042cd | [
"CC0-1.0"
] | null | null | null | test/tests/transformation_test.py | dabraude/Pyliza | 66944c9b3d5ef75b39847fe1c460b6f1648042cd | [
"CC0-1.0"
] | null | null | null | import unittest
from hypothesis import given, example
from . import pyliza_strategies as liza_st
from pyliza.transformation import DecompositionRule
from pyliza.processing import ProcessingWord as PW
from pyliza.processing import ProcessingPhrase as PPhrase
| 44.595238 | 85 | 0.672718 | import unittest
from hypothesis import given, example
from . import pyliza_strategies as liza_st
from pyliza.transformation import DecompositionRule
from pyliza.processing import ProcessingWord as PW
from pyliza.processing import ProcessingPhrase as PPhrase
class DecompositionTestCase(unittest.TestCase):
    """Property-based tests for pyliza's DecompositionRule."""
    # Hypothesis generates (pattern, expected decomposition, phrase) triples;
    # the @example cases pin down the boundary patterns (empty phrase,
    # wildcard-only, literal-only, wildcard-plus-literal).
    @given(liza_st.valid_decomposition())
    @example(([0], [[]], PPhrase([])))
    @example(([1, PW("A")], [[PW("A")], [PW("A")]], PPhrase([PW("A"), PW("A")])))
    @example(([1], [[PW("A")]], PPhrase([PW("A")])))
    @example(([0, PW("A")], [[], [PW("A")]], PPhrase([PW("A")])))
    def test_matching(self, eg):
        """Decomposition will correctly decompose a phrase."""
        pattern, decomposed_phrase, phrase = eg
        rule = DecompositionRule(pattern)
        decomposed = rule.decompose(phrase)
        self.assertEqual(len(decomposed_phrase), len(decomposed))
        for real, dec in zip(decomposed_phrase, decomposed):
            self.assertEqual(real, dec)
    @given(liza_st.invalid_decomposition())
    def test_non_match(self, eg):
        """Decomposition will return None if the phrase doesn't match the pattern."""
        pattern, phrase = eg
        rule = DecompositionRule(pattern)
        self.assertIsNone(rule.decompose(phrase))
    def test_bad_patterns(self):
        """Check against some invalid inputs."""
        self.assertRaises(ValueError, DecompositionRule, None)
        self.assertRaises(ValueError, DecompositionRule, [])
        self.assertRaises(ValueError, DecompositionRule, [None])
        self.assertRaises(ValueError, DecompositionRule, [""])
        self.assertRaises(ValueError, DecompositionRule, [0.99])
        self.assertRaises(ValueError, DecompositionRule, [{0.99}])
        self.assertRaises(ValueError, DecompositionRule, [{None}])
        self.assertRaises(ValueError, DecompositionRule, [{""}])
| 0 | 1,590 | 23 |
7e94be0b426059a0b5f80191330dd01d9cefa8e8 | 2,371 | py | Python | lib/python3.10/site-packages/integrations/trac/zulip_trac_config.py | FHIR/zulip-archive | b1f69a091f74b613d74ebb558eed30415c0a9245 | [
"MIT"
] | 1 | 2020-05-25T11:52:31.000Z | 2020-05-25T11:52:31.000Z | lib/python3.10/site-packages/integrations/trac/zulip_trac_config.py | FHIR/zulip-archive | b1f69a091f74b613d74ebb558eed30415c0a9245 | [
"MIT"
] | 6 | 2020-03-24T16:39:54.000Z | 2021-04-30T20:46:43.000Z | api/integrations/trac/zulip_trac_config.py | erinis-eligro/zulip-outcasts | 51153a6ce219370aee79bfe462f6e4fb956993d9 | [
"Apache-2.0"
] | 3 | 2019-01-26T21:40:16.000Z | 2019-02-24T20:16:26.000Z | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See zulip_trac.py for installation and configuration instructions

# Change these constants to configure the plugin:
# Bot credentials used to post the notifications to Zulip.
ZULIP_USER = "trac-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# Stream that receives the ticket-change notifications.
STREAM_FOR_NOTIFICATIONS = "trac"
# Base URL used to build links back to Trac tickets (no trailing slash).
TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket"

# Most people find that having every change in Trac result in a
# notification is too noisy -- in particular, when someone goes
# through recategorizing a bunch of tickets, that can often be noisy
# and annoying.  We solve this issue by only sending a notification
# for changes to the fields listed below.
#
# TRAC_NOTIFY_FIELDS lets you specify which fields will trigger a
# Zulip notification in response to a trac update; you should change
# this list to match your team's workflow.  The complete list of
# possible fields is:
#
# (priority, milestone, cc, owner, keywords, component, severity,
# type, versions, description, resolution, summary, comment)
TRAC_NOTIFY_FIELDS = ["description", "summary", "resolution", "comment", "owner"]

## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None

# Set this to your Zulip API server URI
ZULIP_SITE = "https://zulip.example.com"
| 45.596154 | 81 | 0.770561 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See zulip_trac.py for installation and configuration instructions

# Change these constants to configure the plugin:
# Bot credentials used to post the notifications to Zulip.
ZULIP_USER = "trac-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# Stream that receives the ticket-change notifications.
STREAM_FOR_NOTIFICATIONS = "trac"
# Base URL used to build links back to Trac tickets (no trailing slash).
TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket"

# Most people find that having every change in Trac result in a
# notification is too noisy -- in particular, when someone goes
# through recategorizing a bunch of tickets, that can often be noisy
# and annoying.  We solve this issue by only sending a notification
# for changes to the fields listed below.
#
# TRAC_NOTIFY_FIELDS lets you specify which fields will trigger a
# Zulip notification in response to a trac update; you should change
# this list to match your team's workflow.  The complete list of
# possible fields is:
#
# (priority, milestone, cc, owner, keywords, component, severity,
# type, versions, description, resolution, summary, comment)
TRAC_NOTIFY_FIELDS = ["description", "summary", "resolution", "comment", "owner"]

## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None

# Set this to your Zulip API server URI
ZULIP_SITE = "https://zulip.example.com"
| 0 | 0 | 0 |
842efcd09eed67da59176d84a717ccdb5c52f087 | 1,102 | py | Python | ibsng/handler/invoice/update_invoice_profile.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/invoice/update_invoice_profile.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/invoice/update_invoice_profile.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Update invoice profile API method."""
from ibsng.handler.handler import Handler
class updateInvoiceProfile(Handler):
    """Update invoice profile method class."""

    def control(self):
        """Validate inputs after setup method.

        :return: None
        :rtype: None
        """
        self.is_valid(self.profile_id, int)
        self.is_valid(self.profile_name, str)
        self.is_valid(self.isp_name, str, False)
        self.is_valid(self.rules, list, False)
        self.is_valid(self.comment, str, False)

    def setup(self, profile_id, profile_name,
              isp_name="", rules=None, comment=""):
        """Setup required parameters.

        :param int profile_id: profile id
        :param str profile_name: new profile name
        :param str isp_name: new isp name
        :param list rules: new rules (defaults to an empty list)
        :param str comment: new comment

        :return: None
        :rtype: None
        """
        self.profile_id = profile_id
        self.profile_name = profile_name
        self.isp_name = isp_name
        # Fix: the original used a mutable default argument (rules=[]),
        # which is shared across calls; build a fresh list per call instead.
        self.rules = [] if rules is None else rules
        self.comment = comment
| 29 | 49 | 0.615245 | """Update invoice profile API method."""
from ibsng.handler.handler import Handler
class updateInvoiceProfile(Handler):
    """Update invoice profile method class."""

    def control(self):
        """Validate inputs after setup method.

        :return: None
        :rtype: None
        """
        self.is_valid(self.profile_id, int)
        self.is_valid(self.profile_name, str)
        self.is_valid(self.isp_name, str, False)
        self.is_valid(self.rules, list, False)
        self.is_valid(self.comment, str, False)

    def setup(self, profile_id, profile_name,
              isp_name="", rules=None, comment=""):
        """Setup required parameters.

        :param int profile_id: profile id
        :param str profile_name: new profile name
        :param str isp_name: new isp name
        :param list rules: new rules (defaults to an empty list)
        :param str comment: new comment

        :return: None
        :rtype: None
        """
        self.profile_id = profile_id
        self.profile_name = profile_name
        self.isp_name = isp_name
        # Fix: the original used a mutable default argument (rules=[]),
        # which is shared across calls; build a fresh list per call instead.
        self.rules = [] if rules is None else rules
        self.comment = comment
| 0 | 0 | 0 |
a17cb8280684b30e14af30222a03c3876843b327 | 1,059 | py | Python | test_pragmatic.py | prajwalccc13/Pragmatic-Web-Framework | 8fccf5ecde2619b40e2f29c1635d5f56fa31781a | [
"Apache-2.0"
] | null | null | null | test_pragmatic.py | prajwalccc13/Pragmatic-Web-Framework | 8fccf5ecde2619b40e2f29c1635d5f56fa31781a | [
"Apache-2.0"
] | null | null | null | test_pragmatic.py | prajwalccc13/Pragmatic-Web-Framework | 8fccf5ecde2619b40e2f29c1635d5f56fa31781a | [
"Apache-2.0"
] | null | null | null | import pytest
from api import API
@pytest.fixture
| 21.612245 | 76 | 0.673277 | import pytest
from api import API
@pytest.fixture
def api():
    """Provide a fresh API instance for each test."""
    return API()
def test_basic_route(api):
    """Registering the same path twice must raise an AssertionError."""
    @api.route("/home")
    def home(req, response):
        response.text = "worked"
    # Second registration of "/home" must be rejected by the framework.
    with pytest.raises(AssertionError):
        @api.route("/home")
        def home2(request, response):
            response.text = "not"
def test_pragmatic_test_client_can_send_request(api, client):
    """The test client should receive the text set by the route handler."""
    RESPONSE_TEXT = "This is good"

    @api.route("/hey")
    def cool(request, response):
        # Fix: the handler must assign ``response.text`` -- the original set
        # ``response.test`` (a typo), leaving the response body unset so the
        # assertion below could never see RESPONSE_TEXT.
        response.text = RESPONSE_TEXT

    assert client.get("http://testserver/hey").text == RESPONSE_TEXT
def test_default_404_response(client):
    """Unknown paths should yield the framework's default 404 response."""
    response = client.get("http://testserver/doesnotexist")
    assert response.status_code == 404
    assert response.text == "Not found."
def test_alternative_route(api, client):
    """Routes registered via api.add_route() behave like decorated routes."""
    response_text = "Alternative way to add a route"
    def home(req, resp):
        resp.text = response_text
    api.add_route("/alternative", home)
    assert client.get("http://testserver/alternative").text == response_text
2b8da670c87afe6784aa886b883743a5ea977ba2 | 537 | py | Python | jiant/tasks/lib/superglue_axb.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 1,108 | 2019-04-22T09:19:19.000Z | 2022-03-31T13:23:51.000Z | jiant/tasks/lib/superglue_axb.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 737 | 2019-04-22T14:30:36.000Z | 2022-03-31T22:22:17.000Z | jiant/tasks/lib/superglue_axb.py | yzpang/jiant | 192d6b525c06f33010b59044df40cb86bbfba4ea | [
"MIT"
] | 273 | 2019-04-23T01:42:11.000Z | 2022-03-25T15:59:38.000Z | from dataclasses import dataclass
from . import rte
@dataclass
@dataclass
@dataclass
@dataclass
| 16.78125 | 76 | 0.744879 | from dataclasses import dataclass
from . import rte
@dataclass
class Example(rte.Example):
pass
@dataclass
class TokenizedExample(rte.Example):
pass
@dataclass
class DataRow(rte.DataRow):
pass
@dataclass
class Batch(rte.Batch):
pass
class SuperglueBroadcoverageDiagnosticsTask(rte.RteTask):
def get_train_examples(self):
raise RuntimeError("This task does not support training examples")
def get_val_examples(self):
raise RuntimeError("This task does not support validation examples")
| 166 | 101 | 164 |
684058cc65facf1d2b555fd05e7c6fb109db2485 | 1,655 | py | Python | tests/test_crl_client.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 4 | 2020-11-11T13:59:05.000Z | 2022-03-13T14:06:10.000Z | tests/test_crl_client.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 1 | 2020-11-11T11:29:37.000Z | 2020-11-11T11:29:37.000Z | tests/test_crl_client.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 2 | 2020-11-11T10:33:32.000Z | 2022-03-13T14:06:11.000Z | # coding: utf-8
import unittest
import os
from asn1crypto import x509, pem
from pyhanko_certvalidator.fetchers import aiohttp_fetchers, requests_fetchers
from pyhanko_certvalidator.context import ValidationContext
from pyhanko_certvalidator.validate import verify_crl
from .constants import TEST_REQUEST_TIMEOUT
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
| 34.479167 | 78 | 0.714199 | # coding: utf-8
import unittest
import os
from asn1crypto import x509, pem
from pyhanko_certvalidator.fetchers import aiohttp_fetchers, requests_fetchers
from pyhanko_certvalidator.context import ValidationContext
from pyhanko_certvalidator.validate import verify_crl
from .constants import TEST_REQUEST_TIMEOUT
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class CRLClientTests(unittest.IsolatedAsyncioTestCase):
    """Exercise live CRL fetching against both fetcher backends.

    Both tests load the bundled DigiCert intermediate certificate, fetch
    its CRLs over the network, build a validation path and verify the CRL.
    """

    async def _run_crl_check(self, fetchers):
        """Fetch CRLs for the fixture intermediate and verify one path."""
        cert_path = os.path.join(
            fixtures_dir, 'digicert-sha2-secure-server-ca.crt'
        )
        with open(cert_path, 'rb') as cert_handle:
            raw = cert_handle.read()
        # Accept either PEM-armored or raw DER fixture content.
        if pem.detect(raw):
            _, _, raw = pem.unarmor(raw)
        intermediate = x509.Certificate.load(raw)

        crls = await fetchers.crl_fetcher.fetch(intermediate)
        context = ValidationContext(crls=crls, fetchers=fetchers)
        registry = context.certificate_registry
        candidate_paths = await registry.async_build_paths(intermediate)
        await verify_crl(intermediate, candidate_paths[0], context)

    async def test_fetch_crl_aiohttp(self):
        backend = aiohttp_fetchers.AIOHttpFetcherBackend(
            per_request_timeout=TEST_REQUEST_TIMEOUT
        )
        async with backend as fetchers:
            await self._run_crl_check(fetchers)

    async def test_fetch_requests(self):
        backend = requests_fetchers.RequestsFetcherBackend(
            per_request_timeout=TEST_REQUEST_TIMEOUT
        )
        await self._run_crl_check(backend.get_fetchers())
| 1,110 | 34 | 104 |
35f5fbc2ecf336a27b90553bf6eced3f2cfbd38f | 9,157 | py | Python | pypibatch/main.py | newvicx/pybatch | 28065d70f5b970669fbb9174415dcd84477a99d2 | [
"MIT"
] | null | null | null | pypibatch/main.py | newvicx/pybatch | 28065d70f5b970669fbb9174415dcd84477a99d2 | [
"MIT"
] | null | null | null | pypibatch/main.py | newvicx/pybatch | 28065d70f5b970669fbb9174415dcd84477a99d2 | [
"MIT"
] | null | null | null | import os
import sys
from datetime import datetime
from typing import List, Tuple, Union
import clr
import pandas as pd
# Locate the OSIsoft PI SDK assemblies via the PISDKHOME environment variable
# and load them through pythonnet (clr) before importing the .NET namespace.
PISDKHOME = os.getenv("PISDKHOME")
sys.path.append(PISDKHOME)
clr.AddReference("OSIsoft.PISDK")
from PISDK import PISDK, PISubBatch, PIUnitBatch
# Semantic aliases: both result tables are plain pandas DataFrames.
UnitBatches = pd.DataFrame
SubBatches = pd.DataFrame
class PIBatch:
    """
    Class for querying PIBatch data via the PISDK

    Args
     - server (str): the name of the PIServer to connect to

    Raises
     - PIBatchError: an error occurred trying to connect to
       server
    """

    def __init__(self, server: str) -> None:
        # FIX: this copy of the class was missing its constructor, so every
        # call to search() failed on the undefined `self._db`/`self.calDB`
        # attributes. Restored identically to the sibling definition of this
        # class found later in this file.
        try:
            sdk = PISDK()
            server = sdk.Servers[server]
            db = server.PIModuleDB
        except BaseException as err:
            # NOTE(review): BaseException is very broad; presumably chosen to
            # catch pythonnet/COM interop errors -- confirm before narrowing.
            raise PIBatchError(
                "Unable to establish connection to PIBatch"
            ) from err
        self._sdk = sdk
        self._server = server
        self._db = db

    def search(
        self,
        unit_id: str,
        start_time: Union[datetime, str] = "-100d",
        end_time: Union[datetime, str] = "*",
        batch_id: Union[List[str], str] = "*",
        product: Union[List[str], str] = "*",
        procedure: Union[List[str], str] = "*",
        sub_batches: Union[List[str], str] = "*"
    ) -> "Tuple[UnitBatches, SubBatches]":
        """
        Query batches for a given unit_id

        Args
         - unit_id (str): Wildcard string of a PIModule name to match
         - start_time (Union[datetime, str]): The search start time.
           datetime.datetime objects are converted to ISOFormat strings
         - end_time (Union[datetime, str]): The search end time.
           datetime.datetime objects are converted to ISOFormat strings.
           Defaults to "*"
         - batch_id (Union[List[str], str]): Wildcard string of BatchID to match.
           List instances are concatenated to a single string separated by
           commas ",". Defaults to "*"
         - product (Union[List[str], str]): Wildcard string of Product to match.
           List instances are concatenated to a single string separated by
           commas ",". Defaults to "*"
         - procedure (Union[List[str], str]): Wildcard string of Procedure to
           match. List instances are concatenated to a single string separated
           by commas ",". Defaults to "*"
         - sub_batches (Union[List[str], str]): Wildcard string of SubBatch to
           match. List instances are concatenated to a single string separated
           by commas ",". Defaults to "*"

        Returns
         - UnitBatches (pd.DataFrame): DataFrame of unit batches with schema
            "BatchID": str, "Product": str, "Name": str, "StartTime": str,
            "EndTime": str, "Procedure": str, "UniqueID": str,
            "SubBatchCount": int
         - SubBatches (pd.DataFrame): DataFrame of sub batches with schema
            "ParentID": str (PIUnitBatch.UniqueID), "Name": str,
            "StartTime": str, "EndTime": str,
            "UniqueID": str (PISubBatch.UniqueID)

        Raises
         - PIBatchError: An error occurred in connecting to server or during
           query
         - NoBatchesFound: Query returned no results
        """
        start_time, end_time, batch_id, product, procedure, sub_batches = self._prep_search_criteria(
            start_time,
            end_time,
            batch_id,
            product,
            procedure,
            sub_batches
        )
        try:
            unit_batches_raw = [
                PIUnitBatch(batch) for batch in self._db.PIUnitBatchSearch(
                    start_time, end_time, unit_id, batch_id, product, procedure, sub_batches
                )
            ]
        except BaseException as err:
            raise PIBatchError(
                "Unable to retrieve unit batches"
            ) from err
        if not unit_batches_raw:
            raise NoBatchesFound
        sub_batches_raw = {unit_batch.UniqueID: unit_batch.PISubBatches for unit_batch in unit_batches_raw}
        # Timestamp used as a stand-in EndTime for still-running batches.
        self.now = datetime.now().strftime("%m/%d/%Y %H:%M:%S %p")
        # parse unit batches and sub batches to dataframes
        unit_batches = self._parse_unit_batches(unit_batches_raw)
        sub_batches = self._parse_sub_batches(sub_batches_raw)
        return unit_batches, sub_batches

    def _prep_search_criteria(
        self,
        start_time: Union[datetime, str],
        end_time: Union[datetime, str],
        batch_id: Union[List[str], str],
        product: Union[List[str], str],
        procedure: Union[List[str], str],
        sub_batches: Union[List[str], str]
    ) -> Tuple:
        """
        Properly format variables for the PISDK query: datetimes become
        ISO-format strings, lists become comma-separated strings.
        """
        start_time = start_time.isoformat() if isinstance(start_time, datetime) else start_time
        end_time = end_time.isoformat() if isinstance(end_time, datetime) else end_time
        batch_id = ','.join(batch_id) if isinstance(batch_id, list) else batch_id
        product = ','.join(product) if isinstance(product, list) else product
        procedure = ','.join(procedure) if isinstance(procedure, list) else procedure
        sub_batches = ','.join(sub_batches) if isinstance(sub_batches, list) else sub_batches
        return start_time, end_time, batch_id, product, procedure, sub_batches

    def _parse_unit_batches(self, unit_batches: list) -> "UnitBatches":
        """
        Parse returned PIUnitBatch objects into a DataFrame with schema
        "BatchID", "Product", "Name", "StartTime", "EndTime", "Procedure",
        "UniqueID" (str) and "SubBatchCount" (int).
        """
        batch_ids = [unit_batch.BatchID for unit_batch in unit_batches]
        products = [unit_batch.Product for unit_batch in unit_batches]
        unit_names = [unit_batch.PIUnit.Name for unit_batch in unit_batches]
        start_times = [unit_batch.StartTime.LocalDate.ToString() for unit_batch in unit_batches]
        procedure_names = [unit_batch.ProcedureName for unit_batch in unit_batches]
        unique_ids = [unit_batch.UniqueID for unit_batch in unit_batches]
        sub_batch_counts = [unit_batch.PISubBatches.Count for unit_batch in unit_batches]
        end_times = []
        for unit_batch in unit_batches:
            try:
                end_times.append(unit_batch.EndTime.LocalDate.ToString())
            except AttributeError:
                # Batch still running -> no EndTime; substitute "now".
                end_times.append(self.now)
        parsed = {
            "BatchID": batch_ids,
            "Product": products,
            "Name": unit_names,
            "StartTime": start_times,
            "EndTime": end_times,
            "Procedure": procedure_names,
            "UniqueID": unique_ids,
            "SubBatchCount": sub_batch_counts
        }
        return pd.DataFrame.from_dict(parsed)

    def _parse_sub_batches(self, sub_batches: dict) -> "SubBatches":
        """
        Format returned sub batches (a mapping of PIUnitBatch.UniqueID to
        PIUnitBatch.PISubBatches) into a DataFrame with schema
        "ParentID", "Name", "StartTime", "EndTime", "UniqueID".
        """
        parent_ids = []
        names = []
        start_times = []
        end_times = []
        unique_ids = []
        for parent_id, sub_batch in sub_batches.items():
            unit_sub_batches = [PISubBatch(unit_sub_batch) for unit_sub_batch in sub_batch]
            for unit_sub_batch in unit_sub_batches:
                parent_ids.append(parent_id)
                names.append(unit_sub_batch.Name)
                start_times.append(unit_sub_batch.StartTime.LocalDate.ToString())
                try:
                    end_times.append(unit_sub_batch.EndTime.LocalDate.ToString())
                except AttributeError:
                    # Sub batch still running -> substitute "now".
                    end_times.append(self.now)
                unique_ids.append(unit_sub_batch.UniqueID)
        parsed = {
            "ParentID": parent_ids,
            "Name": names,
            "StartTime": start_times,
            "EndTime": end_times,
            "UniqueID": unique_ids
        }
        return pd.DataFrame.from_dict(parsed)
import sys
from datetime import datetime
from typing import List, Tuple, Union
import clr
import pandas as pd
# Make the OSIsoft PI SDK importable: PISDKHOME must point at the directory
# containing the OSIsoft.PISDK assembly, loaded here via pythonnet (clr).
PISDKHOME = os.getenv("PISDKHOME")
sys.path.append(PISDKHOME)
clr.AddReference("OSIsoft.PISDK")
from PISDK import PISDK, PISubBatch, PIUnitBatch
# Type aliases for readability: query results are pandas DataFrames.
UnitBatches = pd.DataFrame
SubBatches = pd.DataFrame
class PIBatchError(Exception):
    """Base error for PIBatch operations (connection or query failures)."""
    # The previous explicit __init__ only forwarded *args to Exception and
    # was therefore redundant; construction behavior is unchanged.
class NoBatchesFound(PIBatchError):
    """Raised when a batch search returns no matching unit batches."""
    # The previous explicit __init__ only forwarded *args to the base class
    # and was therefore redundant; construction behavior is unchanged.
class PIBatch:
    """
    Class for querying PIBatch data via the PISDK

    Args
     - server (str): the name of the PIServer to connect to

    Raises
     - PIBatchError: an error occurred trying to connect to
       server
    """
    def __init__(self, server: str) -> None:
        # Resolve the PI server by name and keep a handle on its module
        # database, which exposes the PIUnitBatchSearch API used by search().
        try:
            sdk = PISDK()
            server = sdk.Servers[server]
            db = server.PIModuleDB
        except BaseException as err:
            # NOTE(review): BaseException is very broad; presumably chosen to
            # catch pythonnet/COM interop errors -- confirm before narrowing.
            raise PIBatchError(
                "Unable to establish connection to PIBatch"
            ) from err
        self._sdk = sdk
        self._server = server
        self._db = db
    def search(
        self,
        unit_id: str,
        start_time: Union[datetime, str] = "-100d",
        end_time: Union[datetime, str] = "*",
        batch_id: Union[List[str], str] = "*",
        product: Union[List[str], str] = "*",
        procedure: Union[List[str], str] = "*",
        sub_batches: Union[List[str], str] = "*"
    ) -> Tuple[UnitBatches, SubBatches]:
        """
        Query batches for a given unit_id

        Args
         - unit_id (str): Wildcard string of a PIModule name to match
         - start_time (Union[datetime, str]): The search start time.
           datetime.datetime objects are converted to ISOFormat strings
         - end_time (Union[datetime, str]): The search end time.
           datetime.datetime objects are converted to ISOFormat strings.
           Defaults to "*"
         - batch_id (Union[List[str], str]): Wildcard string of BatchID to match.
           List instances are concatenated to a single string separated by
           commas ",". Defaults to "*"
         - product (Union[List[str], str]): Wildcard string of Product to match.
           List instances are concatenated to a single string separated by
           commas ",". Defaults to "*"
         - procedure (Union[List[str], str]): Wildcard string of Procedure to
           match. List instances are concatenated to a single string separated
           by commas ",". Defaults to "*"
         - sub_batches (Union[List[str], str]): Wildcard string of SubBatch to
           match. List instances are concatenated to a single string separated
           by commas ",". Defaults to "*"

        Returns
         - UnitBatches (pd.DataFrame): DataFrame of unit batches with schema
            "BatchID": str, "Product": str, "Name": str, "StartTime": str,
            "EndTime": str, "Procedure": str, "UniqueID": str,
            "SubBatchCount": int
         - SubBatches (pd.DataFrame): DataFrame of sub batches with schema
            "ParentID": str (PIUnitBatch.UniqueID), "Name": str,
            "StartTime": str, "EndTime": str,
            "UniqueID": str (PISubBatch.UniqueID)

        Raises
         - PIBatchError: An error occurred in connecting to server or during
           query
         - NoBatchesFound: Query returned no results
        """
        start_time, end_time, batch_id, product, procedure, sub_batches = self._prep_search_criteria(
            start_time,
            end_time,
            batch_id,
            product,
            procedure,
            sub_batches
        )
        try:
            unit_batches_raw = [
                PIUnitBatch(batch) for batch in self._db.PIUnitBatchSearch(
                    start_time, end_time, unit_id, batch_id, product, procedure, sub_batches
                )
            ]
        except BaseException as err:
            raise PIBatchError(
                "Unable to retrieve unit batches"
            ) from err
        if not unit_batches_raw:
            raise NoBatchesFound
        sub_batches_raw = {unit_batch.UniqueID: unit_batch.PISubBatches for unit_batch in unit_batches_raw}
        # parse unit batches and sub batches to dataframes
        # self.now serves as a stand-in EndTime for batches still running.
        self.now = datetime.now().strftime("%m/%d/%Y %H:%M:%S %p")
        unit_batches: UnitBatches = self._parse_unit_batches(unit_batches_raw)
        sub_batches: SubBatches = self._parse_sub_batches(sub_batches_raw)
        return unit_batches, sub_batches
    def _prep_search_criteria(
        self,
        start_time: Union[datetime, str],
        end_time: Union[datetime, str],
        batch_id: Union[List[str], str],
        product: Union[List[str], str],
        procedure: Union[List[str], str],
        sub_batches: Union[List[str], str]
    ) -> Tuple:
        """
        Properly format variables for query: datetimes become ISO-format
        strings, lists become comma-separated strings.
        """
        start_time = start_time.isoformat() if isinstance(start_time, datetime) else start_time
        end_time = end_time.isoformat() if isinstance(end_time, datetime) else end_time
        batch_id = ','.join(batch_id) if isinstance(batch_id, list) else batch_id
        product = ','.join(product) if isinstance(product, list) else product
        procedure = ','.join(procedure) if isinstance(procedure, list) else procedure
        sub_batches = ','.join(sub_batches) if isinstance(sub_batches, list) else sub_batches
        return start_time, end_time, batch_id, product, procedure, sub_batches
    def _parse_unit_batches(self, unit_batches: list) -> UnitBatches:
        """
        Parse returned unit batches (list of PIUnitBatch objects) into a
        DataFrame with schema "BatchID", "Product", "Name", "StartTime",
        "EndTime", "Procedure", "UniqueID" (str), "SubBatchCount" (int).
        """
        batch_ids = [unit_batch.BatchID for unit_batch in unit_batches]
        products = [unit_batch.Product for unit_batch in unit_batches]
        unit_names = [unit_batch.PIUnit.Name for unit_batch in unit_batches]
        start_times = [unit_batch.StartTime.LocalDate.ToString() for unit_batch in unit_batches]
        end_times = []
        procedure_names = [unit_batch.ProcedureName for unit_batch in unit_batches]
        unique_ids = [unit_batch.UniqueID for unit_batch in unit_batches]
        sub_batch_counts = [unit_batch.PISubBatches.Count for unit_batch in unit_batches]
        for unit_batch in unit_batches:
            try:
                end_times.append(unit_batch.EndTime.LocalDate.ToString())
            except AttributeError:
                # Batch still running -> no EndTime; substitute "now".
                end_times.append(self.now)
        parsed = {
            "BatchID": batch_ids,
            "Product": products,
            "Name": unit_names,
            "StartTime": start_times,
            "EndTime": end_times,
            "Procedure": procedure_names,
            "UniqueID": unique_ids,
            "SubBatchCount": sub_batch_counts
        }
        return pd.DataFrame.from_dict(parsed)
    def _parse_sub_batches(self, sub_batches: dict) -> SubBatches:
        """
        Format returned sub batches (mapping of PIUnitBatch.UniqueID to
        PIUnitBatch.PISubBatches) into a DataFrame with schema
        "ParentID", "Name", "StartTime", "EndTime", "UniqueID".
        """
        parent_ids = []
        names = []
        start_times = []
        end_times = []
        unique_ids = []
        for parent_id, sub_batch in sub_batches.items():
            unit_sub_batches = [PISubBatch(unit_sub_batch) for unit_sub_batch in sub_batch]
            for unit_sub_batch in unit_sub_batches:
                parent_ids.append(parent_id)
                names.append(unit_sub_batch.Name)
                start_times.append(unit_sub_batch.StartTime.LocalDate.ToString())
                try:
                    end_times.append(unit_sub_batch.EndTime.LocalDate.ToString())
                except AttributeError:
                    # Sub batch still running -> substitute "now".
                    end_times.append(self.now)
                unique_ids.append(unit_sub_batch.UniqueID)
        parsed = {
            "ParentID": parent_ids,
            "Name": names,
            "StartTime": start_times,
            "EndTime": end_times,
            "UniqueID": unique_ids
        }
        # NOTE(review): the trailing "| 477 | 23 | 125 |" tokens on the next
        # line are not Python -- they look like dataset-join corruption fused
        # onto the original `return` statement; confirm against upstream.
        return pd.DataFrame.from_dict(parsed) | 477 | 23 | 125 |
2909cd233acf8a536e5a3e44f665d30c6eec060f | 6,762 | py | Python | apps/Am241_Analysis.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | 13 | 2019-05-01T01:37:30.000Z | 2022-03-18T08:52:19.000Z | apps/Am241_Analysis.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | 111 | 2019-03-25T00:50:48.000Z | 2022-03-30T17:13:43.000Z | apps/Am241_Analysis.py | sweigart/pygama | 3c5fe4c69230814933b2de879b9a305ff0d4ad5e | [
"Apache-2.0"
] | 52 | 2019-01-24T21:05:04.000Z | 2022-03-07T23:37:55.000Z | #!/usr/bin/env python3.7
import numpy as np
import pandas as pd
import tinydb as db
import matplotlib.pyplot as plt
from scipy.integrate import simps
from pygama import DataSet
import pygama.utils as pgu
import pygama.analysis.histograms as pgh
import pygama.analysis.peak_fitting as pga
from numpy import diff
"""""
This is a script to fit the 60keV, 99keV and 103keV lines of an 241Am scan.
This script is based on the pygama version from December 2019 and is a bit outdated.
An update will be done soon
You need to have done a Calibration before and the output must be in the ds.calDB file
The function takes a DataSet (December version) and a t2-level file
Then a fit on the 60kev line and on the 99/103 keV lines is performed, the
integrals are caluclated and the ratio is determind
A.Zschocke
"""
if __name__=="__main__":
    # NOTE(review): no main() is defined in this module, so running it as a
    # script raises NameError -- confirm the intended entry point.
    main()
| 30.459459 | 158 | 0.586217 | #!/usr/bin/env python3.7
import numpy as np
import pandas as pd
import tinydb as db
import matplotlib.pyplot as plt
from scipy.integrate import simps
from pygama import DataSet
import pygama.utils as pgu
import pygama.analysis.histograms as pgh
import pygama.analysis.peak_fitting as pga
from numpy import diff
"""""
This is a script to fit the 60keV, 99keV and 103keV lines of an 241Am scan.
This script is based on the pygama version from December 2019 and is a bit outdated.
An update will be done soon
You need to have done a Calibration before and the output must be in the ds.calDB file
The function takes a DataSet (December version) and a t2-level file
Then a fit on the 60kev line and on the 99/103 keV lines is performed, the
integrals are caluclated and the ratio is determind
A.Zschocke
"""
def fit_Am_lines(ds, t2, display=False, write_DB=True):
    """Fit the 241Am 60 keV and 99/103 keV lines and study peak ratios.

    For a list of histogram bin sizes, fits the 99/103 keV doublet and the
    60 keV line, integrates the fitted peaks and records the ratio
    n60/(n99+n103) plus the reduced chi^2 of the 60 keV fit.

    Args
     - ds: DataSet (Dec-2019 pygama layout) providing calDB, config, ds_list
     - t2: t2-level DataFrame holding the "e_ftp" energy column
     - display (bool): if True, show and save diagnostic plots
     - write_DB (bool): if True, upsert the results into a TinyDB json file
    """
    print("Fit Am lines")
    etype, ecal = "e_ftp", "e_cal"
    e_peak = 0
    #Load calibration Values
    calDB = ds.calDB
    query = db.Query()
    table = calDB.table("cal_pass3").all()
    df_cal = pd.DataFrame(table)
    slope = df_cal.iloc[0]["slope"]
    offset = df_cal.iloc[0]["offset"]
    # load in the energy and apply (linear) calibration
    # NOTE(review): the formula below is E*(E*slope+offset), i.e. quadratic
    # in the raw energy, not linear as the comment above claims -- confirm.
    ene = t2[etype]
    e_cal = ene* (ene * slope +offset)
    green_line = slope * 500 + offset
    fits = {}
    pk_names = ds.config["pks"]
    am_peaks = ds.config["peaks_of_interest"]
    # Here I did a quick study on the impact of the bin size on the integral
    # and the chi2 (this is the next for loop)
    ar = []
    chic = []
    scan = [0.1,0.09,0.08,0.07,0.06,0.05,0.04,0.03,0.02,0.01]
    aq = 1500000
    # For loop over different bin sizes
    for bi in scan:
        # Do the 100keV lines first
        xlo, xhi, xpb = 90, 110,bi
        hE, xE, vE = pgh.get_hist(e_cal, range=(xlo, xhi), dx=xpb)
        inf = np.inf
        # Set up initial values and limits
        guess_100 = [100000,99,0.5,11000,103,0.5,4050,101,0.5, 400000,39000,400,20000]
        bounds_100 = ([-np.inf,97,-np.inf,-np.inf,102,-np.inf,-np.inf,100.1,0.001,-inf,-inf,-inf,-inf],[inf,100,inf,inf,104,inf,inf,101.7,0.8,inf,inf,inf,inf])
        #Do the fit (Am_double function from PeakFitting.py)
        xF, xF_cov = pga.fit_hist(pga.Am_double, hE, xE, var=np.ones(len(hE)), guess=guess_100, bounds=bounds_100)
        dg_fit, gaus1, gaus2, gaus3, step1, step2 = pga.Am_double(xE,*xF,components=True)
        # Fit-parameter summary; centroids and FWHM (sigma * 2.355).
        results = {
            "99keV" : xF[1],
            "99keV_fwhm" : xF[2] * 2.355,
            "103keV" : xF[4],
            "103keV_fwhm" : xF[5] * 2.355
            # ...
        }
        #calculate the integral
        area_g1 = simps(gaus1,dx = bi)
        area_g2 = simps(gaus2,dx = bi)
        # Pearson chi^2 of the doublet fit; 13 = number of fit parameters.
        chisq = []
        for i, h in enumerate(hE):
            diff = (pga.Am_double(xE[i], *xF) - hE[i])**2 / hE[i]
            chisq.append(abs(diff))
        results["peak_integral1"] = area_g1
        results["peak_integral2"] = area_g2
        chisq_ndf_100 = sum(np.array(chisq) / (len(hE)-13))
        # Plot it if wanted
        if display:
            plt.plot(xE[1:],hE,ls='steps', lw=1, c='b', label="data")
            plt.plot(xE,pga.Am_double(xE,*xF),c='r', label='Fit')
            plt.plot(xE,gaus1+gaus2,c='m', label='Gauss 99 keV + 103 keV')
            plt.plot(xE,gaus3,c='y', label='Gauss bkg')
            plt.plot(xE,step1+step2,c='g', label='Step')
            plt.xlabel("Energy [keV]",ha='right', x=1.0)
            plt.ylabel("Counts",ha='right', y=1.0)
            plt.legend()
            meta_dir = os.path.expandvars(ds.config["meta_dir"])
            runNum = ds.ds_list[0]
            plt.savefig(meta_dir+"/plots/100keV_100ev_bin_lines_run" + str(runNum)+".png")
            plt.show()
        # Do the 60 keV line
        xlo, xhi, xpb = 50, 70, bi
        hE, xE, vE = pgh.get_hist(e_cal, range=(xlo, xhi), dx=xpb)
        # Initial guesses: amplitude, centroid, sigma, tail, tau, bkg, step.
        a = aq
        mu = 59.5
        sigma = 0.3
        tail = 50000
        tau = 0.5
        bkg = 4000
        step = 3500
        guess_60 = [a,mu,sigma,tail,tau,bkg,step]
        bounds_60 = ([10,59,0.001,0.0,0.001,10,10],[inf,60.5,0.8,inf,inf,10000000,1000000])
        # The fit Function is a gauss_cdf
        xF, xF_cov = pga.fit_hist(pga.gauss_cdf, hE, xE, var=np.ones(len(hE)), guess=guess_60, bounds=bounds_60)
        line, tail, step, peak = pga.gauss_cdf(xE,*xF,components=True)
        chisq_60 = []
        print("Calculating the chi^2")
        for i, h in enumerate(hE):
            func = pga.gauss_cdf(xE[i], *xF)
            diff = (func - hE[i])
            dev = diff**2/func
            chisq_60.append(abs(dev))
        chi_60 = sum(np.array(chisq_60))
        chisq_ndf_60 = chi_60/(len(hE))
        meta_dir = os.path.expandvars(ds.config["meta_dir"])
        runNum = ds.ds_list[0]
        if display:
            plt.plot(xE[1:],hE,ls='steps', lw=1, c='b', label="data")
            plt.plot(xE,pga.gauss_cdf(xE,*xF),c='r', label='Fit')
            plt.plot(xE,(peak+tail), c='m', label = 'Gauss+Tail')
            plt.plot(xE,step, c='g', label = 'Step')
            plt.xlabel("Energy [keV]",ha='right', x=1.0)
            plt.ylabel("Counts",ha='right', y=1.0)
            plt.legend()
            plt.savefig(meta_dir+"/plots/60keV_lines_100ev_bin__run" + str(runNum) +".png")
            plt.show()
        # Integral of the 60 keV peak (gauss + tail components).
        area = simps(peak+tail,dx=bi)
        print("xF\n",xF)
        print("chi_60", chisq_ndf_60)
        print("chi_100", chisq_ndf_100)
        print("Peak Integrals:")
        print("60 keV = ", area)
        print("99 keV = ", area_g1)
        print("10 3keV = ", area_g2)
        print("ratio 1 = ", area/area_g1)
        print("ratio 2 = ", area/area_g2)
        print("ratio 3 = ", area/(area_g1+area_g2))
        ar.append(area/(area_g1+area_g2))
        chic.append(chisq_ndf_60)
    # Summary plots: chi^2/f and peak ratio versus scanned bin size.
    plt.subplot(211)
    plt.plot(scan,chic,'bx',ms=15,label='chi^2/f')
    plt.grid()
    plt.axvline(green_line, c='g', lw=1, label="calibration value at 100 keV")
    plt.legend()
    plt.subplot(212)
    plt.plot(scan,ar,'kx',ms=15,label='ratio "n60/(n99+n103)"')
    plt.axvline(green_line, c='g', lw=1, label="calibration value at 100 keV")
    plt.xlabel("bin size [keV]")
    plt.grid()
    plt.legend()
    plt.show()
    # Persist the last scan iteration's results per dataset.
    # NOTE(review): meta_dir is only assigned inside the scan loop above, so
    # this block relies on `scan` being non-empty -- confirm.
    if write_DB:
        res_db = meta_dir+"/PeakRatios_100evbin.json"
        resDB = db.TinyDB(res_db)
        query = db.Query()
        ratiotable = resDB.table("Peak_Ratios")
        for dset in ds.ds_list:
            row = {
                "ds":dset,
                "chi2_ndf_60":chisq_ndf_60,
                "chi2_ndf_100":chisq_ndf_100,
                "60_keV": area,
                "99_keV": area_g1,
                "103_keV": area_g2,
                "r1": area/area_g1,
                "r2": area/area_g2,
                "r3":area/(area_g1+area_g2)
            }
            ratiotable.upsert(row, query.ds == dset)
if __name__=="__main__":
    # NOTE(review): main() is not defined in this module; perhaps
    # fit_Am_lines was the intended entry point -- confirm.
    main()
| 5,880 | 0 | 23 |
8ca9be3878dab670cb6a05d0d41ef9afb347f424 | 2,302 | py | Python | pysaintcoinach/xiv/gc_scrip_shop_item.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 7 | 2019-11-20T17:24:49.000Z | 2022-03-29T04:17:53.000Z | pysaintcoinach/xiv/gc_scrip_shop_item.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 7 | 2019-04-08T07:36:46.000Z | 2022-01-17T22:51:54.000Z | pysaintcoinach/xiv/gc_scrip_shop_item.py | icykoneko/saintcoinach-py | 66898385e1198203a7ec9da83787427bf6fe5c83 | [
"MIT"
] | 3 | 2019-04-08T08:24:22.000Z | 2021-06-27T22:19:15.000Z | from ..ex.relational import IRelationalRow
from . import xivrow, XivSubRow, IXivSheet
from .interfaces import IShopListing, IShopListingItem
from .shop_listing_item import ShopListingItem
@xivrow
| 28.419753 | 116 | 0.668983 | from ..ex.relational import IRelationalRow
from . import xivrow, XivSubRow, IXivSheet
from .interfaces import IShopListing, IShopListingItem
from .shop_listing_item import ShopListingItem
@xivrow
class GCScripShopItem(XivSubRow, IShopListing, IShopListingItem):
    """A Grand Company scrip-shop entry.

    Acts both as the shop listing (one item for GC seals) and as the
    listing's single reward item; the cost side is a ShopListingItem
    priced in the owning Grand Company's seal item.
    """
    @property
    def gc_shop(self) -> 'GCShop':
        """The GCShop of the Grand Company this entry belongs to."""
        return self.__gc_shop
    @property
    def cost(self) -> ShopListingItem:
        """Cost of this entry, expressed in the Grand Company's seals."""
        return self.__cost
    @property
    def gc_scrip_shop_category(self) -> 'GCScripShopCategory':
        """Category row this entry is filed under (resolved via parent_key)."""
        return self.__gc_scrip_shop_category
    @property
    def item(self) -> 'Item':
        """The purchasable Item this row represents."""
        from .item import Item
        return self.as_T(Item)
    @property
    def required_grand_company_rank(self) -> 'GrandCompanyRank':
        """Minimum Grand Company rank required to purchase."""
        # TODO: Use `GrandCompanyRank` type.
        return self['Required{GrandCompanyRank}']
    @property
    def gc_seals_cost(self) -> int:
        """Seal price of this entry."""
        return self.as_int32('Cost{GCSeals}')
    @property
    def sort_key(self) -> int:
        """Sort key within the category (low byte of the raw value)."""
        return self.get_raw('SortKey') & 0xFF
    def __init__(self, sheet: IXivSheet, source_row: IRelationalRow):
        # Imports are local to avoid circular module imports at load time.
        from .gc_shop import GCShop
        from .gc_scrip_shop_category import GCScripShopCategory
        super(GCScripShopItem, self).__init__(sheet, source_row)
        self.__gc_scrip_shop_category = self.sheet.collection.get_sheet(GCScripShopCategory)[self.parent_key]
        # Pick the (first) GCShop belonging to the same Grand Company as
        # this entry's category.
        self.__gc_shop = next(filter(lambda _: _.grand_company.key == self.gc_scrip_shop_category.grand_company.key,
                                     self.sheet.collection.get_sheet(GCShop)))
        seal_item = self.gc_shop.grand_company.seal_item
        self.__cost = ShopListingItem(self, seal_item, self.gc_seals_cost, False, 0)
    def __str__(self):
        return str(self.item)
    # --- IShopListing / IShopListingItem: this row is its own listing, with
    # itself as the single reward and the seal cost as the single cost.
    @property
    def rewards(self) -> 'Iterable[IShopListingItem]':
        yield self
    @property
    def costs(self) -> 'Iterable[IShopListingItem]':
        yield self.cost
    @property
    def shops(self) -> 'Iterable[IShop]':
        yield self.gc_shop
    @property
    def is_hq(self) -> bool:
        return False
    @property
    def shop_item(self) -> 'IShopListing':
        return self
    @property
    def collectability_rating(self) -> int:
        return 0
    @property
    def count(self) -> int:
        return 1
| 1,410 | 672 | 22 |
9064b8f312956e4226f1f506cb810d923706df75 | 15,765 | py | Python | xask.py | s3h10r/say | 8302ba0bc41b9debd1852f8c0ac6d25a7aaa3b9a | [
"MIT"
] | 2 | 2020-10-18T09:52:20.000Z | 2021-09-27T09:23:33.000Z | xask.py | s3h10r/say | 8302ba0bc41b9debd1852f8c0ac6d25a7aaa3b9a | [
"MIT"
] | null | null | null | xask.py | s3h10r/say | 8302ba0bc41b9debd1852f8c0ac6d25a7aaa3b9a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
**experimental** a graphical retro-style version of `ask` - because we can. :D
asks a yes/no question via audio (text-to-speech).
returncode reflects answer in common unix-style (0 == yes/ok, 1 == nope)
Usage:
xask [<msg>] [--yes=<reply_yes>] [--no=<reply_no>] [--engine=<tts-engine>]
[--yes-exec=<yes-exec>] [--no-exec=<no-exec>]
Options:
--engine=<str> TTS-engine to use {'google', 'espeak', 'festival'}
[default: espeak]
--no=<str> Message for negative answer
--no-exec=<str> execute given command by negative answer
--yes=<str> Message for positive answer
--yes-exec=<str> execute given command by positive answer
-h, --help Print this
--version Print version
Examples:
$ xask "Do you want to play a game?" && echo "Splendid! :)"
$ xask "Do you want to play a game?" --yes="Splendid, let's play!" --no="Okidoki. Maybe another time."
$ xask "Reboot universe?" --yes="rebooting now." --yes-exec "init 6" --no="Ok. Maybe another time."
"""
import logging
import os
import subprocess
import sys
import threading
import time
# Console logging for this script; WARNING by default (INFO left commented).
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
logger.setLevel(logging.WARNING)
handler = logging.StreamHandler() # console-handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
from docopt import docopt
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1' # no "Hello from the pygame community..." on stdout.
# pygame is a hard requirement for the graphical UI; bail out early if absent.
try:
    import pygame
    import pygame.freetype
    from pygame.locals import *
except ImportError:
    logger.critical("whuuups. no pygame import possible :/")
    sys.exit(1)
from say import __version__, available_engines, ENGINE_DEFAULT, say
# --- display / layout configuration ---
_VERBOSITY = 0
WINDOW_SIZE = (1200, 800)
FULLSCREEN=True # if set, the previously defined WINDOW_SIZE is ignored
#FULLSCREEN=False # if set, the previously defined WINDOW_SIZE is ignored
FONT_ZOOM=0.75
MARGIN = [0,0,0,0] # top, left, right, bottom
VT100 = (80,24) # https://de.wikipedia.org/wiki/VT100
#PAGE_SIZE=VT100
PAGE_SIZE=(20,6) # character grid (columns, rows) shown on screen
# === THEME / COLOR SCHEME
# --- day
#BACKGROUND_COLOR = (255,255,255)
#TEXT_COLOR = (0,0,0)
#CURSOR_COLOR=GRAY
# --- night
BACKGROUND_COLOR = (0,0,0)
TEXT_COLOR = (255,255,255)
CURSOR_COLOR= (0,128,0) # https://docs.oracle.com/cd/E19728-01/820-2550/term_em_colormaps.html
# === THEME / COLOR SCHEME
def get_font_for_page(surface=None, page_size = (80,24), font = "FreeMono, Monospace", margin=(0,0,0,0), monospace=True):
    """
    Find the largest (monospace) font size at which a character page of
    ``page_size`` = (<columns_char_N>, <rows_char_N>) fits onto ``surface``
    inside the given ``margin``.

    Args:
        surface: pygame surface the page will be rendered on
        page_size: (columns, rows) of the character grid
        font (str): font name handed to ``pygame.freetype.SysFont``
        margin: (top, left, right, bottom) margins in pixels
        monospace: unused; kept for backward compatibility

    Returns:
        a ``pygame.freetype.Font`` instance (with ``origin = True``)

    Raises:
        ValueError: if even the smallest usable font size does not fit
            (raised as ValueError instead of the former bare Exception;
            still caught by any ``except Exception`` caller)
    """
    assert isinstance(font, str)
    font_name = font
    FONT_SIZE_MIN = 1
    assert FONT_SIZE_MIN > 0

    # FIX: honour the `margin` parameter instead of the module-level MARGIN
    # global. The only caller in this file passes margin=MARGIN, and the
    # default (0,0,0,0) equals MARGIN's value, so behavior is unchanged.
    width, height = surface.get_size()
    width -= margin[1] + margin[2]   # left + right
    height -= margin[0] + margin[3]  # top + bottom

    ref_char = ' '
    # Try sizes from 100 down to FONT_SIZE_MIN+1 (same range as before).
    for font_size in range(100, FONT_SIZE_MIN, -1):
        candidate = pygame.freetype.SysFont(font_name, font_size)
        candidate.origin = True
        # WORKAROUND kept from the original: pad one pixel per char and two
        # pixels per line to stay on the safe side of rounding.
        ref_size_x = candidate.get_rect(ref_char).width + 1
        ref_size_y = candidate.get_sized_height() + 2
        if (ref_size_x * page_size[0] > width) or (ref_size_y * page_size[1] > height):
            logger.debug("fontsize={} : ref_char's size_x={} size_y={}".format(font_size, ref_size_x, ref_size_y))
            continue
        logger.info("found fontsize={} (font={}) suiting for page_size={} # ref_char's ('{}') size_x={} size_y={}".format(font_size, font_name, page_size, ref_char, ref_size_x, ref_size_y))
        return candidate
    raise ValueError("Ouch! Fontsize required for page_size={} < {} :-/".format(page_size, FONT_SIZE_MIN))
def word_wrap(surf = None, text = None, stop_pos = None, font = None, color=(0, 0, 0), render=True):
    """
    throws text onto screen/surface (if render=True).
    if render is set to False only the positioning is calculated - handy
    for calculating the position of a cursor onto content already drawn
    by an earlier call (return values can be used for setting the cursor to a
    specific position (stop_pos) of the text)

    :args:
        text       a "page" as string which should be printed on surface
        stop_pos   the position in text where printing to surface should stop
                   (default == None == len(text))
        render     if False nothing is printed onto surface, but the
                   positioning calculations are done (see return values)

    returns x,y  # position of the last processed character of the text
                 # (the x-position is the position where the
                 # pixelrepresentation of the char ends)

    Layout uses the module-level MARGIN (top, left, right, bottom).
    **TODO: `color=random_color()` option**
    """
    assert(isinstance(render,bool))
    assert(isinstance(stop_pos,int) or stop_pos == None)
    if not(isinstance(stop_pos,int)):
        stop_pos = len(text) - 1
    pos = 0
    font.origin = True
    words = text.split(' ')
    # Usable drawing area = surface minus the configured margins.
    width, height = surf.get_size()
    width -= MARGIN[1] + MARGIN[2] # left + right
    height -= MARGIN[0] + MARGIN[3] # top + bottom
    line_spacing = font.get_sized_height() + 2
    x, y = MARGIN[1], line_spacing + MARGIN[0]
    space = font.get_rect(' ')
    i_pos = -1 # position in text-stream
    linebreaks = 0 # nr. of linebreaks in text-stream
    lines = text.split('\n')
    trimmed = False # if stop_pos is reached we set this to true and end the loop
    for i, line in enumerate(lines):
        logger.debug("line {} : '{}'".format(i, line))
        if len(line) > 0: # cause ''.split(' ') => ['']
            words = line.split(' ')
        else:
            words = []
        logger.debug("words of line {}: {}".format(line, words))
        for i2, word in enumerate(words):
            logger.debug("word_wrap-func line nr. {} word nr. {}".format(i,i2))
            # Re-attach the space that split(' ') removed, except after the
            # last word of a line (and not before trailing-whitespace runs).
            if i2 < len(words) - 1:
                if set(words[i2+1:]) != set(['']): # FIX-20011822-01: don't append whitespace if last word in line only followed by whitespaces
                    word += ' '
            if stop_pos != None and (i_pos + len(word) >= stop_pos):
                logger.debug("trimming word '{}' to pos length {} @ i_pos {}".format(word,stop_pos,i_pos))
                # trim word to pos length
                too_long = (i_pos + len(word)-1) - stop_pos
                tmpi = len(word) - too_long
                word = word[:tmpi]
                logger.debug("trimmed to '{}' @ i_pos {}".format(word,i_pos))
                trimmed=True
            if word=='' and not trimmed:
                word = ' '
                logger.debug("word == ' ' @ i_pos: {}".format(i_pos))
            i_pos += len(word)
            bounds = font.get_rect(word)
            # Sanity check: a monospace word should be at most N space-widths.
            logger.debug("assume: {} <= {}".format(bounds.width,space.width * len(word)))
            if not (bounds.width <= (space.width * len(word))):
                logger.debug("WARNING ASSERTION WRONG. MAYBE WE CAN USE A TRESHOLD IN WHICH IT IS OKAY?")
                logger.debug('{}'.format(word))
            # Wrap to next line when the word would overflow to the right.
            if x + bounds.width > width:
                x, y = MARGIN[1], y + line_spacing
                if x + bounds.width > width:
                    raise ValueError("word {} px to wide (x) for the surface".format(width - (x + bounds.width)))
                else:
                    logger.debug("word width (x) fits into surface. {}px left".format(width - (x + bounds.width)))
            if y + bounds.height - bounds.y > height:
                logger.critical("FIXME: text to long (y) for the surface")
                raise ValueError("text to long (y) for the surface")
            if render:
                logger.debug("render word '{}' on pos {},{}".format(word, x,y))
                font.render_to(surf, (x, y), None, color)
            x += bounds.width
            if trimmed:
                break
        if trimmed:
            break
        # add linebreak
        if i < len(lines) - 1:
            x = MARGIN[1]; y += line_spacing
            i_pos += 1 # the '\n' of the .split()
            linebreaks += 1
    logger.info("word_wrap: i_pos {} lines {} linebreaks done {}".format(i_pos,len(lines),linebreaks))
    logger.info("word_wrap: i_pos={} stop_pos={} (should be same)".format(i_pos,stop_pos))
    if stop_pos < len(text):
        #assert(i_pos == stop_pos)
        assert(abs(i_pos - stop_pos) < 2)
        # NOTE(review): the branch below is unreachable -- the assert above
        # already guarantees abs(i_pos - stop_pos) < 2.
        if abs(i_pos - stop_pos) >= 2:
            logger.warning("word_wrap : abs(i_pos - stop_pos) is {} (but should be zero)".format(abs(i_pos - stop_pos)))
    return x, y
def _show_message(surf=None, page="Do you want to play a game?", page_from_pos=0, show_cursor=True, wait_for_keypress=True):
    """
    Shows a message (question) char by char on the (full-)screen surface.

    Args:
        surf: pygame surface to draw on.
        page: the text to display (revealed typewriter-style).
        page_from_pos: index in `page` from which the reveal starts.
        show_cursor: if True, draw a blinking block cursor after the text.
        wait_for_keypress: if True, block until the user presses a key;
            otherwise return as soon as the transition has finished.

    Returns:
        key pressed by user # e.g "y", "n"
        (the raw event object for ESC; None if returning without a keypress)
    """
    SHOW_CURSOR=show_cursor
    font = get_font_for_page(surface=surf, page_size = PAGE_SIZE, margin=MARGIN)
    # ** typewriter-transition state
    page_in_transition = True
    page_transition_pos = page_from_pos
    page_transition_state = ""
    # **
    running = True
    user_pressed_key = None
    clock = pygame.time.Clock()
    while running:
        for event in pygame.event.get():
            # === event handler ===
            if event.type == KEYDOWN:
                if (event.key == K_ESCAPE):
                    # drain remaining events; ESC returns the raw event object
                    events = pygame.event.get()
                    user_pressed_key = event
                    running = False
                    break;
                else:
                    user_pressed_key = event.unicode
                    running = False
                    break;
        # === show content
        surf.fill(BACKGROUND_COLOR)
        if page_in_transition:
            # reveal one more character per frame
            page_transition_state = page[0:page_transition_pos + 1]
            x,y = word_wrap(surf, page_transition_state, None, font, TEXT_COLOR)
            if page_transition_pos == len(page): # transition finished
                page_in_transition = False
            #if time.time() % 1 > 0.2: # speed of transition progress
            #    page_transition_pos += 1
            page_transition_pos += 1
        else:
            x,y = word_wrap(surf, page, None, font, TEXT_COLOR)
            if not wait_for_keypress:
                running = False
        cursor_pos = page_transition_pos + 1
        # === cursor positioning
        font.origin = True
        line_spacing = font.get_sized_height() + 2
        space = font.get_rect(' ')
        cursor_width = space.width
        cursor_height_percentage = 100  # NOTE(review): unused
        cursor_height = (line_spacing / 100) * 80  # cursor is 80% of a line high
        if SHOW_CURSOR:
            # re-run word_wrap without rendering, only to compute the pixel
            # position right after the last visible character
            if page_in_transition:
                x,y = word_wrap(surf=surf, text=page_transition_state, stop_pos=cursor_pos, font = font, color=TEXT_COLOR, render=False)
            else:
                x,y = word_wrap(surf=surf, text=page, stop_pos=cursor_pos, font = font, color=TEXT_COLOR, render=False)
            if x > MARGIN[1]:
                cursor = Rect((x, y - cursor_height), (cursor_width, cursor_height)) # left, top, width, height
            else:
                # NOTE(review): both branches build the identical rect
                cursor = Rect((x,y - cursor_height), (cursor_width, cursor_height)) # left, top, width, height
            if time.time() % 1 > 0.5: # blinking
                pygame.draw.rect(surf, CURSOR_COLOR, cursor)
        # --- TODO save a screenshot or gif-animation for docs
        #if not page_in_transition:
        #    pygame.image.save(surf,'/tmp/screenshot_xask.png') # save screenshot
        # ---
        clock.tick(30)  # cap the loop at 30 FPS
        pygame.display.update()
    return user_pressed_key
def xsay(msg,engine,surf=None,quit_if_done=False,timeout=None):
    """
    **experimental** a graphical retro-style version of `say`.

    Draws *msg* typewriter-style while speaking it via the TTS engine in
    parallel; returns whatever key the user pressed (if any).
    `timeout` is currently unused.
    """
    if not surf:
        surf = _init_screen(fullscreen=FULLSCREEN)
    # one worker renders the message, the other speaks it
    display_worker = ThreadWithReturnValue(target=_show_message, args=(surf, msg,))
    speech_worker = threading.Thread(target=say, args=(msg, engine))
    for worker in (display_worker, speech_worker):
        worker.start()
    key_pressed = display_worker.join()
    speech_worker.join()
    if quit_if_done:
        pygame.quit()
    return key_pressed
if __name__ == '__main__':
    # Run main(), log the elapsed wall-clock time, and exit with a
    # unix-style return code: 0 == yes/ok, 1 == no.
    s = time.perf_counter()
    is_yes = main()
    elapsed = time.perf_counter() - s
    logger.info(f"{__file__} executed in {elapsed:0.2f} seconds.")
    yn_rc = 0
    if not is_yes:
        yn_rc = 1
    sys.exit(yn_rc)
| 38.639706 | 192 | 0.597843 | #!/usr/bin/env python3
"""
**experimental** a graphical retro-style version of `ask` - because we can. :D
asks a yes/no question via audio (text-to-speech).
returncode reflects answer in common unix-style (0 == yes/ok, 1 == nope)
Usage:
xask [<msg>] [--yes=<reply_yes>] [--no=<reply_no>] [--engine=<tts-engine>]
[--yes-exec=<yes-exec>] [--no-exec=<no-exec>]
Options:
--engine=<str> TTS-engine to use {'google', 'espeak', 'festival'}
[default: espeak]
--no=<str> Message for negative answer
--no-exec=<str> execute given command by negative answer
--yes=<str> Message for positive answer
--yes-exec=<str> execute given command by positive answer
-h, --help Print this
--version Print version
Examples:
$ xask "Do you want to play a game?" && echo "Splendid! :)"
$ xask "Do you want to play a game?" --yes="Splendid, let's play!" --no="Okidoki. Maybe another time."
$ xask "Reboot universe?" --yes="rebooting now." --yes-exec "init 6" --no="Ok. Maybe another time."
"""
import logging
import os
import subprocess
import sys
import threading
import time
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
logger.setLevel(logging.WARNING)
handler = logging.StreamHandler() # console-handler
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
from docopt import docopt
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1' # no "Hello from the pygame community..." on stdout.
try:
import pygame
import pygame.freetype
from pygame.locals import *
except ImportError:
logger.critical("whuuups. no pygame import possible :/")
sys.exit(1)
from say import __version__, available_engines, ENGINE_DEFAULT, say
_VERBOSITY = 0
WINDOW_SIZE = (1200, 800)
FULLSCREEN=True # if set, the previously defined WINDOW_SIZE is ignored
#FULLSCREEN=False # if set, the previously defined WINDOW_SIZE is ignored
FONT_ZOOM=0.75
MARGIN = [0,0,0,0] # top, left, right, bottom
VT100 = (80,24) # https://de.wikipedia.org/wiki/VT100
#PAGE_SIZE=VT100
PAGE_SIZE=(20,6)
# === THEME / COLOR SCHEME
# --- day
#BACKGROUND_COLOR = (255,255,255)
#TEXT_COLOR = (0,0,0)
#CURSOR_COLOR=GRAY
# --- night
BACKGROUND_COLOR = (0,0,0)
TEXT_COLOR = (255,255,255)
CURSOR_COLOR= (0,128,0) # https://docs.oracle.com/cd/E19728-01/820-2550/term_em_colormaps.html
# === THEME / COLOR SCHEME
class ThreadWithReturnValue(threading.Thread):
    """A ``threading.Thread`` whose ``join()`` hands back the target's return value."""

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        super().__init__(group, target, name, args, kwargs)
        self._return = None

    def run(self):
        # Unlike the base class, capture the target's result instead of
        # discarding it.
        target = self._target
        if target is not None:
            self._return = target(*self._args, **self._kwargs)

    def join(self, *args):
        super().join(*args)
        return self._return
def _init_screen(fullscreen=FULLSCREEN):
    """
    Initialize pygame and create the display surface.

    Side effects: mutates the module-level MARGIN (derived from WINDOW_SIZE)
    and, in fullscreen mode, overwrites WINDOW_SIZE with the real desktop
    resolution. Also hides the mouse cursor and centers the SDL window.

    Returns:
        the pygame display surface.
    """
    global MARGIN
    global WINDOW_SIZE
    logger.info("_init_screen(fullscreen={})".format(fullscreen))
    pygame.init()
    pygame.mouse.set_visible(0)
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    infoObj = pygame.display.Info()
    w, h = infoObj.current_w, infoObj.current_h
    logger.debug("w=%s h=%s" % (w,h))
    # margins are proportional to the configured window size
    MARGIN = [WINDOW_SIZE[1] / 40, WINDOW_SIZE[0] / 20, WINDOW_SIZE[0] / 20, WINDOW_SIZE[1] / 40] # top, left, right, bottom
    if fullscreen:
        surface = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
        WINDOW_SIZE = (w,h)
    else:
        surface = pygame.display.set_mode(WINDOW_SIZE)
    pygame.display.set_caption('xask')
    return surface
def get_font_for_page(surface=None, page_size = (80,24), font = "FreeMono, Monospace", margin=(0,0,0,0), monospace=True):
    """
    calculates the (monospace) fontsize for page_size (<columns_char_N>,<rows_char_N>)

    Starts at font size 100 and shrinks until a page of page_size characters
    fits into the surface (minus the module-level MARGIN; the `margin` and
    `monospace` parameters are currently unused).

    Raises:
        Exception: if no font size >= FONT_SIZE_MIN fits.

    returns FontInstance
    """
    assert(isinstance(font,str))
    font_name = font
    FONT_SIZE_MIN = 1
    width, height = surface.get_size()
    width -= MARGIN[1] + MARGIN[2] # left + right
    height -= MARGIN[0] + MARGIN[3] # top + bottom
    font_size = 101  # decremented once before the first try -> effectively 100
    ref_char = ' '
    assert(FONT_SIZE_MIN > 0)
    ref_size_x = None
    ref_size_y = None
    font = None
    running = True
    while running:
        if font_size > (FONT_SIZE_MIN + 1):
            font_size -= 1
        else:
            raise Exception("Ouch! Fontsize required for page_size={} < {} :-/".format(page_size,FONT_SIZE_MIN))
        font = pygame.freetype.SysFont(font_name, font_size)
        font.origin = True
        #ref_size_x = font.get_rect(ref_char).width
        ref_size_x = font.get_rect(ref_char).width + 1 # WORKAROUND: add one pixel per char to be safe ?
        ref_size_y = font.get_sized_height() + 2
        if (ref_size_x * page_size[0] > width) or (ref_size_y * page_size[1] > height):
            # still too large for the requested character grid -> shrink more
            logger.debug("fontsize={} : ref_char's size_x={} size_y={}".format(font_size, ref_size_x,ref_size_y))
            continue
        else: # got fitting fontsize
            running = False
    logger.info("found fontsize={} (font={}) suiting for page_size={} # ref_char's ('{}') size_x={} size_y={}".format(font_size, font_name, page_size, ref_char, ref_size_x,ref_size_y))
    return font
def word_wrap(surf = None, text = None, stop_pos = None, font = None, color=(0, 0, 0), render=True):
    """
    throws text onto screen/surface (if render=True).

    if render is set to False only the positioning is calculated - handy
    for calculating the position of a cursor onto content already drawn
    by an earlier call (return values can be used for setting the cursor to a
    specific position (stop_pos) of the text)

    :args:
        text       a "page" as string which should be printed on surface
        stop_pos   the position in text where printing to surface should stop
                   (default == None == len(text))
        render     if False, nothing is printed onto surface, but the
                   positioning calculations are done (see return values)

    returns x,y    # position of the last processed character of the text
                   # (the x-position is the position where the pixel
                   # representation of the char ends)

    **TODO: `color=random_color()` option**
    """
    assert(isinstance(render,bool))
    assert(isinstance(stop_pos,int) or stop_pos == None)
    if not(isinstance(stop_pos,int)):
        stop_pos = len(text) - 1
    pos = 0  # NOTE(review): unused
    font.origin = True
    words = text.split(' ')
    width, height = surf.get_size()
    width -= MARGIN[1] + MARGIN[2] # left + right
    height -= MARGIN[0] + MARGIN[3] # top + bottom
    line_spacing = font.get_sized_height() + 2
    x, y = MARGIN[1], line_spacing + MARGIN[0]
    space = font.get_rect(' ')
    i_pos = -1 # position in text-stream
    linebreaks = 0 # nr. of linebreaks in text-stream
    lines = text.split('\n')
    trimmed = False # if stop_pos is reached we set this to true and end the loop
    for i, line in enumerate(lines):
        logger.debug("line {} : '{}'".format(i, line))
        if len(line) > 0: # cause ''.split(' ') => ['']
            words = line.split(' ')
        else:
            words = []
        logger.debug("words of line {}: {}".format(line, words))
        for i2, word in enumerate(words):
            logger.debug("word_wrap-func line nr. {} word nr. {}".format(i,i2))
            if i2 < len(words) - 1:
                if set(words[i2+1:]) != set(['']): # FIX-20011822-01: don't append whitespace if last word in line only followed by whitespaces
                    word += ' '
            if stop_pos != None and (i_pos + len(word) >= stop_pos):
                logger.debug("trimming word '{}' to pos length {} @ i_pos {}".format(word,stop_pos,i_pos))
                # trim word to pos length
                too_long = (i_pos + len(word)-1) - stop_pos
                tmpi = len(word) - too_long
                word = word[:tmpi]
                logger.debug("trimmed to '{}' @ i_pos {}".format(word,i_pos))
                trimmed=True
            if word=='' and not trimmed:
                # empty split artifact: treat as a single space
                word = ' '
                logger.debug("word == ' ' @ i_pos: {}".format(i_pos))
            i_pos += len(word)
            bounds = font.get_rect(word)
            # sanity check: a word should never be wider than len(word) spaces
            logger.debug("assume: {} <= {}".format(bounds.width,space.width * len(word)))
            if not (bounds.width <= (space.width * len(word))):
                logger.debug("WARNING ASSERTION WRONG. MAYBE WE CAN USE A TRESHOLD IN WHICH IT IS OKAY?")
                logger.debug('{}'.format(word))
            if x + bounds.width > width:
                # word does not fit on the current line -> wrap to the next one
                x, y = MARGIN[1], y + line_spacing
                if x + bounds.width > width:
                    raise ValueError("word {} px to wide (x) for the surface".format(width - (x + bounds.width)))
                else:
                    logger.debug("word width (x) fits into surface. {}px left".format(width - (x + bounds.width)))
            if y + bounds.height - bounds.y > height:
                logger.critical("FIXME: text to long (y) for the surface")
                raise ValueError("text to long (y) for the surface")
            if render:
                logger.debug("render word '{}' on pos {},{}".format(word, x,y))
                font.render_to(surf, (x, y), None, color)
            x += bounds.width
            if trimmed:
                break
        if trimmed:
            break
        # add linebreak
        if i < len(lines) - 1:
            x = MARGIN[1]; y += line_spacing
            i_pos += 1 # the '\n' of the .split()
            linebreaks += 1
    logger.info("word_wrap: i_pos {} lines {} linebreaks done {}".format(i_pos,len(lines),linebreaks))
    logger.info("word_wrap: i_pos={} stop_pos={} (should be same)".format(i_pos,stop_pos))
    if stop_pos < len(text):
        #assert(i_pos == stop_pos)
        assert(abs(i_pos - stop_pos) < 2)
        if abs(i_pos - stop_pos) >= 2:
            logger.warning("word_wrap : abs(i_pos - stop_pos) is {} (but should be zero)".format(abs(i_pos - stop_pos)))
    return x, y
def _show_message(surf=None, page="Do you want to play a game?", page_from_pos=0, show_cursor=True, wait_for_keypress=True):
    """
    Shows a message (question) char by char on the (full-)screen surface.

    Args:
        surf: pygame surface to draw on.
        page: the text to display (revealed typewriter-style).
        page_from_pos: index in `page` from which the reveal starts.
        show_cursor: if True, draw a blinking block cursor after the text.
        wait_for_keypress: if True, block until the user presses a key;
            otherwise return as soon as the transition has finished.

    Returns:
        key pressed by user # e.g "y", "n"
        (the raw event object for ESC; None if returning without a keypress)
    """
    SHOW_CURSOR=show_cursor
    font = get_font_for_page(surface=surf, page_size = PAGE_SIZE, margin=MARGIN)
    # ** typewriter-transition state
    page_in_transition = True
    page_transition_pos = page_from_pos
    page_transition_state = ""
    # **
    running = True
    user_pressed_key = None
    clock = pygame.time.Clock()
    while running:
        for event in pygame.event.get():
            # === event handler ===
            if event.type == KEYDOWN:
                if (event.key == K_ESCAPE):
                    # drain remaining events; ESC returns the raw event object
                    events = pygame.event.get()
                    user_pressed_key = event
                    running = False
                    break;
                else:
                    user_pressed_key = event.unicode
                    running = False
                    break;
        # === show content
        surf.fill(BACKGROUND_COLOR)
        if page_in_transition:
            # reveal one more character per frame
            page_transition_state = page[0:page_transition_pos + 1]
            x,y = word_wrap(surf, page_transition_state, None, font, TEXT_COLOR)
            if page_transition_pos == len(page): # transition finished
                page_in_transition = False
            #if time.time() % 1 > 0.2: # speed of transition progress
            #    page_transition_pos += 1
            page_transition_pos += 1
        else:
            x,y = word_wrap(surf, page, None, font, TEXT_COLOR)
            if not wait_for_keypress:
                running = False
        cursor_pos = page_transition_pos + 1
        # === cursor positioning
        font.origin = True
        line_spacing = font.get_sized_height() + 2
        space = font.get_rect(' ')
        cursor_width = space.width
        cursor_height_percentage = 100  # NOTE(review): unused
        cursor_height = (line_spacing / 100) * 80  # cursor is 80% of a line high
        if SHOW_CURSOR:
            # re-run word_wrap without rendering, only to compute the pixel
            # position right after the last visible character
            if page_in_transition:
                x,y = word_wrap(surf=surf, text=page_transition_state, stop_pos=cursor_pos, font = font, color=TEXT_COLOR, render=False)
            else:
                x,y = word_wrap(surf=surf, text=page, stop_pos=cursor_pos, font = font, color=TEXT_COLOR, render=False)
            if x > MARGIN[1]:
                cursor = Rect((x, y - cursor_height), (cursor_width, cursor_height)) # left, top, width, height
            else:
                # NOTE(review): both branches build the identical rect
                cursor = Rect((x,y - cursor_height), (cursor_width, cursor_height)) # left, top, width, height
            if time.time() % 1 > 0.5: # blinking
                pygame.draw.rect(surf, CURSOR_COLOR, cursor)
        # --- TODO save a screenshot or gif-animation for docs
        #if not page_in_transition:
        #    pygame.image.save(surf,'/tmp/screenshot_xask.png') # save screenshot
        # ---
        clock.tick(30)  # cap the loop at 30 FPS
        pygame.display.update()
    return user_pressed_key
def xsay(msg,engine,surf=None,quit_if_done=False,timeout=None):
    """
    **experimental** a graphical retro-style version of `say`.

    Renders `msg` typewriter-style in one thread while speaking it via the
    TTS engine in a second thread; returns the key the user pressed (if any).
    `timeout` is currently unused.
    """
    if not surf:
        surf = _init_screen(fullscreen=FULLSCREEN)
    # t1 draws the message and returns the pressed key; t2 speaks it
    t1 = ThreadWithReturnValue(target=_show_message,args=(surf,msg,))
    t2 = threading.Thread(target=say,args=(msg,engine))
    t1.start()
    #time.sleep(0.5)
    t2.start()
    res = t1.join()
    t2.join()
    if quit_if_done:
        pygame.quit()
    return res
def xask(msg,r_yes,r_no,engine,surf=None,quit_if_done=False):
    """
    Ask a yes/no question graphically and (optionally) show/speak a reply.

    A key in {'y','Y','j','J'} counts as a positive answer ('j' covers
    German "ja"). If a reply text (r_yes / r_no) is given, it is appended
    below the question and spoken while being displayed.

    Returns:
        bool: True for a positive answer, False otherwise.
    """
    key_pressed = xsay(msg,engine,surf,quit_if_done)
    is_yes = False
    if key_pressed in ['y','Y','j','J']: is_yes = True
    if is_yes:
        page_from_pos = len(msg)  # start the typewriter reveal after the question
        if r_yes:
            msg += key_pressed + "\n" + r_yes
            t1 = ThreadWithReturnValue(target=_show_message,args=(surf,msg,page_from_pos,True,False))
            t2 = threading.Thread(target=say,args=(r_yes,engine,))
            t1.start()
            t2.start()
            res = t1.join()
            t2.join()
    else:
        page_from_pos = len(msg)
        if r_no:
            # NOTE(review): drops the last character of r_no and appends '.'
            # -- presumably to normalize punctuation for display; confirm.
            msg += key_pressed + "\n" + r_no[:-1] + "."
            t1 = ThreadWithReturnValue(target=_show_message,args=(surf,msg,page_from_pos,True,False))
            t2 = threading.Thread(target=say,args=(r_no,engine,))
            t1.start()
            t2.start()
            res = t1.join()
            t2.join()
    return is_yes
def main():
    """
    Parse the CLI arguments, ask the question and optionally execute a
    follow-up command for the chosen answer.

    Returns:
        bool: True if the user answered positively, False otherwise.
    """
    import shlex  # local import: only needed when a *-exec command is given
    kwargs = docopt(__doc__, version=str('.'.join([str(el) for el in __version__])))
    logger.debug("kwargs={}".format(kwargs))
    # .get() keeps msg defined (None) even if docopt omitted the key;
    # the original `if '<msg>' in kwargs:` left msg unbound otherwise
    msg = kwargs.get('<msg>')
    reply_y = kwargs['--yes']
    exec_y = kwargs['--yes-exec']
    reply_n = kwargs['--no']
    exec_n = kwargs['--no-exec']
    engine = kwargs['--engine']
    if not engine in available_engines():
        engine=ENGINE_DEFAULT
    if not msg:
        # no message on the command line -> read it from stdin
        if _VERBOSITY > 0:
            msg = input("what should i say? : ")
        else:
            msg = input()
    surf = _init_screen(fullscreen=FULLSCREEN)
    is_yes = xask(msg,reply_y,reply_n,engine,surf,quit_if_done=False)
    # pick the follow-up command matching the answer (may be None)
    cmd = exec_y if is_yes else exec_n
    if cmd:
        # fix: previously logged exec_n even on the yes-branch
        logger.info("executing '{}'".format(cmd))
        # fix: `subprocess.run(['{}'].format(cmd))` raised AttributeError
        # (lists have no .format). Split the command string into argv
        # instead of using shell=True.
        subprocess.run(shlex.split(cmd))
    return is_yes
if __name__ == '__main__':
    # Run main(), log the elapsed wall-clock time, and exit with a
    # unix-style return code: 0 == yes/ok, 1 == no.
    s = time.perf_counter()
    is_yes = main()
    elapsed = time.perf_counter() - s
    logger.info(f"{__file__} executed in {elapsed:0.2f} seconds.")
    yn_rc = 0
    if not is_yes:
        yn_rc = 1
    sys.exit(yn_rc)
| 2,912 | 25 | 172 |
699a7caa0cd3ef8b77366228cf23f5bb5950aef4 | 2,717 | py | Python | Day 77/OldKeypadInForeignLanguage.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 77/OldKeypadInForeignLanguage.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | Day 77/OldKeypadInForeignLanguage.py | sandeep-krishna/100DaysOfCode | af4594fb6933e4281d298fa921311ccc07295a7c | [
"MIT"
] | null | null | null | '''
Some people remain old fashioned and John is one of them. He doesn't like the new smart phones with full keypads and still uses the old keypads which require you to tap a key multiple times to type a single letter. For example, if the keyboard has two keys, one with the letters "adef" and the other one with the letters "zyx", then typing 'a' requires one keystroke, typing 'f' requires four keystrokes, typing 'y' requires two keystrokes, and so on.
He recently moved to a new country where the language is such that his keypad is not the most efficient. In every language some characters occur more often than others. He wants to create a specific keyboard for this language that uses N different letters. He has a large body of text in this language, and has already analyzed it to find the frequencies of all N letters of its alphabet.
You are given an array 'frequencies' with N elements. Each element of frequencies is the number of times one of the letters in the new language appears in the text John has. Each element of frequencies will be strictly positive. (I.e., each of the N letters occurs at least once.)
You are also given an array keySize. The number of elements of keySize is the number of keys on the keyboard. Each element of keySize gives the maximal number of letters that maybe put on one of the keys.
Find an assignment of letters to keys that minimizes the number of keystrokes needed to type the entire text. Output that minimum number of keystrokes. If there is not enough room on the keys and some letters of the alphabet won't fit, Output -1 instead.
Input Format
The first line will contain a number 'N' that specifies the size of 'frequencies' array
The second line will contain N numbers that form the frequencies array
The third line contains a number 'K' that specifies the size of the 'keySize' array
The fourth line contains K numbers that form the keySize array
Output Format
Output a single integer that is answer to the problem.
Constraints
frequencies will contain between 1 and 50 elements, inclusive.
Each element of frequencies will be between 1 and 1,000, inclusive.
keySizes will contain between 1 and 50 elements, inclusive.
Each element of keySizes will be between 1 and 50, inclusive.
SAMPLE INPUT
4
7 3 4 1
2
2 2
SAMPLE OUTPUT
19
'''
# Read: N, the N letter frequencies, K, and the K key capacities.
n = int(input())
freq = [int(x) for x in input().split()]
k = int(input())
keysizes = [int(x) for x in input().split()]
if n > sum(keysizes):
    # not enough slots on the keyboard for all letters
    print('-1')
else:
    freq.sort()
    total = 0
    h = 1  # keystroke cost of the slot currently being filled on every key
    while len(freq) != 0:
        # greedily assign the most frequent remaining letters to slot h
        # of each key that still has capacity
        for i in range(len(keysizes)):
            try:
                total += freq.pop() * h
            except IndexError:
                break
            keysizes[i] -= 1
        # Drop exhausted keys. Rebuilding the list fixes the original
        # remove-while-iterating bug, which skipped adjacent zero entries
        # and then kept assigning letters to already-full keys at a
        # too-low keystroke cost (e.g. keysizes [1,1,3] with 5 letters).
        keysizes = [size for size in keysizes if size > 0]
        h += 1
print(total) | 41.8 | 451 | 0.753773 | '''
Some people remain old fashioned and John is one of them. He doesn't like the new smart phones with full keypads and still uses the old keypads which require you to tap a key multiple times to type a single letter. For example, if the keyboard has two keys, one with the letters "adef" and the other one with the letters "zyx", then typing 'a' requires one keystroke, typing 'f' requires four keystrokes, typing 'y' requires two keystrokes, and so on.
He recently moved to a new country where the language is such that his keypad is not the most efficient. In every language some characters occur more often than others. He wants to create a specific keyboard for this language that uses N different letters. He has a large body of text in this language, and has already analyzed it to find the frequencies of all N letters of its alphabet.
You are given an array 'frequencies' with N elements. Each element of frequencies is the number of times one of the letters in the new language appears in the text John has. Each element of frequencies will be strictly positive. (I.e., each of the N letters occurs at least once.)
You are also given an array keySize. The number of elements of keySize is the number of keys on the keyboard. Each element of keySize gives the maximal number of letters that maybe put on one of the keys.
Find an assignment of letters to keys that minimizes the number of keystrokes needed to type the entire text. Output that minimum number of keystrokes. If there is not enough room on the keys and some letters of the alphabet won't fit, Output -1 instead.
Input Format
The first line will contain a number 'N' that specifies the size of 'frequencies' array
The second line will contain N numbers that form the frequencies array
The third line contains a number 'K' that specifies the size of the 'keySize' array
The fourth line contains K numbers that form the keySize array
Output Format
Output a single integer that is answer to the problem.
Constraints
frequencies will contain between 1 and 50 elements, inclusive.
Each element of frequencies will be between 1 and 1,000, inclusive.
keySizes will contain between 1 and 50 elements, inclusive.
Each element of keySizes will be between 1 and 50, inclusive.
SAMPLE INPUT
4
7 3 4 1
2
2 2
SAMPLE OUTPUT
19
'''
# Read: N, the N letter frequencies, K, and the K key capacities.
n = int(input())
freq = [int(x) for x in input().split()]
k = int(input())
keysizes = [int(x) for x in input().split()]
if n > sum(keysizes):
    # not enough slots on the keyboard for all letters
    print('-1')
else:
    freq.sort()
    total = 0
    h = 1  # keystroke cost of the slot currently being filled on every key
    while len(freq) != 0:
        # greedily assign the most frequent remaining letters to slot h
        # of each key that still has capacity
        for i in range(len(keysizes)):
            try:
                total += freq.pop() * h
            except IndexError:
                break
            keysizes[i] -= 1
        # Drop exhausted keys. Rebuilding the list fixes the original
        # remove-while-iterating bug, which skipped adjacent zero entries
        # and then kept assigning letters to already-full keys at a
        # too-low keystroke cost (e.g. keysizes [1,1,3] with 5 letters).
        keysizes = [size for size in keysizes if size > 0]
        h += 1
print(total) | 0 | 0 | 0 |
85fc1eacc08132e53a52cf03147fa03f7403b4c0 | 3,016 | py | Python | mosaik_docker/util/config_data.py | ERIGrid2/mosaik-docker | b44958cb50186fd57b67c84dee22109d7d4400c6 | [
"BSD-3-Clause"
] | 1 | 2021-02-18T12:34:17.000Z | 2021-02-18T12:34:17.000Z | mosaik_docker/util/config_data.py | ERIGrid2/mosaik-docker | b44958cb50186fd57b67c84dee22109d7d4400c6 | [
"BSD-3-Clause"
] | null | null | null | mosaik_docker/util/config_data.py | ERIGrid2/mosaik-docker | b44958cb50186fd57b67c84dee22109d7d4400c6 | [
"BSD-3-Clause"
] | 1 | 2020-10-09T11:11:20.000Z | 2020-10-09T11:11:20.000Z | import json
import pathlib
from .._config import CONFIG_FILE_NAME
class ConfigData:
    '''
    Dict-like accessor for the configuration data of a simulation setup.
    '''

    def __setitem__( self, index, value ):
        '''
        Store a configuration value under the given key.
        '''
        self.__config_data[index] = value

    def __getitem__( self, index ):
        '''
        Look up a configuration value by key.
        '''
        return self.__config_data[index]

    def __contains__( self, item ):
        '''
        Report whether the configuration contains the given item.
        '''
        return item in self.__config_data

    def write( self ):
        '''
        Persist the configuration to its JSON file (pretty-printed,
        followed by a trailing newline).
        '''
        serialized = json.dumps(
            self.__config_data,
            indent = 2,
            separators = ( ',', ': ' ) )
        with open( self.path, 'w' ) as sim_setup_file:
            sim_setup_file.write( serialized )
            sim_setup_file.write( '\n' )

    @property
    def path( self ):
        '''
        Absolute path to the configuration file.
        '''
        return self.__sim_setup_file_path

    @property
    def data( self ):
        '''
        The configuration data as a dict.
        '''
        return self.__config_data

    def __recursive_del_empty_str_from_lists( self, obj ):
        '''
        Helper: walk nested dicts and remove one empty string from each
        list that contains any.
        '''
        for key, value in obj.items():
            if isinstance( value, dict ):
                self.__recursive_del_empty_str_from_lists( value )
            elif isinstance( value, list ) and '' in value:
                value.remove( '' )
| 30.464646 | 115 | 0.558687 | import json
import pathlib
from .._config import CONFIG_FILE_NAME
class ConfigData:
    '''
    This class handles access to simulation setup configuration data.
    '''
    def __init__( self, setup_dir ):
        '''
        Load and sanitize the simulation setup configuration found in
        `setup_dir` (file name given by CONFIG_FILE_NAME).

        :param setup_dir: path to the setup directory (str or pathlib.Path)
        :raises TypeError: if setup_dir has the wrong type
        :raises RuntimeError: if the directory or the config file does not exist
        :raises Exception: if the config file is not valid JSON
        '''
        if not ( isinstance( setup_dir, str ) or isinstance( setup_dir, pathlib.Path ) ):
            raise TypeError( 'Parameter \'setup_dir\' must be of type \'str\' or \'pathlib.Path\'' )
        try:
            # strict=True -> raises if the directory does not exist
            setup_dir_path = pathlib.Path( setup_dir ).resolve( strict = True )
        except Exception as err:
            raise RuntimeError( 'not a valid directory: {}\n{}'.format( setup_dir, err ) )
        # Load sim setup configuration.
        try:
            self.__sim_setup_file_path = pathlib.Path( setup_dir_path, CONFIG_FILE_NAME ).resolve( strict = True )
        except Exception as err:
            raise RuntimeError( 'not a valid simulation setup: {}\n{}'.format( setup_dir_path, err ) )
        with open( self.__sim_setup_file_path ) as sim_setup_file:
            try:
                self.__config_data = json.load( sim_setup_file )
            except Exception as err:
                raise Exception( 'Invalid JSON format: {}\n{}'.format( self.__sim_setup_file_path, str( err ) ) )
        # Sanitize configuration: remove empty strings from lists.
        self.__recursive_del_empty_str_from_lists( self.__config_data )
    def __setitem__( self, index, value ):
        '''
        For setting a configuration value.
        '''
        self.__config_data[index] = value
    def __getitem__( self, index ):
        '''
        For retrieving a configuration value.
        '''
        return self.__config_data[index]
    def __contains__( self, item ):
        '''
        Returns a boolean value depending on whether the configuration contains the specified item or not.
        '''
        return item in self.__config_data
    def write( self ):
        '''
        Save configuration (pretty-printed JSON plus a trailing newline).
        '''
        with open( self.path, 'w' ) as sim_setup_file:
            json.dump(
                self.__config_data,
                sim_setup_file,
                indent = 2,
                separators = ( ',', ': ' ) )
            sim_setup_file.write( '\n' )
    @property
    def path( self ):
        '''
        Absolute path to configuration file.
        '''
        return self.__sim_setup_file_path
    @property
    def data( self ):
        '''
        Configuration data as dict.
        '''
        return self.__config_data
    def __recursive_del_empty_str_from_lists( self, obj ):
        '''
        Helper function: recursively remove empty strings from lists in dicts.
        Note: removes at most one '' per list (list.remove drops the first match).
        '''
        for k,v in obj.items():
            if isinstance( v, list ):
                if '' in v:
                    v.remove( '' )
            elif isinstance( v, dict ):
                self.__recursive_del_empty_str_from_lists( v )
| 1,190 | 0 | 27 |
e8af7303fb7ef02910f0e067c44c0d11ac46a554 | 12,950 | py | Python | shaDow/para_samplers/base_graph_samplers.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | null | null | null | shaDow/para_samplers/base_graph_samplers.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | 1 | 2022-01-22T11:20:00.000Z | 2022-01-22T11:20:00.000Z | shaDow/para_samplers/base_graph_samplers.py | yxia-fb/shaDow-GNN | 2b867011c7084d4ed1b407e29f3ee09632fcc3dc | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse
from typing import Union, List
from dataclasses import dataclass, field, fields, InitVar
import scipy.sparse as sp
@dataclass
class Subgraph:
    """
    Represents the meta information of sampled subgraphs.

    The eight data fields together form a CSR adjacency (indptr / indices /
    data) over the subgraph's nodes, plus the mapping back to the full graph
    (node / edge_index), the target-node positions within the subgraph, and
    per-node hop distances and PPR scores.
    """
    # data fields (CSR adjacency + bookkeeping arrays)
    indptr : np.ndarray
    indices : np.ndarray
    data : np.ndarray
    node : np.ndarray
    edge_index : np.ndarray
    target : np.ndarray
    hop : np.ndarray
    ppr : np.ndarray
    # init fields: upper bounds used only to pick compact integer dtypes
    cap_node_full : InitVar[int]=None
    cap_edge_full : InitVar[int]=None
    cap_node_subg : InitVar[int]=None
    cap_edge_subg : InitVar[int]=None
    validate : InitVar[bool]=True
    # summary
    names_data_fields = ['indptr', 'indices', 'data', 'node', 'edge_index', 'target', 'hop', 'ppr']
    def __post_init__(self, cap_node_full, cap_edge_full, cap_node_subg, cap_edge_subg, validate):
        """
        Downcast the data fields to the smallest sufficient dtypes.

        All subgraphs sampled by the same sampler should have the same dtype, since cap_*_subg are an upper bound
        for all subgraphs under that sampler.
        """
        if cap_node_full is not None and cap_edge_full is not None \
            and cap_node_subg is not None and cap_edge_subg is not None:
            # default (widest) dtypes; narrowed below when the caps allow it
            dtype = {'indptr' : np.int64,
                     'indices' : np.int64,
                     'data' : np.float32,
                     'node' : np.int64,
                     'edge_index': np.int64,
                     'target' : np.int64,
                     'hop' : np.int64,
                     'ppr' : np.float32}
            # pick uint16 if the bound fits, otherwise uint32
            f_dtype = lambda n : np.uint16 if n < 2**16 else np.uint32
            if cap_node_full < 2**32:
                dtype['node'] = f_dtype(cap_node_full)
            if cap_edge_full < 2**32:
                dtype['edge_index'] = f_dtype(cap_edge_full)
            if cap_node_subg < 2**32:
                # indices/target/hop all index into subgraph nodes
                dtype['indices'] = f_dtype(cap_node_subg)
                dtype['target'] = f_dtype(cap_node_subg)
                dtype['hop'] = f_dtype(cap_node_subg)
            if cap_edge_subg < 2**32:
                # indptr entries point into the subgraph edge array
                dtype['indptr'] = f_dtype(cap_edge_subg)
            assert set(dtype.keys()) == set(self.names_data_fields)
            for n in self.names_data_fields:
                v = getattr(self, n)
                if v is not None:
                    setattr(self, n, v.astype(dtype[n], copy=False))
            # explicitly handle data -- if it is all 1., store a broadcast
            # view of a single element instead of a full array
            if np.all(self.data == 1.):
                self.data = np.broadcast_to(np.array([1.]), self.data.size)
        if validate:
            # check_valid is not defined in this excerpt -- presumably
            # provided elsewhere on this class; confirm.
            self.check_valid()
    @classmethod
    def cat_to_block_diagonal(cls, subgs : list):
        """ Concatenate subgraphs into a full adj matrix (i.e., into the block diagonal form) """
        # exclusive prefix sums: offset of each subgraph's nodes/edges
        # within the concatenated arrays
        offset_indices = np.cumsum([s.node.size for s in subgs]) # always int64
        offset_indptr = np.cumsum([s.edge_index.size for s in subgs]) # ^
        offset_indices[1:] = offset_indices[:-1]
        offset_indices[0] = 0
        offset_indptr[1:] = offset_indptr[:-1]
        offset_indptr[0] = 0
        node_batch = np.concatenate([s.node for s in subgs]) # keep original dtype
        edge_index_batch = np.concatenate([s.edge_index for s in subgs]) # ^
        data_batch = np.concatenate([s.data for s in subgs]) # ^
        hop_batch = np.concatenate([s.hop for s in subgs]) # ^
        if subgs[0].ppr.size == 0:
            ppr_batch = np.array([])
        else: # need to explicitly check due to .max() function
            ppr_batch = np.concatenate([s.ppr/s.ppr.max() for s in subgs]) # renorm ppr
        # widen to int64 before adding offsets to avoid overflow of the
        # compact per-subgraph dtypes
        target_batch_itr = [s.target.astype(np.int64) for s in subgs]
        indptr_batch_itr = [s.indptr.astype(np.int64) for s in subgs]
        indices_batch_itr = [s.indices.astype(np.int64) for s in subgs]
        target_batch, indptr_batch, indices_batch = [], [], []
        for i in range(len(subgs)):
            target_batch.append(target_batch_itr[i] + offset_indices[i])
            if i > 0: # end of indptr1 equals beginning of indptr2. So remove one duplicate to ensure correctness.
                indptr_batch_itr[i] = indptr_batch_itr[i][1:]
            indptr_batch.append(indptr_batch_itr[i] + offset_indptr[i])
            indices_batch.append(indices_batch_itr[i] + offset_indices[i])
        target_batch = np.concatenate(target_batch)
        indptr_batch = np.concatenate(indptr_batch)
        indices_batch = np.concatenate(indices_batch)
        ret_subg = cls(
            indptr=indptr_batch,
            indices=indices_batch,
            data=data_batch,
            node=node_batch,
            edge_index=edge_index_batch,
            target=target_batch,
            hop=hop_batch,
            ppr=ppr_batch,
            cap_node_full=2**63, # just be safe. Note that concated subgraphs are only used for one batch.
            cap_edge_full=2**63,
            cap_node_subg=2**63,
            cap_edge_subg=2**63,
            validate=True
        )
        return ret_subg
class GraphSampler:
    """
    This is the sampler super-class. Any shallow sampler is supposed to perform
    the following meta-steps:
     1. [optional] Preprocessing: e.g., for PPR sampler, we need to calculate the
        PPR vector for each node in the training graph. This is to be performed
        only once.
        ==> Need to override the `preproc()` in sub-class
     2. Parallel sampling: launch a batch of graph samplers in parallel and sample
        subgraphs independently. For efficiency, the actual sampling operation
        happen in C++. And the classes here is mainly just a wrapper.
        ==> Need to set self.para_sampler to the appropriate C++ sampler
          in `__init__()` of the sampler sub-class
     3. Post-processing: upon getting the sampled subgraphs, we need to prepare the
        appropriate information (e.g., subgraph adj with renamed indices) to
        enable the PyTorch trainer. Also, we need to do data conversion from C++
        to Python (or, mostly numpy). Post-processing is handled via PyBind11.
    """
    def __init__(self, adj, node_target, aug_feat, args_preproc):
        """
        Inputs:
            adj             scipy sparse CSR matrix of the training graph
            node_target     1D np array storing the indices of the training nodes
            aug_feat        feature-augmentation options (stored as-is)
            args_preproc    dict, addition arguments needed for pre-processing

        Outputs:
            None

        Note: `preproc` is not defined on this base class -- subclasses
        must provide it before this constructor runs.
        """
        self.adj = adj
        self.node_target = np.unique(node_target)
        self.aug_feat = aug_feat
        # size in terms of number of vertices in subgraph
        self.name_sampler = "None"
        self.node_subgraph = None
        self.preproc(**args_preproc)
    def helper_extract_subgraph(self, node_ids, target_ids=None):
        """
        Used for serial Python sampler (not for the parallel C++ sampler).
        Return adj of node-induced subgraph and other corresponding data struct.

        Inputs:
            node_ids        1D np array, each element is the ID in the original
                            training graph.
            target_ids      optional iterable of original-graph IDs to be mapped
                            to subgraph-local indices (must be members of node_ids).
        Outputs:
            indptr          np array, indptr of the subg adj CSR
            indices         np array, indices of the subg adj CSR
            data            np array, data of the subg adj CSR. Since we have aggregator
                            normalization, we can simply set all data values to be 1
            subg_nodes      np array, i-th element stores the node ID of the original graph
                            for the i-th node in the subgraph. Used to index the full feats
                            and label matrices.
            subg_edge_index np array, i-th element stores the edge ID of the original graph
                            for the i-th edge in the subgraph. Used to index the full array
                            of aggregation normalization.
        """
        # Let n = num subg nodes; m = num subg edges
        node_ids = np.unique(node_ids)
        node_ids.sort()
        orig2subg = {n: i for i, n in enumerate(node_ids)}
        n = node_ids.size  # NOTE(review): shadowed by the inner loop below
        indptr = np.zeros(node_ids.size + 1)
        indices = []
        subg_edge_index = []
        subg_nodes = node_ids
        for nid in node_ids:
            # slice this node's neighbor list out of the full-graph CSR
            idx_s, idx_e = self.adj.indptr[nid], self.adj.indptr[nid + 1]
            neighs = self.adj.indices[idx_s : idx_e]
            for i_n, n in enumerate(neighs):
                # keep only edges whose endpoint is also in the subgraph
                if n in orig2subg:
                    indices.append(orig2subg[n])
                    indptr[orig2subg[nid] + 1] += 1
                    subg_edge_index.append(idx_s + i_n)
        indptr = indptr.cumsum().astype(np.int64)
        indices = np.array(indices)
        subg_edge_index = np.array(subg_edge_index)
        data = np.ones(indices.size)
        assert indptr[-1] == indices.size == subg_edge_index.size
        if target_ids is not None:
            return indptr, indices, data, subg_nodes, subg_edge_index,\
                np.array([orig2subg[t] for t in target_ids])
        else:
            return indptr, indices, data, subg_nodes, subg_edge_index
class KHopSamplingBase(GraphSampler):
    """
    k-hop neighborhood sampler:
    1. draw `size_root` root nodes uniformly from the training nodes;
    2. expand a `depth`-hop neighborhood around the roots, where each node at
       hop i fans out to at most `budget` nodes at hop i+1 (budget == -1
       expands every hop-(i+1) neighbor without subsampling);
    3. return the subgraph induced by all touched nodes.
    """
    def __init__(self, adj, node_target, aug_feat, size_root, depth, budget):
        """
        Inputs:
            adj          see super-class
            node_target  see super-class
            size_root    int, number of randomly picked root nodes
            depth        int, number of hops to expand
            budget       int, fan-out cap per node per hop (-1 = unlimited)
        Outputs:
            None
        """
        self.size_root, self.depth, self.budget = size_root, depth, budget
        self.name = "khop"
        super().__init__(adj, node_target, aug_feat, {})
class PPRSamplingBase(GraphSampler):
    """
    Sampler whose node selection is driven by PPR (personalized PageRank)
    scores.
    """
    def __init__(self, adj, node_target, aug_feat, size_root, k, alpha=0.85, epsilon=1e-5, threshold=0):
        """
        Inputs:
            adj          see super-class
            node_target  see super-class
            size_root    int, number of randomly picked root nodes
            k            int, number of hops to expand
            alpha        PPR damping factor -- presumably; confirm in the subclass
            epsilon      PPR approximation tolerance -- TODO confirm
            threshold    cutoff applied to PPR scores -- TODO confirm
        Outputs:
            None
        """
        self.size_root = size_root
        self.k = k
        self.alpha, self.epsilon, self.threshold = alpha, epsilon, threshold
        self.name = "ppr"
        super().__init__(adj, node_target, aug_feat, {})
| 42.739274 | 121 | 0.58417 | import numpy as np
import scipy.sparse
from typing import Union, List
from dataclasses import dataclass, field, fields, InitVar
import scipy.sparse as sp
@dataclass
class Subgraph:
    """
    Represents the meta information of sampled subgraphs.

    Holds the subgraph adjacency as a CSR triple (indptr / indices / data)
    plus arrays mapping subgraph nodes and edges back to the full graph.
    __post_init__ optionally down-casts the arrays to compact dtypes based on
    the cap_* upper bounds supplied by the sampler.
    """
    # data fields -- CSR adjacency of the subgraph plus bookkeeping arrays
    indptr : np.ndarray
    indices : np.ndarray
    data : np.ndarray
    node : np.ndarray        # node[i] = full-graph ID of subgraph node i
    edge_index : np.ndarray  # edge_index[i] = full-graph ID of subgraph edge i
    target : np.ndarray
    hop : np.ndarray
    ppr : np.ndarray
    # init fields (not stored; consumed by __post_init__)
    cap_node_full : InitVar[int]=None
    cap_edge_full : InitVar[int]=None
    cap_node_subg : InitVar[int]=None
    cap_edge_subg : InitVar[int]=None
    validate : InitVar[bool]=True
    # summary -- names of all data fields, used for generic copy/cast loops
    names_data_fields = ['indptr', 'indices', 'data', 'node', 'edge_index', 'target', 'hop', 'ppr']
    def __post_init__(self, cap_node_full, cap_edge_full, cap_node_subg, cap_edge_subg, validate):
        """
        All subgraphs sampled by the same sampler should have the same dtype, since cap_*_subg are an upper bound
        for all subgraphs under that sampler.

        Dtype narrowing only happens when all four caps are provided; values
        below 2**32 allow uint16/uint32 storage of the corresponding arrays.
        """
        if cap_node_full is not None and cap_edge_full is not None \
                and cap_node_subg is not None and cap_edge_subg is not None:
            # Widest safe defaults; selectively narrowed below.
            dtype = {'indptr' : np.int64,
                     'indices' : np.int64,
                     'data' : np.float32,
                     'node' : np.int64,
                     'edge_index': np.int64,
                     'target' : np.int64,
                     'hop' : np.int64,
                     'ppr' : np.float32}
            f_dtype = lambda n : np.uint16 if n < 2**16 else np.uint32
            if cap_node_full < 2**32:
                dtype['node'] = f_dtype(cap_node_full)
            if cap_edge_full < 2**32:
                dtype['edge_index'] = f_dtype(cap_edge_full)
            if cap_node_subg < 2**32:
                # indices / target / hop all index subgraph nodes
                dtype['indices'] = f_dtype(cap_node_subg)
                dtype['target'] = f_dtype(cap_node_subg)
                dtype['hop'] = f_dtype(cap_node_subg)
            if cap_edge_subg < 2**32:
                # indptr values count subgraph edges
                dtype['indptr'] = f_dtype(cap_edge_subg)
            assert set(dtype.keys()) == set(self.names_data_fields)
            for n in self.names_data_fields:
                v = getattr(self, n)
                if v is not None:
                    # copy=False: cast in place when the dtype already matches
                    setattr(self, n, v.astype(dtype[n], copy=False))
            # explicitly handle data -- if it is all 1.
            # NOTE(review): broadcast_to yields a read-only float64 view,
            # dropping the float32 dtype chosen above -- confirm downstream
            # code never writes to `data`.
            if np.all(self.data == 1.):
                self.data = np.broadcast_to(np.array([1.]), self.data.size)
        if validate:
            self.check_valid()
    def _copy(self):
        """Return a deep copy (every data field's array is copied)."""
        datacopy = {}
        for n in self.names_data_fields:
            datacopy[n] = getattr(self, n).copy()
        return self.__class__(**datacopy)
    def check_valid(self):
        """Assert internal consistency of the CSR triple and per-node arrays."""
        assert self.indices.size == self.edge_index.size == self.data.size == self.indptr[-1]
        assert self.hop.size == 0 or (self.hop.size == self.indptr.size - 1)
        assert self.ppr.size == 0 or (self.ppr.size == self.indptr.size - 1)
        assert self.indptr.size >= 2, "Subgraph must contain at least 1 node!"
    def num_nodes(self):
        """Number of nodes in the subgraph."""
        assert self.node.size == self.indptr.size - 1
        return self.node.size
    def num_edges(self):
        """Number of edges in the subgraph."""
        assert self.indices.size == self.edge_index.size == self.data.size == self.indptr[-1]
        return self.indices.size
    @classmethod
    def cat_to_block_diagonal(cls, subgs : list):
        """ Concatenate subgraphs into a full adj matrix (i.e., into the block diagonal form) """
        # Per-subgraph offsets: exclusive prefix sums of node / edge counts.
        offset_indices = np.cumsum([s.node.size for s in subgs]) # always int64
        offset_indptr = np.cumsum([s.edge_index.size for s in subgs]) # ^
        offset_indices[1:] = offset_indices[:-1]
        offset_indices[0] = 0
        offset_indptr[1:] = offset_indptr[:-1]
        offset_indptr[0] = 0
        node_batch = np.concatenate([s.node for s in subgs]) # keep original dtype
        edge_index_batch = np.concatenate([s.edge_index for s in subgs]) # ^
        data_batch = np.concatenate([s.data for s in subgs]) # ^
        hop_batch = np.concatenate([s.hop for s in subgs]) # ^
        if subgs[0].ppr.size == 0:
            ppr_batch = np.array([])
        else: # need to explicitly check due to .max() function
            ppr_batch = np.concatenate([s.ppr/s.ppr.max() for s in subgs]) # renorm ppr
        # Work in int64 while shifting, to avoid overflowing narrowed dtypes.
        target_batch_itr = [s.target.astype(np.int64) for s in subgs]
        indptr_batch_itr = [s.indptr.astype(np.int64) for s in subgs]
        indices_batch_itr = [s.indices.astype(np.int64) for s in subgs]
        target_batch, indptr_batch, indices_batch = [], [], []
        for i in range(len(subgs)):
            target_batch.append(target_batch_itr[i] + offset_indices[i])
            if i > 0: # end of indptr1 equals beginning of indptr2. So remove one duplicate to ensure correctness.
                indptr_batch_itr[i] = indptr_batch_itr[i][1:]
            indptr_batch.append(indptr_batch_itr[i] + offset_indptr[i])
            indices_batch.append(indices_batch_itr[i] + offset_indices[i])
        target_batch = np.concatenate(target_batch)
        indptr_batch = np.concatenate(indptr_batch)
        indices_batch = np.concatenate(indices_batch)
        ret_subg = cls(
            indptr=indptr_batch,
            indices=indices_batch,
            data=data_batch,
            node=node_batch,
            edge_index=edge_index_batch,
            target=target_batch,
            hop=hop_batch,
            ppr=ppr_batch,
            cap_node_full=2**63, # just be safe. Note that concated subgraphs are only used for one batch.
            cap_edge_full=2**63,
            cap_node_subg=2**63,
            cap_edge_subg=2**63,
            validate=True
        )
        return ret_subg
    def to_csr_sp(self):
        """Return the subgraph adjacency as a scipy.sparse CSR matrix,
        preserving the (possibly narrowed) index dtypes."""
        num_nodes = self.indptr.size - 1
        adj = sp.csr_matrix((self.data, self.indices, self.indptr), shape=(num_nodes, num_nodes))
        if self.indices.dtype != np.int64:
            # scipy upcasts indices/indptr internally; restore compact dtypes.
            adj.indices = adj.indices.astype(self.indices.dtype, copy=False)
            adj.indptr = adj.indptr.astype(self.indptr.dtype, copy=False)
        return adj
class GraphSampler:
    """
    Superclass for shallow subgraph samplers.

    A concrete sampler covers three meta-steps:
    1. [optional] pre-processing (override ``preproc`` in the subclass),
       e.g. computing a PPR vector per training node; performed once.
    2. parallel sampling of independent subgraphs; the heavy lifting happens
       in C++, and the subclass sets ``self.para_sampler`` to the matching
       C++ sampler in its ``__init__``.
    3. post-processing of the sampled subgraphs (renamed adjacency indices,
       etc.) so the PyTorch trainer can consume them; C++-to-numpy data
       conversion is handled via PyBind11.
    """
    def __init__(self, adj, node_target, aug_feat, args_preproc):
        """
        Inputs:
            adj             scipy sparse CSR matrix of the training graph
            node_target     1D np array holding the training node indices
            args_preproc    dict of extra arguments for pre-processing
        Outputs:
            None
        """
        self.adj = adj
        self.node_target = np.unique(node_target)
        self.aug_feat = aug_feat
        # placeholder name; "size" semantics are in number of subgraph vertices
        self.name_sampler = "None"
        self.node_subgraph = None
        self.preproc(**args_preproc)

    def preproc(self, **kwargs):
        """One-time pre-processing hook; concrete samplers must override."""
        raise NotImplementedError

    def par_sample(self, **kwargs):
        """Delegate batched subgraph sampling to the wrapped C++ sampler."""
        return self.para_sampler.par_sample()

    def helper_extract_subgraph(self, node_ids, target_ids=None):
        """
        Build the node-induced subgraph for the given original-graph node IDs
        (serial Python path; the parallel C++ sampler does not use this).

        Inputs:
            node_ids    1D np array of node IDs in the original training graph
            target_ids  optional iterable of original IDs to remap into the
                        subgraph index space
        Outputs:
            indptr, indices, data   CSR triple of the subgraph adjacency;
                                    data is all ones, since aggregator
                                    normalization is applied elsewhere
            subg_nodes              i-th entry is the original-graph node ID
                                    of subgraph node i
            subg_edge_index         i-th entry is the original-graph edge ID
                                    of subgraph edge i
            [targets]               only when target_ids is given: their
                                    positions in the subgraph numbering
        """
        subg_nodes = np.unique(node_ids)
        subg_nodes.sort()  # np.unique already sorts; kept as an explicit guarantee
        full2sub = {orig: sub for sub, orig in enumerate(subg_nodes)}
        # degree_shifted[i + 1] accumulates the subgraph out-degree of node i
        degree_shifted = np.zeros(subg_nodes.size + 1)
        cols_sub = []
        edges_orig = []
        adj_indptr = self.adj.indptr
        adj_indices = self.adj.indices
        for orig_id in subg_nodes:
            begin = adj_indptr[orig_id]
            end = adj_indptr[orig_id + 1]
            row_sub = full2sub[orig_id]
            for offset, neigh in enumerate(adj_indices[begin:end]):
                if neigh in full2sub:  # keep only edges whose endpoint is in the subgraph
                    cols_sub.append(full2sub[neigh])
                    degree_shifted[row_sub + 1] += 1
                    edges_orig.append(begin + offset)
        indptr = degree_shifted.cumsum().astype(np.int64)
        indices = np.array(cols_sub)
        subg_edge_index = np.array(edges_orig)
        data = np.ones(indices.size)
        assert indptr[-1] == indices.size == subg_edge_index.size
        if target_ids is None:
            return indptr, indices, data, subg_nodes, subg_edge_index
        targets_sub = np.array([full2sub[t] for t in target_ids])
        return indptr, indices, data, subg_nodes, subg_edge_index, targets_sub
class NodeIIDBase(GraphSampler):
    """Minimal sampler with no pre-processing; presumably draws training
    nodes i.i.d. (name-based -- the sampling itself lives elsewhere)."""
    def __init__(self, adj, node_target, aug_feat):
        self.name = 'nodeIID'
        super().__init__(adj, node_target, aug_feat, {})

    def preproc(self, **kwargs):
        """Nothing to pre-compute for i.i.d. node sampling."""
        pass
class KHopSamplingBase(GraphSampler):
    """
    k-hop neighborhood sampler:
    1. draw `size_root` root nodes uniformly from the training nodes;
    2. expand a `depth`-hop neighborhood around the roots, where each node at
       hop i fans out to at most `budget` nodes at hop i+1 (budget == -1
       expands every hop-(i+1) neighbor without subsampling);
    3. return the subgraph induced by all touched nodes.
    """
    def __init__(self, adj, node_target, aug_feat, size_root, depth, budget):
        """
        Inputs:
            adj          see super-class
            node_target  see super-class
            size_root    int, number of randomly picked root nodes
            depth        int, number of hops to expand
            budget       int, fan-out cap per node per hop (-1 = unlimited)
        Outputs:
            None
        """
        self.size_root, self.depth, self.budget = size_root, depth, budget
        self.name = "khop"
        super().__init__(adj, node_target, aug_feat, {})

    def preproc(self, **kwargs):
        """No pre-processing required for k-hop sampling."""
        pass
class PPRSamplingBase(GraphSampler):
    """
    Sampler whose node selection is driven by PPR (personalized PageRank)
    scores.
    """
    def __init__(self, adj, node_target, aug_feat, size_root, k, alpha=0.85, epsilon=1e-5, threshold=0):
        """
        Inputs:
            adj          see super-class
            node_target  see super-class
            size_root    int, number of randomly picked root nodes
            k            int, number of hops to expand
            alpha        PPR damping factor -- presumably; confirm in the subclass
            epsilon      PPR approximation tolerance -- TODO confirm
            threshold    cutoff applied to PPR scores -- TODO confirm
        Outputs:
            None
        """
        self.size_root = size_root
        self.k = k
        self.alpha, self.epsilon, self.threshold = alpha, epsilon, threshold
        self.name = "ppr"
        super().__init__(adj, node_target, aug_feat, {})

    def preproc(self, **kwargs):
        """PPR pre-computation is subclass-specific; must be overridden."""
        raise NotImplementedError
| 1,329 | 11 | 339 |
1f34da9829da433908eee4db9139797f08e10d81 | 202 | py | Python | source/settings.py | ElPapi42/hexagonal-microservice | 675f6588c9b150712eb5f4c290c7a3f81b273573 | [
"MIT"
] | null | null | null | source/settings.py | ElPapi42/hexagonal-microservice | 675f6588c9b150712eb5f4c290c7a3f81b273573 | [
"MIT"
] | null | null | null | source/settings.py | ElPapi42/hexagonal-microservice | 675f6588c9b150712eb5f4c290c7a3f81b273573 | [
"MIT"
] | null | null | null | import os
import pathlib
from dotenv import load_dotenv
# Load .env vars into the process environment before reading them below.
load_dotenv(pathlib.Path('.').parent/'.env')
MONGO_URL = os.getenv('MONGO_URL')  # None when the variable is unset
MONGO_DATABASE = os.getenv('MONGO_DATABASE')  # None when the variable is unset
| 16.833333 | 44 | 0.752475 | import os
import pathlib
from dotenv import load_dotenv
# Load .env vars
load_dotenv(pathlib.Path('.').parent/'.env')
MONGO_URL = os.getenv('MONGO_URL')
MONGO_DATABASE = os.getenv('MONGO_DATABASE')
| 0 | 0 | 0 |
6a4dde058d4d3b742019103c2eca8efcabeb3393 | 541 | py | Python | Day 1 - AOC2020.py | rekbot2/Advent-of-Code-2020 | 9ebaec23441a6498b8f1153d39d86bfaddeecaf7 | [
"MIT"
] | null | null | null | Day 1 - AOC2020.py | rekbot2/Advent-of-Code-2020 | 9ebaec23441a6498b8f1153d39d86bfaddeecaf7 | [
"MIT"
] | null | null | null | Day 1 - AOC2020.py | rekbot2/Advent-of-Code-2020 | 9ebaec23441a6498b8f1153d39d86bfaddeecaf7 | [
"MIT"
] | null | null | null | #Read data
inputList = []
with open('inputs\input1.txt') as f:
for line in f.readlines():
inputList.append(int(line.strip()))
#Define functions
import itertools
import numpy as np
#Solution 1
print(solveProblem(inputList,2))
#Solution 2
print(solveProblem(inputList,3))
| 18.655172 | 57 | 0.656192 | #Read data
inputList = []  # expense-report entries for AoC 2020 day 1
# Parse one integer per line from the puzzle input file.
with open('inputs\input1.txt') as f:
    for line in f.readlines():
        inputList.append(int(line.strip()))
#Define functions
import itertools
import numpy as np
def solveProblem(inputList, n, target=2020):
    """Return the product of the first n-element combination of inputList
    whose entries sum to `target` (default 2020, as in the original), or
    None when no such combination exists.

    Improvements over the original: combinations are iterated lazily
    instead of being materialized into one big list, the 2020 constant is
    a backward-compatible parameter, and the no-match None return is
    explicit.
    """
    for combination in itertools.combinations(inputList, n):
        if sum(combination) == target:
            # np.prod keeps the original return type (numpy integer scalar)
            return np.prod(combination)
    return None
#Solution 1: product of the pair summing to 2020
print(solveProblem(inputList,2))
#Solution 2: product of the triple summing to 2020
print(solveProblem(inputList,3))
| 230 | 0 | 23 |
50284fd4432dd385e1cddf4f7a3d04bb9b82256d | 4,622 | py | Python | tests/python/proton_tests/reactor_interop.py | mqlight/qpid-proton | e13a089c15ebe674a8f3f02e9f2b3033595b015a | [
"Apache-2.0"
] | null | null | null | tests/python/proton_tests/reactor_interop.py | mqlight/qpid-proton | e13a089c15ebe674a8f3f02e9f2b3033595b015a | [
"Apache-2.0"
] | null | null | null | tests/python/proton_tests/reactor_interop.py | mqlight/qpid-proton | e13a089c15ebe674a8f3f02e9f2b3033595b015a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from .common import Test, free_tcp_port, Skipped
from proton import Message
from proton.handlers import CHandshaker, CFlowController
from proton.reactor import Reactor
import os
import subprocess
from threading import Thread
import time
| 28.012121 | 70 | 0.692774 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from .common import Test, free_tcp_port, Skipped
from proton import Message
from proton.handlers import CHandshaker, CFlowController
from proton.reactor import Reactor
import os
import subprocess
from threading import Thread
import time
class JavaThread(Thread):
    """Background thread running the ProtonJ interop peer
    (org.apache.qpid.proton.ProtonJInterop); its exit status is stored in
    self.result once run() finishes."""
    def __init__(self, operation, port, count):
        super().__init__()
        self.operation = operation
        self.port = str(port)
        self.count = str(count)
        self.result = 1  # non-zero until the subprocess reports success

    def run(self):
        cmd = ['java', 'org.apache.qpid.proton.ProtonJInterop',
               self.operation, self.port, self.count]
        self.result = subprocess.call(cmd)
class ReceiveHandler:
    """Accepts an AMQP connection, spawns the Java sender peer, and collects
    `count` message bodies before closing the acceptor."""
    def __init__(self, count):
        self.count = count
        self.handlers = [CHandshaker(), CFlowController()]
        self.messages = []

    def on_reactor_init(self, event):
        # Listen on an ephemeral port and point the Java "send" peer at it.
        port = free_tcp_port()
        self.acceptor = event.reactor.acceptor("127.0.0.1", port)
        self.java_thread = JavaThread("send", port, self.count)
        self.java_thread.start()

    def on_delivery(self, event):
        receiver = event.receiver
        msg = Message()
        if not (receiver and msg.recv(receiver)):
            return
        event.delivery.settle()
        self.messages.append(msg.body)
        self.count -= 1
        if self.count == 0:
            self.acceptor.close()
class SendHandler:
    """Opens a connection/session/sender to `host` and pushes `num_msgs`
    presettled messages ("message-1" .. "message-<num_msgs>"), then tears
    everything down after the last one."""
    def __init__(self, host, num_msgs):
        self.host = host
        self.num_msgs = num_msgs
        self.count = 0  # messages sent so far
        self.handlers = [CHandshaker()]

    def on_connection_init(self, event):
        connection = event.connection
        connection.hostname = self.host
        session = connection.session()
        sender = session.sender("sender")
        connection.open()
        session.open()
        sender.open()

    def on_link_flow(self, event):
        sender = event.sender
        if sender.credit <= 0 or self.count >= self.num_msgs:
            return
        self.count += 1
        delivery = sender.send(Message("message-" + str(self.count)))
        delivery.settle()
        if self.count == self.num_msgs:
            sender.close()
            sender.session.close()
            sender.connection.close()

    def on_reactor_init(self, event):
        event.reactor.connection(self)
class ReactorInteropTest(Test):
    """Interop tests driving proton-c against the ProtonJ Java peer in both
    directions; raise Skipped when proton-j is not on the CLASSPATH."""
    def setup(self):
        # proton-j counts as available if any CLASSPATH entry exists on disk.
        classpath = os.environ.get('CLASSPATH', "")
        self.proton_j_available = any(
            entry != "" and os.path.exists(entry)
            for entry in classpath.split(os.pathsep))

    def protonc_to_protonj(self, count):
        if not self.proton_j_available:
            raise Skipped("ProtonJ not found")
        port = free_tcp_port()
        receiver_thread = JavaThread("recv", port, count)
        receiver_thread.start()
        # Give the Java thread time to spin up a JVM and start listening
        # XXX: would be better to parse the stdout output for a message
        time.sleep(1)
        Reactor(SendHandler('127.0.0.1:' + str(port), count)).run()
        receiver_thread.join()
        assert receiver_thread.result == 0

    def protonj_to_protonc(self, count):
        if not self.proton_j_available:
            raise Skipped("ProtonJ not found")
        handler = ReceiveHandler(count)
        Reactor(handler).run()
        handler.java_thread.join()
        assert handler.java_thread.result == 0
        for i in range(1, count):
            assert handler.messages[i-1] == ("message-" + str(i))

    def test_protonc_to_protonj_1(self):
        self.protonc_to_protonj(1)

    def test_protonc_to_protonj_5(self):
        self.protonc_to_protonj(5)

    def test_protonc_to_protonj_500(self):
        self.protonc_to_protonj(500)

    def test_protonc_to_protonj_5000(self):
        self.protonc_to_protonj(5000)

    def test_protonj_to_protonc_1(self):
        self.protonj_to_protonc(1)

    def test_protonj_to_protonc_5(self):
        self.protonj_to_protonc(5)

    def test_protonj_to_protonc_500(self):
        self.protonj_to_protonc(500)

    def test_protonj_to_protonc_5000(self):
        self.protonj_to_protonc(5000)
| 2,936 | 11 | 589 |
83c9189faa0c5be14fe68d283ed7c94e5ee6234b | 1,316 | py | Python | Cloud/MQLibMaster.py | josilber2/MQLib | 6e3f1662988c33d1d2efa6e0d7bd1959f0467337 | [
"MIT"
] | 44 | 2015-07-26T04:33:50.000Z | 2021-12-11T13:02:36.000Z | Cloud/MQLibMaster.py | josilber2/MQLib | 6e3f1662988c33d1d2efa6e0d7bd1959f0467337 | [
"MIT"
] | 5 | 2015-07-26T16:52:42.000Z | 2022-03-18T23:30:02.000Z | Cloud/MQLibMaster.py | josilber2/MQLib | 6e3f1662988c33d1d2efa6e0d7bd1959f0467337 | [
"MIT"
] | 32 | 2016-01-11T12:29:10.000Z | 2021-12-29T07:09:48.000Z | import subprocess
import sys
# Validate command-line arguments
# Accepted forms:
#   MQLibMaster.py METRICS tag
#   MQLibMaster.py FULL tag #ITERFORBASELINE SEEDS_SEPARATED_BY_UNDERSCORES MINSECONDS MAXSECONDS
if len(sys.argv) < 2 or (not (sys.argv[1] == "METRICS" and len(sys.argv) == 3) and not (sys.argv[1] == "FULL" and len(sys.argv) == 7 and sys.argv[3].isdigit() and all([x.isdigit() for x in sys.argv[4].split("_")]) and sys.argv[5].lstrip("-").isdigit() and sys.argv[6].lstrip("-").isdigit())):
    print("Usage:\n python MQLibMaster.py METRICS tag\n [[or]]\n python MQLibMaster.py FULL tag #ITERFORBASELINE SEEDS_SEPARATED_BY_UNDERSCORES MINSECONDS MAXSECONDS")
    exit(1)
# Run until it tells us that we're done
while True:
    if sys.argv[1] == "METRICS":
        p = subprocess.Popen(["python", "MQLibRunner.py", sys.argv[1], sys.argv[2]],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    else:
        p = subprocess.Popen(["python", "MQLibRunner.py", sys.argv[1], sys.argv[2], sys.argv[3],
                              sys.argv[4], sys.argv[5], sys.argv[6]],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Stream the runner's combined stdout/stderr through to our own stdout.
    for line in p.stdout:
        sys.stdout.write(line)
    p.wait()
    # MQLibRunner.py will terminate this EC2 node if it completes successfully,
    # so if we're still running then it must have failed. We'll just kick
    # it off again at the top of the loop.
| 52.64 | 292 | 0.629939 | import subprocess
import sys
# Validate command-line arguments
if len(sys.argv) < 2 or (not (sys.argv[1] == "METRICS" and len(sys.argv) == 3) and not (sys.argv[1] == "FULL" and len(sys.argv) == 7 and sys.argv[3].isdigit() and all([x.isdigit() for x in sys.argv[4].split("_")]) and sys.argv[5].lstrip("-").isdigit() and sys.argv[6].lstrip("-").isdigit())):
print("Usage:\n python MQLibMaster.py METRICS tag\n [[or]]\n python MQLibMaster.py FULL tag #ITERFORBASELINE SEEDS_SEPARATED_BY_UNDERSCORES MINSECONDS MAXSECONDS")
exit(1)
# Run until it tells us that we're done
while True:
if sys.argv[1] == "METRICS":
p = subprocess.Popen(["python", "MQLibRunner.py", sys.argv[1], sys.argv[2]],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
p = subprocess.Popen(["python", "MQLibRunner.py", sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], sys.argv[5], sys.argv[6]],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout:
sys.stdout.write(line)
p.wait()
# MQLibRunner.py will terminate this EC2 node if it completes successfully,
# so if we're still running then it must have failed. We'll just kick
# it off again at the top of the loop.
| 0 | 0 | 0 |
1d4a93a11d384e09680145874bd376743f51deda | 1,695 | py | Python | arena_navigation/arena_local_planner/evaluation/scripts/proto_cluster.py | ignc-research/arena-marl | 3b9b2521436ef7f364a250da71a01e915d840296 | [
"MIT"
] | 7 | 2021-11-11T13:25:25.000Z | 2021-12-25T21:34:41.000Z | arena_navigation/arena_local_planner/evaluation/scripts/proto_cluster.py | ignc-research/arena-marl | 3b9b2521436ef7f364a250da71a01e915d840296 | [
"MIT"
] | 1 | 2021-11-20T20:34:14.000Z | 2021-11-20T20:34:14.000Z | arena_navigation/arena_local_planner/evaluation/scripts/proto_cluster.py | ignc-research/arena-marl | 3b9b2521436ef7f364a250da71a01e915d840296 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# X = np.array([[5,3],
# [10,15],
# [15,12],
# [24,10],
# [30,30],
# [85,70],
# [71,80],
# [60,78],
# [70,55],
# [80,91],])
# cluster = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='ward')
# cluster.fit_predict(X)
# print(cluster.labels_)
# plt.scatter(X[:,0],X[:,1], c=cluster.labels_, cmap='rainbow')
# plt.show()
# %%
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# %%
dates = ['2016-1-1', '2016-1-2', '2016-1-3']
cols = pd.MultiIndex.from_product([dates, ['High', 'Low']])
cols
# pd.DataFrame(data=cols)
# %%
bags = {}
pose_x = np.asarray([1,2,3,4]).T
pose_y = np.asarray([2,2,3,4]).T
t = np.asarray([3,2,3,4]).T
col_xy = np.asarray([4,2,3,4]).T
subgoal_x = np.asarray([5,2,3,4]).T
subgoal_y = np.asarray([6,2,3,4]).T
wpg_x = np.asarray([7,2,3,4]).T
wpg_y = np.asarray([8,2,3,4]).T
bags["run_1"] = [pose_x, pose_y, t, col_xy, subgoal_x, subgoal_y, wpg_x, wpg_y]
bags["run_2"] = [pose_x, pose_y, t, col_xy, subgoal_x, subgoal_y, wpg_x, wpg_y]
# %%
df = pd.DataFrame(data=bags)
df2 = df.to_dict()
df.to_csv("test.csv",index=False)
# %%
df
# %%
df2
# %%
runs = pd.read_excel('runs_ex.xlsx',engine='openpyxl')
type(runs)
runs.to_excel("output.xlsx")
# %%
df2["run_2"]
# %%
bags["run_2"]
# %%
import json
data = {}
data['run'] = []
data['time'] = []
data['path'] = []
data['velocity'] = []
data['collision'] = []
data['run'].append(0)
data['time'].append(1)
data['path'].append(2)
data['velocity'].append(3)
data['collision'].append(4)
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
# %%
| 20.178571 | 87 | 0.60059 | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# X = np.array([[5,3],
# [10,15],
# [15,12],
# [24,10],
# [30,30],
# [85,70],
# [71,80],
# [60,78],
# [70,55],
# [80,91],])
# cluster = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='ward')
# cluster.fit_predict(X)
# print(cluster.labels_)
# plt.scatter(X[:,0],X[:,1], c=cluster.labels_, cmap='rainbow')
# plt.show()
# %%
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# %%
dates = ['2016-1-1', '2016-1-2', '2016-1-3']
cols = pd.MultiIndex.from_product([dates, ['High', 'Low']])
cols
# pd.DataFrame(data=cols)
# %%
bags = {}
pose_x = np.asarray([1,2,3,4]).T
pose_y = np.asarray([2,2,3,4]).T
t = np.asarray([3,2,3,4]).T
col_xy = np.asarray([4,2,3,4]).T
subgoal_x = np.asarray([5,2,3,4]).T
subgoal_y = np.asarray([6,2,3,4]).T
wpg_x = np.asarray([7,2,3,4]).T
wpg_y = np.asarray([8,2,3,4]).T
bags["run_1"] = [pose_x, pose_y, t, col_xy, subgoal_x, subgoal_y, wpg_x, wpg_y]
bags["run_2"] = [pose_x, pose_y, t, col_xy, subgoal_x, subgoal_y, wpg_x, wpg_y]
# %%
df = pd.DataFrame(data=bags)
df2 = df.to_dict()
df.to_csv("test.csv",index=False)
# %%
df
# %%
df2
# %%
runs = pd.read_excel('runs_ex.xlsx',engine='openpyxl')
type(runs)
runs.to_excel("output.xlsx")
# %%
df2["run_2"]
# %%
bags["run_2"]
# %%
import json
data = {}
data['run'] = []
data['time'] = []
data['path'] = []
data['velocity'] = []
data['collision'] = []
data['run'].append(0)
data['time'].append(1)
data['path'].append(2)
data['velocity'].append(3)
data['collision'].append(4)
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
# %%
| 0 | 0 | 0 |
d553b14f039dfc7cfda2b23b209262ef6e222a6d | 7,760 | py | Python | ramsey.py | Stephane-Poirier/Ramsey | 8fa4901080d7371ed2070bd51ddf73ef01216c86 | [
"MIT"
] | null | null | null | ramsey.py | Stephane-Poirier/Ramsey | 8fa4901080d7371ed2070bd51ddf73ef01216c86 | [
"MIT"
] | null | null | null | ramsey.py | Stephane-Poirier/Ramsey | 8fa4901080d7371ed2070bd51ddf73ef01216c86 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Stéphane-Poirier'
import math
from diff_graph import DiffGraph
from ferrer import FerrerIterator, ferrer_size
import time
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='ramsey : evaluate expected value of Kr presence for a range of sizes')
parser.add_argument("-r", "--Kr", type=int, default=5, help="size of Kr to avoid")
parser.add_argument("-n", "--size_max", type=int, default=51, help="size max of Kn to measure")
parser.add_argument("-m", "--method", type=str, default="triangles", help="method used to evaluate expected value")
options = parser.parse_args()
if options.method == "test":
test()
elif options.method.lower() == "erdos":
print("Erdös method to evaluate expected value of K{}".format(options.Kr))
evaluate_erdos(options.Kr, options.size_max)
elif options.method.lower() == "triangles":
print("Triangles method to evaluate expected value of K{}".format(options.Kr))
evaluate_triangles(options.Kr, options.size_max)
elif options.method.lower() == "stars":
print("Stars method to evaluate expected value of K{}".format(options.Kr))
evaluate_stars(options.Kr, options.size_max)
else:
print("Method {} is not yet implemented".format(options.method))
# n_graph = graph.Graph.from_diffs(({1, 4}, {2, 3, 5}))
# n_graph.set_edge(2, 3, 1)
# print("{}".format(n_graph))
# n_cliques = n_graph.count_cliques()
# print("nb cliques {}".format(n_cliques))
# d_graph = diff_graph.DiffGraph(({1, 4}, {2, 3, 5}))
# print("{}".format(d_graph))
# for lst in FerrerIterator(4, 7, 10):
# cur_size = ferrer_size(lst)
# print("list {} : size {}".format(lst, cur_size))
# n = 17
# qs0 = quadratic_set(n)
# qs1 = set(range(1, n)) - qs0
# d_graph = DiffGraph((qs0, qs1))
# n_cliques = d_graph.count_cliques(isomorphic=True)
# expected_cliques(n, n_cliques, 2, 5, isomorphic=True)
Gp = []
expectations_dict = {}
for n in range(5, 150, 4):
if is_prime(n):
print(n)
start = time.process_time()
qs0 = quadratic_set(n)
qs1 = set(range(1, n)) - qs0
d_graph = DiffGraph((qs0, qs1))
n_cliques = d_graph.count_cliques(isomorphic=True)
print("nb cliques {}".format(n_cliques))
max_cliques = len([x for x in n_cliques[0] if x > 0])-1
Gp.append((n, max_cliques))
min_r = max_cliques+1
for nb_copies in range(2, 3*n+1):
for r in range(min_r, (max_cliques*nb_copies)):
exp = expected_cliques(n, n_cliques, nb_copies, r, isomorphic=True)
if r not in expectations_dict \
or n*nb_copies - math.floor(exp) > expectations_dict[r][1]*expectations_dict[r][2] - math.floor(expectations_dict[r][0]):
expectations_dict[r] = (exp, n, nb_copies)
if exp < 1.0:
break
if exp > n * nb_copies:
min_r = r + 1
print("time {}".format(time.process_time() - start))
print(Gp)
print(expectations_dict)
| 34.035088 | 149 | 0.554253 | # -*- coding: utf-8 -*-
__author__ = 'Stéphane-Poirier'
import math
from diff_graph import DiffGraph
from ferrer import FerrerIterator, ferrer_size
import time
def comb(n, k):
    """Binomial coefficient C(n, k), computed with exact integer arithmetic.

    The original used float division -- int(c * (n+1-i) / i) -- which
    silently loses precision once intermediates exceed 2**53 (e.g.
    comb(100, 50)). Floor division is exact here because the running
    product c * (n+1-i) is always divisible by i at each step.
    Returns 0 when k > n >= 0, matching the original behavior.
    """
    c = 1
    for i in range(1, k+1):
        c = c * (n + 1 - i) // i
    return c
def multinomial(lst):
    """Multinomial coefficient (sum(lst))! / prod(a! for a in lst).

    Computed without forming large factorials: the largest entry's
    factorial is cancelled analytically, and the remaining factors are
    folded in one at a time with exact integer division.
    """
    result = 1
    remaining = sum(lst)
    largest_pos = lst.index(max(lst))
    rest = lst[:largest_pos] + lst[largest_pos+1:]
    for part in rest:
        for divisor in range(1, part + 1):
            result = (result * remaining) // divisor
            remaining -= 1
    return result
def evaluate_triangles(r, size_max):
    """Print, for each size n (multiple of 3, n > r) up to just past
    size_max, the expected count of monochromatic K_r under the
    triangle-based construction (2 colors, n/3 triangle blocks)."""
    t = (r - 1) // 3
    while 3 * t < size_max:
        t += 1
        n = 3 * t
        if n <= r:
            continue
        expectation_one = sum(
            comb(t, j) * comb(t - j, r - 2 * j) * (3 ** (r - 2 * j)) * (2 ** (j - r * (r - 1) / 2))
            for j in range(r // 2 + 1))
        exp = 2 * expectation_one
        print(" size {} : expected value {}".format(n, exp))
def evaluate_stars(r, size_max):
    """Print the expected number of monochromatic K_r in random 2-colourings
    built from 5-vertex star blocks, for graph sizes n = 5s up to size_max.

    Same first-moment structure as evaluate_triangles, with block size 5 and
    an extra 5**j factor for the choice within each doubly-hit block.
    NOTE(review): formula assumed from symmetry with evaluate_triangles --
    confirm against the accompanying derivation.
    """
    s = (r - 1) // 5
    while 5 * s < size_max:
        s += 1
        n = 5 * s
        if n <= r:
            # a K_r needs more vertices than this graph size provides
            continue
        exp_one_color = 0
        for j in range(r // 2 + 1):
            # 2**(j - r*(r-1)/2) uses float division, so the sum is a float
            exp_one_color += comb(s, j) * (5 ** j) * comb(s - j, r - 2 * j) * (5 ** (r - 2 * j)) * (2 ** (j - r * (r - 1) / 2))
        # two colours, symmetric by construction
        exp = 2 * exp_one_color
        print(" size {} : expected value {}".format(n, exp))
def evaluate_erdos(r, size_max):
    """Print the classic Erdos first-moment bound: for each m up to size_max,
    the expected number of monochromatic K_r in a uniformly random
    2-colouring of K_m, i.e. C(m, r) * 2**(1 - C(r, 2)).
    """
    m = r
    while m < size_max:
        m += 1
        nb = comb(m, r)
        # 2 ** (1 - r*(r-1)/2): probability one fixed K_r is monochromatic,
        # times 2 for the two colours (float because of the / division)
        exp = nb * (2 ** (1 - r * (r - 1) / 2))
        print(" size {} : expected value {}".format(m, exp))
def is_prime(n):
    """Deterministic primality test by trial division.

    After ruling out multiples of 2 and 3, only candidates of the form
    6k - 1 and 6k + 1 up to (roughly) sqrt(n) can divide n.
    """
    if n <= 1:
        return False
    if n == 2 or n == 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    limit = int(math.sqrt(n) + 1)
    candidate = 5  # first 6k - 1 candidate
    while candidate <= limit:
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def quadratic_set(n):
    """Return the set of nonzero quadratic residues modulo n,
    i.e. { i*i mod n : 1 <= i < n }.
    """
    return {(i * i) % n for i in range(1, n)}
def expected_cliques(orig_graph_size, nb_cliques, nb_copies, size_cliques_expected, isomorphic=True):
    """Estimate the expected number of monochromatic cliques of size
    ``size_cliques_expected`` when ``nb_copies`` copies of a base graph on
    ``orig_graph_size`` vertices are coloured using its clique profile.

    :param orig_graph_size: vertex count of the base graph
    :param nb_cliques: per-colour clique counts; nb_cliques[0][k] is the
        number of k-cliques in colour 0 (only colour 0 is read; other
        colours are assumed symmetric via the nb_colors factor)
    :param nb_copies: number of disjoint copies of the base graph
    :param size_cliques_expected: clique size whose expectation is computed
    :param isomorphic: only True is implemented
    :return: the expectation (float), or None when isomorphic is False
    """
    if not isomorphic:
        print("Case isomorphic False is not yet implemented")
        return
    nb_colors = len(nb_cliques)
    # largest clique size present in colour 0 of the base graph
    max_cliques_orig = len([x for x in nb_cliques[0] if x > 0]) - 1
    # edges of a complete graph on size_cliques_expected vertices
    nb_edges_all = (size_cliques_expected * (size_cliques_expected - 1)) // 2
    one_color_expectation = 0.0
    # iterate over all ways to split the expected clique across copies
    # (cliques_cfg[k] = number of copies contributing a k-subclique)
    for cliques_cfg in FerrerIterator(max_cliques_orig, nb_copies, size_cliques_expected):
        # edges already inside same-copy subcliques
        nb_edges_cfg = sum(c * (k * (k - 1)) // 2 for (k, c) in enumerate(cliques_cfg))
        # ways to assign the parts to distinct copies
        choices_cfg = multinomial([x for x in cliques_cfg if x > 0])
        nb_cliques_for_one_choice = 1
        for (nc, c) in zip(nb_cliques[0][:max_cliques_orig + 1], cliques_cfg):
            nb_cliques_for_one_choice *= nc ** c
        # to limit under flow effects
        while nb_edges_cfg < nb_edges_all and choices_cfg % 2 == 0:
            nb_edges_cfg += 1
            choices_cfg //= 2
        while nb_edges_cfg < nb_edges_all and nb_cliques_for_one_choice % 2 == 0:
            nb_edges_cfg += 1
            nb_cliques_for_one_choice //= 2
        # remaining cross-copy edges must be monochromatic: factor 2**(-missing)
        one_color_expectation += (2 ** (nb_edges_cfg - nb_edges_all)) * choices_cfg * nb_cliques_for_one_choice
    all_colors_expectation = nb_colors * one_color_expectation
    print(" {} copies of G{} ({}) gives expectation {} for cliques of size {}".format(nb_copies, orig_graph_size,
                                                                                      nb_copies * orig_graph_size,
                                                                                      all_colors_expectation,
                                                                                      size_cliques_expected))
    return all_colors_expectation
def expected_cliques_range(orig_graph_size, nb_cliques, max_cliques_avoided, isomorphic=True):
    """Print expected clique counts over a range of clique sizes and copy counts.

    For every clique size from just above the base graph's maximum up to
    ``max_cliques_avoided``, and every copy count from 2 up to
    ``max_cliques_avoided``, delegate to expected_cliques().

    Bug fix: the original body incremented ``nb_copies`` but never passed it --
    it called ``expected_cliques(orig_graph_size, nb_cliques,
    cur_cliques_expected, isomorphic)``, binding the clique size to the
    ``nb_copies`` parameter and the boolean ``isomorphic`` to
    ``size_cliques_expected``.  Arguments are now passed to their intended
    parameters.
    """
    if not isomorphic:
        print("Case isomorphic False is not yet implemented")
        return
    # largest clique size present in colour 0 of the base graph
    max_cliques_orig = len([x for x in nb_cliques[0] if x > 0]) - 1
    for cur_cliques_expected in range(max_cliques_orig + 1, max_cliques_avoided + 1):
        nb_copies = 1
        while nb_copies < max_cliques_avoided:
            nb_copies += 1
            expected_cliques(orig_graph_size, nb_cliques, nb_copies,
                             cur_cliques_expected, isomorphic=isomorphic)
    return
def test():
    """Smoke-test comb() by printing a few hand-checkable binomial values."""
    cases = ((4, 1), (4, 3), (4, 4), (6, 2))
    for n, k in cases:
        value = comb(n, k)
        print("C({},{}) = {}".format(n, k, value))
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='ramsey : evaluate expected value of Kr presence for a range of sizes')
    parser.add_argument("-r", "--Kr", type=int, default=5, help="size of Kr to avoid")
    parser.add_argument("-n", "--size_max", type=int, default=51, help="size max of Kn to measure")
    parser.add_argument("-m", "--method", type=str, default="triangles", help="method used to evaluate expected value")
    options = parser.parse_args()
    # Dispatch on the requested estimation method.
    if options.method == "test":
        test()
    elif options.method.lower() == "erdos":
        print("Erdös method to evaluate expected value of K{}".format(options.Kr))
        evaluate_erdos(options.Kr, options.size_max)
    elif options.method.lower() == "triangles":
        print("Triangles method to evaluate expected value of K{}".format(options.Kr))
        evaluate_triangles(options.Kr, options.size_max)
    elif options.method.lower() == "stars":
        print("Stars method to evaluate expected value of K{}".format(options.Kr))
        evaluate_stars(options.Kr, options.size_max)
    else:
        print("Method {} is not yet implemented".format(options.method))
    # n_graph = graph.Graph.from_diffs(({1, 4}, {2, 3, 5}))
    # n_graph.set_edge(2, 3, 1)
    # print("{}".format(n_graph))
    # n_cliques = n_graph.count_cliques()
    # print("nb cliques {}".format(n_cliques))
    # d_graph = diff_graph.DiffGraph(({1, 4}, {2, 3, 5}))
    # print("{}".format(d_graph))
    # for lst in FerrerIterator(4, 7, 10):
    #     cur_size = ferrer_size(lst)
    #     print("list {} : size {}".format(lst, cur_size))
    # n = 17
    # qs0 = quadratic_set(n)
    # qs1 = set(range(1, n)) - qs0
    # d_graph = DiffGraph((qs0, qs1))
    # n_cliques = d_graph.count_cliques(isomorphic=True)
    # expected_cliques(n, n_cliques, 2, 5, isomorphic=True)
    # NOTE(review): this sweep always runs, regardless of --method; the
    # loop nesting below was reconstructed from semantics (the dump lost
    # indentation) -- confirm against the original file.
    # Gp collects (prime n, largest clique size) for Paley-type graphs.
    Gp = []
    # expectations_dict[r] = (best expectation, n, nb_copies) per clique size r
    expectations_dict = {}
    for n in range(5, 150, 4):
        if is_prime(n):
            print(n)
            start = time.process_time()
            # Paley-style colouring: quadratic residues vs non-residues
            qs0 = quadratic_set(n)
            qs1 = set(range(1, n)) - qs0
            d_graph = DiffGraph((qs0, qs1))
            n_cliques = d_graph.count_cliques(isomorphic=True)
            print("nb cliques {}".format(n_cliques))
            max_cliques = len([x for x in n_cliques[0] if x > 0]) - 1
            Gp.append((n, max_cliques))
            min_r = max_cliques + 1
            for nb_copies in range(2, 3 * n + 1):
                for r in range(min_r, (max_cliques * nb_copies)):
                    exp = expected_cliques(n, n_cliques, nb_copies, r, isomorphic=True)
                    # keep the configuration maximising (vertices - floor(exp))
                    if r not in expectations_dict \
                            or n * nb_copies - math.floor(exp) > expectations_dict[r][1] * expectations_dict[r][2] - math.floor(expectations_dict[r][0]):
                        expectations_dict[r] = (exp, n, nb_copies)
                    if exp < 1.0:
                        # expectation below one: larger r is hopeless for this copy count
                        break
                    if exp > n * nb_copies:
                        # trivially large expectation: skip this r next time
                        min_r = r + 1
            print("time {}".format(time.process_time() - start))
    print(Gp)
    print(expectations_dict)
f94cea21a0db965d0167768f9473e380760ed90e | 2,783 | py | Python | recipes/hiredis/0.x.x/conanfile.py | cbeattie-tl/conan-center-index | 28518a3cd31df96b0501bdf33c0da02261973289 | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/hiredis/0.x.x/conanfile.py | cbeattie-tl/conan-center-index | 28518a3cd31df96b0501bdf33c0da02261973289 | [
"MIT"
] | 1 | 2022-03-09T06:33:41.000Z | 2022-03-09T06:33:41.000Z | recipes/hiredis/0.x.x/conanfile.py | cbeattie-tl/conan-center-index | 28518a3cd31df96b0501bdf33c0da02261973289 | [
"MIT"
] | null | null | null | from conans import AutoToolsBuildEnvironment, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.36.0"
| 35.227848 | 107 | 0.626303 | from conans import AutoToolsBuildEnvironment, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.36.0"
class HiredisConan(ConanFile):
    """Conan (1.x API) recipe building hiredis from source with its Makefile."""
    name = "hiredis"
    description = "Hiredis is a minimalistic C client library for the Redis database."
    license = "BSD-3-Clause"
    topics = ("hiredis", "redis", "client", "database")
    homepage = "https://github.com/redis/hiredis"
    url = "https://github.com/conan-io/conan-center-index"

    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    @property
    def _source_subfolder(self):
        # directory the upstream tarball is extracted into
        return "source_subfolder"

    def export_sources(self):
        # ship the patch files referenced by conandata.yml with the recipe
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])

    def configure(self):
        # fPIC is meaningless for shared builds; hiredis is plain C, so the
        # C++ standard/runtime settings do not affect the package id
        if self.options.shared:
            del self.options.fPIC
        del self.settings.compiler.cppstd
        del self.settings.compiler.libcxx

    def validate(self):
        if self.settings.os == "Windows":
            raise ConanInvalidConfiguration("hiredis {} is not supported on Windows.".format(self.version))

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def _patch_sources(self):
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        # Do not force PIC if static
        if not self.options.shared:
            makefile = os.path.join(self._source_subfolder, "Makefile")
            tools.replace_in_file(makefile, "-fPIC ", "")

    def build(self):
        self._patch_sources()
        with tools.chdir(self._source_subfolder):
            autoTools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
            autoTools.make()

    def package(self):
        self.copy("COPYING", dst="licenses", src=self._source_subfolder)
        with tools.chdir(self._source_subfolder):
            autoTools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
            autoTools.install(vars={
                "DESTDIR": tools.unix_path(self.package_folder),
                "PREFIX": "",
            })
        # keep only the artifacts matching the shared/static option
        # NOTE(review): "*.[so|dylib]*" is an fnmatch character class, not an
        # alternation -- it matches single chars from {s,o,|,d,y,l,i,b};
        # it happens to catch .so/.dylib but looks unintended -- confirm.
        tools.remove_files_by_mask(
            os.path.join(self.package_folder, "lib"),
            "*.a" if self.options.shared else "*.[so|dylib]*",
        )
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))

    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "hiredis")
        self.cpp_info.libs = ["hiredis"]
| 1,811 | 786 | 23 |
77793f3d0d46e5b024a12630e3c8c942f127f6f8 | 130 | py | Python | tests/main_test.py | deeso/simple-rules | c57ad9a6a6ff18e6c6020e4870a4e7095c28fa11 | [
"MIT"
] | null | null | null | tests/main_test.py | deeso/simple-rules | c57ad9a6a6ff18e6c6020e4870a4e7095c28fa11 | [
"MIT"
] | null | null | null | tests/main_test.py | deeso/simple-rules | c57ad9a6a6ff18e6c6020e4870a4e7095c28fa11 | [
"MIT"
] | null | null | null | import unittest
import os
import signal
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| 14.444444 | 40 | 0.7 | import unittest
import os
import signal
if __name__ == '__main__':
unittest.main()
os.kill(os.getpid(), signal.SIGKILL)
| 0 | 0 | 0 |
a2b6393150ff5d8352980bb1e7cedf448d7f741a | 5,376 | py | Python | tests/__main__.py | emissions-api/sentinel5dl | 67bf50bf8ddc48aa0177df534b2084a733d8b8c9 | [
"MIT"
] | 7 | 2019-10-08T10:49:39.000Z | 2021-06-08T05:27:17.000Z | tests/__main__.py | emissions-api/sentinel5dl | 67bf50bf8ddc48aa0177df534b2084a733d8b8c9 | [
"MIT"
] | 40 | 2019-10-05T23:08:48.000Z | 2021-10-02T18:49:33.000Z | tests/__main__.py | emissions-api/sentinel5dl | 67bf50bf8ddc48aa0177df534b2084a733d8b8c9 | [
"MIT"
] | 8 | 2019-10-06T00:36:48.000Z | 2021-06-08T05:27:20.000Z | import datetime
import os
import pycurl
import sentinel5dl
import sentinel5dl.__main__ as executable
import tempfile
import unittest
import logging
import sys
testpath = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
unittest.main()
| 35.368421 | 78 | 0.607887 | import datetime
import os
import pycurl
import sentinel5dl
import sentinel5dl.__main__ as executable
import tempfile
import unittest
import logging
import sys
testpath = os.path.dirname(os.path.abspath(__file__))
class TestSentinel5dl(unittest.TestCase):
    """Tests for sentinel5dl search/download with the HTTP layer mocked out."""

    def _mock_http_request(self, path, filename=None):
        '''Mock HTTP requests to the ESA API
        '''
        # download
        if filename is not None:
            self._count_download += 1
            with open(filename, 'wb') as f:
                f.write(b'123')
            return
        # search request
        if path.startswith('/api/stub/products?'):
            self._count_search_request += 1
            with open(os.path.join(testpath, 'products.json'), 'rb') as f:
                return f.read()
        # checksum request
        if path.endswith('/Checksum/Value/$value'):
            self._count_checksum_request += 1
            # MD5 checksum for string `123`
            return b'202CB962AC59075B964B07152D234B70'

    def setUp(self):
        '''Patch cURL based operation in sentinel5dl so that we do not really
        make any HTTP requests and reset the request counters.
        '''
        # keep a handle on the real implementation (restored nowhere; the
        # patch is process-wide for the test run)
        if not getattr(sentinel5dl, '__original_http_request', None):
            # save the original one
            setattr(sentinel5dl, '__original_http_request',
                    getattr(sentinel5dl, '__http_request'))
        setattr(sentinel5dl, '__http_request', self._mock_http_request)
        self._count_search_request = 0
        self._count_checksum_request = 0
        self._count_download = 0
        logging.getLogger(sentinel5dl.__name__).setLevel(logging.WARNING)

    def test(self):
        '''Test search and download.
        '''
        result = sentinel5dl.search(
            polygon='POLYGON((7 49,13 49,13 52,7 52,7 49))',
            begin_ts=datetime.datetime.fromtimestamp(0),
            end_ts=datetime.datetime.now(),
            product='L2__CO____')
        # The result returned by the mock contains four products but claims a
        # total of eight products, making sentinel5dl request resources twice.
        self.assertEqual(self._count_search_request, 2)
        self.assertEqual(result['totalresults'], 8)
        self.assertEqual(result['totalresults'], len(result['products']))
        products = result['products']
        with tempfile.TemporaryDirectory() as tmpdir:
            # prepare a file which is half-downloaded
            file_one = os.path.join(tmpdir, products[0]['identifier'] + '.nc')
            with open(file_one, 'wb') as f:
                f.write(b'12')
            sentinel5dl.download(products, tmpdir)
            # test files
            for product in products:
                filename = os.path.join(tmpdir, product['identifier'] + '.nc')
                with open(filename, 'rb') as f:
                    self.assertEqual(f.read(), b'123')
            # We should have downloaded four files and have an additional four
            # files storing md5 checksums
            self.assertEqual(len(os.listdir(tmpdir)), 8)
            # We should have four checksum requests. One for each file
            self.assertEqual(self._count_checksum_request, 4)
            # We should have downloaded four unique files
            self.assertEqual(self._count_download, 4)

    def testFailedRequest(self):
        # exercise the *real* request helper against an unreachable endpoint
        sentinel5dl.API = 'http://127.0.0.1:9'
        request = getattr(sentinel5dl, '__original_http_request')
        # nothing may use port 9. This should always fail
        with self.assertRaises(pycurl.error):
            request('/', retries=0)
class TestExecutable(unittest.TestCase):
    """Tests for the sentinel5dl command line entry point with the library
    search/download calls mocked out."""

    def _mock_search(self, *args, **kwargs):
        # pretend the API returned no products
        return {'products': []}

    def _mock_download(self, products, _):
        self.assertEqual(products, [])

    def setUp(self):
        # Mock library calls
        setattr(executable, 'search', self._mock_search)
        setattr(executable, 'download', self._mock_download)
        logging.getLogger(sentinel5dl.__name__).setLevel(logging.WARNING)
        # override sys.argv. Otherwise argparse is trying to parse it.
        sys.argv = sys.argv[0:1] + ['.']

    def testNoArguments(self):
        '''Test the executable.
        '''
        executable.main()

    def testPolygon(self):
        '''Test with an invalid polygon.
        '''
        # NOTE(review): this polygon is closed and pair-wise well formed, so
        # it looks *valid*, and main() is expected to succeed -- the
        # docstring appears wrong; confirm.
        sys.argv = [sys.argv[0], '--polygon', '3 1, 4 4, 2 4, 1 2, 3 1', '.']
        executable.main()

    def testInvalidPolygons(self):
        '''Tests with invalid polygons.
        '''
        invalid_polygons = (
            '3 1 4 4, 2 4 1',  # coordinates must be pairs
            '3 1, 4 4, 2 4, 1 2',  # polygon must be closed
            'a s, d f, a s, d f',  # coordinates must be numbers
        )
        for invalid_polygon in invalid_polygons:
            sys.argv = [sys.argv[0], '--polygon', invalid_polygon, '.']
            error_msg = 'Expecting SystemExit error when providing the '\
                f'invalid polygon "{invalid_polygon}"'
            with self.assertRaises(SystemExit, msg=error_msg) as e:
                executable.main()
            error_msg = 'Expecting return code != 0 error when providing '\
                f'the invalid polygon "{invalid_polygon}"'
            self.assertNotEqual(e.exception.code, 0, msg=error_msg)
if __name__ == '__main__':
unittest.main()
| 695 | 4,369 | 46 |
2467fd61220b97eed5bc2fdb0593892f8f2010da | 2,293 | py | Python | broti/modules/poll.py | pcworld/broti | 4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1 | [
"BSD-2-Clause"
] | null | null | null | broti/modules/poll.py | pcworld/broti | 4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1 | [
"BSD-2-Clause"
] | null | null | null | broti/modules/poll.py | pcworld/broti | 4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1 | [
"BSD-2-Clause"
] | 1 | 2021-03-28T18:52:26.000Z | 2021-03-28T18:52:26.000Z | import time
poll_active = False
poll = {}
allowed_users = set()
voted = set()
| 28.308642 | 105 | 0.600087 | import time
poll_active = False
poll = {}
allowed_users = set()
voted = set()
def start_poll(bot, c, e, args):
    """Handle the *poll command: start a 2-minute poll in a channel.

    :param bot: the bot instance (reply/hook_timeout/channels API)
    :param c: IRC connection object, passed through to bot.reply
    :param e: the triggering event; e.target is the channel
    :param args: poll options; at least two are required
    """
    global poll
    global poll_active
    global voted
    global allowed_users
    if len(args) < 2:
        bot.reply(c, e, 'Please specify some options')
        return
    elif poll_active:
        bot.reply(c, e, 'There already is a poll running.')
        return
    elif e.target not in bot.channels:
        bot.reply(c, e, 'This command can only be used in channels.')
        return
    poll_active = True
    # only users present at poll start may vote
    allowed_users = set(bot.channels[e.target].users())
    bot.logger.debug('Starting poll for %s with options %s' \
            % (e.source, ' '.join(args)))
    # vote tally, one counter per option
    poll = dict([(option, 0) for option in args])
    bot.reply(c, e, 'Poll started. Choose one among %s with *vote. You have 2 minutes.' % ', '.join(args))
    # close the poll automatically after 120 seconds
    bot.hook_timeout(120, end_poll, c, e)
def do_poll(bot, c, e, args):
    """Handle the *vote command: register one vote for the active poll.

    :param args: args[0] is the option being voted for
    """
    global poll
    global poll_active
    global voted
    global allowed_users
    if len(args) < 1:
        return
    # nick is everything before the '!' of the IRC hostmask
    username, _, _ = e.source.partition('!')
    vote = args[0]
    if not poll_active:
        bot.reply(c, e, 'No poll active at the moment. You may start one ' \
                'with *poll')
    # NOTE(review): no return after the inactive-poll reply above, so the
    # checks below still run (and send a second reply, since `poll` is
    # empty) -- confirm whether an early return was intended.
    if username in voted:
        bot.reply(c, e, 'You already voted. I am democratic, so each ' \
                'user has only one vote.')
    elif username not in allowed_users:
        bot.reply(c, e, 'You have not been in the channel, when the ' \
                'voting began. You are not allowed to vote.')
    elif vote not in poll:
        bot.reply(c, e, 'This option is not part of the poll.')
    else:
        voted.add(username)
        poll[vote] += 1
        bot.reply(c, e, 'Your vote has been accepted.')
def end_poll(bot, c, e):
    """Timer callback: announce per-option results and close the poll.

    Note: `voted` and `allowed_users` are intentionally left as-is;
    start_poll resets them for the next poll.
    """
    global poll
    global poll_active
    bot.reply(c, e, 'Poll has ended. Here are the results:')
    for option, count in poll.items():
        bot.reply(c, e, '%s: %d' % (option, count))
    poll_active = False
def load_module(bot):
    """Register the *poll and *vote command handlers with the bot and
    return their handler hashes (used by the bot for unloading)."""
    bot.hook_command('poll', start_poll)
    bot.hook_command('vote', do_poll)
    return [hash(start_poll), hash(do_poll)]
def commands():
    """Describe this module's chat commands as (name, help, usage) tuples."""
    poll_cmd = ('poll', 'Start a poll', 'poll options[ ...]')
    vote_cmd = ('vote', 'Vote in an active poll', 'poll option')
    return [poll_cmd, vote_cmd]
| 2,099 | 0 | 115 |
7eabf07fa7712af884b0cdc51dcd92e429c33234 | 9,369 | py | Python | deps/riak_pb/msgcodegen.py | pexip/os-riak | 9e64fb0412121776c971c8f04e8c96df9f2a31de | [
"Apache-2.0"
] | null | null | null | deps/riak_pb/msgcodegen.py | pexip/os-riak | 9e64fb0412121776c971c8f04e8c96df9f2a31de | [
"Apache-2.0"
] | null | null | null | deps/riak_pb/msgcodegen.py | pexip/os-riak | 9e64fb0412121776c971c8f04e8c96df9f2a31de | [
"Apache-2.0"
] | 11 | 2015-02-11T21:57:01.000Z | 2018-07-25T21:30:12.000Z | # Copyright 2014 Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
distutils commands for generating protocol message-code mappings.
"""
__all__ = ['build_messages', 'clean_messages']
import re
import csv
import os
from os.path import isfile
from distutils import log
from distutils.core import Command
from distutils.file_util import write_file
from datetime import date
LICENSE = """# Copyright {0} Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""".format(date.today().year)
class clean_messages(Command):
"""
Cleans generated message code mappings. Add to the build process
using::
setup(cmd_class={'clean_messages': clean_messages})
"""
description = "clean generated protocol message code mappings"
user_options = [
('destination=', None, 'destination Python source file')
]
class build_messages(Command):
"""
Generates message code mappings. Add to the build process using::
setup(cmd_class={'build_messages': build_messages})
"""
description = "generate protocol message code mappings"
user_options = [
('source=', None, 'source CSV file containing message code mappings'),
('destination=', None, 'destination Python source file')
]
# Used in loading and generating
_pb_imports = set()
_messages = set()
_linesep = os.linesep
_indented_item_sep = ',{0} '.format(_linesep)
_docstring = [
''
'# This is a generated file. DO NOT EDIT.',
'',
'"""',
'Constants and mappings between Riak protocol codes and messages.',
'"""',
''
]
def _format_python2_or_3(self):
"""
Change the PB files to use full pathnames for Python 3.x
and modify the metaclasses to be version agnostic
"""
pb_files = set()
with open(self.source, 'r', buffering=1) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
_, _, proto = row
pb_files.add('riak_pb/{0}_pb2.py'.format(proto))
for im in sorted(pb_files):
with open(im, 'r', buffering=1) as pbfile:
contents = 'from six import *\n' + pbfile.read()
contents = re.sub(r'riak_pb2',
r'riak_pb.riak_pb2',
contents)
# Look for this pattern in the protoc-generated file:
#
# class RpbCounterGetResp(_message.Message):
# __metaclass__ = _reflection.GeneratedProtocolMessageType
#
# and convert it to:
#
# @add_metaclass(_reflection.GeneratedProtocolMessageType)
# class RpbCounterGetResp(_message.Message):
contents = re.sub(
r'class\s+(\S+)\((\S+)\):\s*\n'
'\s+__metaclass__\s+=\s+(\S+)\s*\n',
r'@add_metaclass(\3)\nclass \1(\2):\n', contents)
with open(im, 'w', buffering=1) as pbfile:
pbfile.write(contents)
| 33.223404 | 79 | 0.593873 | # Copyright 2014 Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
distutils commands for generating protocol message-code mappings.
"""
__all__ = ['build_messages', 'clean_messages']
import re
import csv
import os
from os.path import isfile
from distutils import log
from distutils.core import Command
from distutils.file_util import write_file
from datetime import date
LICENSE = """# Copyright {0} Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""".format(date.today().year)
class ComparableMixin(object):
    """Derive all six rich comparisons from a single subclass-provided
    _cmpkey() method.

    If either operand lacks _cmpkey(), or the keys are of incomparable
    types, NotImplemented is returned so Python can fall back to the
    other operand's comparison (or raise TypeError itself).
    """

    def _compare(self, other, op):
        try:
            return op(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            # _cmpkey not implemented, or return different type,
            # so I can't compare with "other".
            return NotImplemented

    def __eq__(self, other):
        return self._compare(other, lambda a, b: a == b)

    def __ne__(self, other):
        return self._compare(other, lambda a, b: a != b)

    def __lt__(self, other):
        return self._compare(other, lambda a, b: a < b)

    def __le__(self, other):
        return self._compare(other, lambda a, b: a <= b)

    def __gt__(self, other):
        return self._compare(other, lambda a, b: a > b)

    def __ge__(self, other):
        return self._compare(other, lambda a, b: a >= b)
class MessageCodeMapping(ComparableMixin):
    """One row of the message-code CSV: a numeric protocol code, a protobuf
    message name, and the proto module that (may) define it.

    Instances order and hash by their numeric code.
    """

    def __init__(self, code, message, proto):
        # :param code: numeric protocol code (string from CSV, coerced to int)
        # :param message: protobuf message name, e.g. "RpbGetReq"
        # :param proto: proto module stem, e.g. "riak" -> riak_pb.riak_pb2
        self.code = int(code)
        self.message = message
        self.proto = proto
        self.message_code_name = self._message_code_name()
        self.module_name = 'riak_pb.{0}_pb2'.format(self.proto)
        # resolved protobuf class, or None if the module/class is unavailable
        self.message_class = self._message_class()

    def _cmpkey(self):
        # order by protocol code (see ComparableMixin)
        return self.code

    def __hash__(self):
        return self.code

    def _message_code_name(self):
        # "RpbGetServerInfoReq" -> "MSG_CODE_GET_SERVER_INFO_REQ":
        # drop the Rpb prefix, split CamelCase on case boundaries, upper-case
        strip_rpb = re.sub(r"^Rpb", "", self.message)
        word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', strip_rpb)
        word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
        word = word.replace("-", "_")
        return "MSG_CODE_" + word.upper()

    def _message_class(self):
        # Best-effort import of the generated protobuf class; returns None
        # when the module or class cannot be found.
        # NOTE(review): distutils.log.debug uses %-style formatting, so the
        # '{0}' placeholders below are never interpolated -- confirm.
        try:
            pbmod = __import__(self.module_name, globals(), locals(),
                               [self.message])
            klass = pbmod.__dict__[self.message]
            return klass
        except KeyError:
            log.debug("Did not find '{0}' message class in module '{1}'",
                      self.message, self.module_name)
        except ImportError:
            log.debug("Could not import module '{0}'", self.module_name)
        return None
class clean_messages(Command):
    """
    Cleans generated message code mappings. Add to the build process
    using::

        setup(cmd_class={'clean_messages': clean_messages})
    """

    description = "clean generated protocol message code mappings"

    user_options = [
        ('destination=', None, 'destination Python source file')
    ]

    def initialize_options(self):
        self.destination = None

    def finalize_options(self):
        # default the destination to whatever build_messages would generate
        self.set_undefined_options('build_messages',
                                   ('destination', 'destination'))

    def run(self):
        # remove the generated file if it exists; no-op otherwise
        if isfile(self.destination):
            self.execute(os.remove, [self.destination],
                         msg="removing {0}".format(self.destination))
class build_messages(Command):
    """
    Generates message code mappings. Add to the build process using::

        setup(cmd_class={'build_messages': build_messages})
    """

    description = "generate protocol message code mappings"

    user_options = [
        ('source=', None, 'source CSV file containing message code mappings'),
        ('destination=', None, 'destination Python source file')
    ]

    # Used in loading and generating
    _pb_imports = set()
    _messages = set()
    _linesep = os.linesep
    _indented_item_sep = ',{0} '.format(_linesep)

    # header lines emitted at the top of the generated module
    _docstring = [
        ''
        '# This is a generated file. DO NOT EDIT.',
        '',
        '"""',
        'Constants and mappings between Riak protocol codes and messages.',
        '"""',
        ''
    ]

    def initialize_options(self):
        self.source = None
        self.destination = None
        self.update_import = None

    def finalize_options(self):
        if self.source is None:
            self.source = 'src/riak_pb_messages.csv'
        if self.destination is None:
            self.destination = 'riak_pb/messages.py'

    def run(self):
        # make_file skips regeneration when the destination is up to date
        self.make_file(self.source, self.destination,
                       self._load_and_generate, [])

    def _load_and_generate(self):
        # rewrite the generated *_pb2 modules first, since _load imports them
        self._format_python2_or_3()
        self._load()
        self._generate()

    def _load(self):
        # parse the CSV into MessageCodeMapping objects and collect the
        # protobuf modules they reference
        with open(self.source, 'r', buffering=1) as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                message = MessageCodeMapping(*row)
                self._messages.add(message)
                self._pb_imports.add(message.module_name)

    def _generate(self):
        # assemble the generated module line by line, then write it out
        self._contents = []
        self._generate_doc()
        self._generate_imports()
        self._generate_codes()
        self._generate_classes()
        write_file(self.destination, self._contents)

    def _generate_doc(self):
        # Write the license and docstring header
        self._contents.append(LICENSE)
        self._contents.extend(self._docstring)

    def _generate_imports(self):
        # Write imports
        for im in sorted(self._pb_imports):
            self._contents.append("import {0}".format(im))

    def _generate_codes(self):
        # Write protocol code constants
        for message in sorted(self._messages):
            pass
        self._contents.extend(['', "# Protocol codes"])
        for message in sorted(self._messages):
            self._contents.append("{0} = {1}".format(message.message_code_name,
                                                     message.code))

    def _generate_classes(self):
        # Write message classes
        classes = [self._generate_mapping(message)
                   for message in sorted(self._messages)]
        classes = self._indented_item_sep.join(classes)
        self._contents.extend(['',
                               "# Mapping from code to protobuf class",
                               'MESSAGE_CLASSES = {',
                               ' ' + classes,
                               '}'])

    def _generate_mapping(self, m):
        # one "MSG_CODE_X: module.Class" entry (class may be None)
        if m.message_class is not None:
            klass = "{0}.{1}".format(m.module_name,
                                     m.message_class.__name__)
        else:
            klass = "None"
        pair = "{0}: {1}".format(m.message_code_name, klass)
        if len(pair) > 76:
            # Try to satisfy PEP8, lulz
            pair = (self._linesep + ' ').join(pair.split(' '))
        return pair

    def _format_python2_or_3(self):
        """
        Change the PB files to use full pathnames for Python 3.x
        and modify the metaclasses to be version agnostic
        """
        pb_files = set()
        with open(self.source, 'r', buffering=1) as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                _, _, proto = row
                pb_files.add('riak_pb/{0}_pb2.py'.format(proto))
        for im in sorted(pb_files):
            with open(im, 'r', buffering=1) as pbfile:
                contents = 'from six import *\n' + pbfile.read()
                contents = re.sub(r'riak_pb2',
                                  r'riak_pb.riak_pb2',
                                  contents)
                # Look for this pattern in the protoc-generated file:
                #
                # class RpbCounterGetResp(_message.Message):
                #     __metaclass__ = _reflection.GeneratedProtocolMessageType
                #
                # and convert it to:
                #
                # @add_metaclass(_reflection.GeneratedProtocolMessageType)
                # class RpbCounterGetResp(_message.Message):
                contents = re.sub(
                    r'class\s+(\S+)\((\S+)\):\s*\n'
                    '\s+__metaclass__\s+=\s+(\S+)\s*\n',
                    r'@add_metaclass(\3)\nclass \1(\2):\n', contents)
            with open(im, 'w', buffering=1) as pbfile:
                pbfile.write(contents)
| 4,431 | 30 | 746 |
10a2edaf6f66e70ee3dc6a7b663843fbdaa7a8ab | 1,963 | py | Python | isochrones/tests/test_likelihood.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 100 | 2015-03-12T12:51:03.000Z | 2022-01-07T23:16:01.000Z | isochrones/tests/test_likelihood.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 154 | 2015-02-26T20:47:57.000Z | 2022-03-29T09:51:50.000Z | isochrones/tests/test_likelihood.py | Sam-2727/isochrones | 11f49c6c693e91bf275bb6a20af41b5f42e233da | [
"MIT"
] | 62 | 2015-02-03T17:58:43.000Z | 2021-12-04T22:31:20.000Z | from isochrones.starmodel import StarModel, BasicStarModel
from isochrones import get_ichrone
import numpy as np
mist = get_ichrone("mist")
props = dict(Teff=(5800, 100), logg=(4.5, 0.1), J=(3.58, 0.05), K=(3.22, 0.05), parallax=(100, 0.1))
props_phot = dict(J=(3.58, 0.05), K=(3.22, 0.05), parallax=(100, 0.1))
props_spec = dict(Teff=(5800, 100), logg=(4.5, 0.1), parallax=(100, 0.1))
| 33.844828 | 100 | 0.650535 | from isochrones.starmodel import StarModel, BasicStarModel
from isochrones import get_ichrone
import numpy as np
mist = get_ichrone("mist")
props = dict(Teff=(5800, 100), logg=(4.5, 0.1), J=(3.58, 0.05), K=(3.22, 0.05), parallax=(100, 0.1))
props_phot = dict(J=(3.58, 0.05), K=(3.22, 0.05), parallax=(100, 0.1))
props_spec = dict(Teff=(5800, 100), logg=(4.5, 0.1), parallax=(100, 0.1))
def test_compare_starmodels(props=props):
    """Check that StarModel and BasicStarModel agree on lnlike/lnprior/lnpost
    for single, binary and triple star configurations, given identical priors.
    """
    m1 = StarModel(mist, **props)
    m2 = BasicStarModel(mist, **props)

    # Ensure priors are identical
    for k in ["mass", "feh", "age", "distance", "AV", "eep"]:
        m2.set_prior(**{k: m1._priors[k]})

    # single star: [eep, log10(age), feh, distance, AV]
    pars = [300, 9.8, 0.01, 100, 0.1]
    assert np.isclose(m1.lnlike(pars), m2.lnlike(pars))
    assert np.isclose(m1.lnprior(pars), m2.lnprior(pars))
    assert np.isclose(m1.lnpost(pars), m2.lnpost(pars))

    m1_bin = StarModel(mist, **props, N=2)
    m2_bin = BasicStarModel(mist, **props, N=2)

    # Ensure priors are identical
    for k in ["mass", "feh", "age", "distance", "AV", "eep"]:
        m2_bin.set_prior(**{k: m1_bin._priors[k]})

    # binary: two eeps, then shared age/feh/distance/AV
    pars = [300, 280, 9.8, 0.01, 100, 0.1]
    assert np.isclose(m1_bin.lnlike(pars), m2_bin.lnlike(pars))
    assert np.isclose(m1_bin.lnprior(pars), m2_bin.lnprior(pars))
    assert np.isclose(m1_bin.lnpost(pars), m2_bin.lnpost(pars))

    m1_trip = StarModel(mist, **props, N=3)
    m2_trip = BasicStarModel(mist, **props, N=3)

    # Ensure priors are identical
    for k in ["mass", "feh", "age", "distance", "AV", "eep"]:
        m2_trip.set_prior(**{k: m1_trip._priors[k]})

    # triple: three eeps, then shared age/feh/distance/AV
    pars = [300, 280, 260.0, 9.8, 0.01, 100, 0.1]
    assert np.isclose(m1_trip.lnlike(pars), m2_trip.lnlike(pars))
    assert np.isclose(m1_trip.lnprior(pars), m2_trip.lnprior(pars))
    assert np.isclose(m1_trip.lnpost(pars), m2_trip.lnpost(pars))
def test_compare_spec():
    """Same model comparison, but with spectroscopic constraints only."""
    test_compare_starmodels(props_spec)
def test_compare_phot():
    """Same model comparison, but with photometric constraints only."""
    test_compare_starmodels(props_phot)
| 1,501 | 0 | 69 |
294f6146c0dec4a03ddb7a93e0b2ce1bbcaf8d01 | 2,127 | py | Python | video.py | brainbots/bot-video | d6932a762167c5d8026ee9faf874d3fabca692e6 | [
"Apache-2.0"
] | null | null | null | video.py | brainbots/bot-video | d6932a762167c5d8026ee9faf874d3fabca692e6 | [
"Apache-2.0"
] | null | null | null | video.py | brainbots/bot-video | d6932a762167c5d8026ee9faf874d3fabca692e6 | [
"Apache-2.0"
] | null | null | null | import os, fnmatch
from functools import partial
from PyQt5.QtCore import QProcess
from pykeyboard import PyKeyboard
from bots.abstract_bot import AbstractBot
from bots.action import Action
from bots.utility import waitForWindowByTitle
from local_settings import VIDEO_DIR
| 30.385714 | 136 | 0.666667 | import os, fnmatch
from functools import partial
from PyQt5.QtCore import QProcess
from pykeyboard import PyKeyboard
from bots.abstract_bot import AbstractBot
from bots.action import Action
from bots.utility import waitForWindowByTitle
from local_settings import VIDEO_DIR
class VideoBot(AbstractBot):
    """Bot that finds a local video by (partial) title and plays it in the
    system's default player, exposing play/pause and seek commands."""
    def __init__(self, id):
        actions = ['video.play']
        super().__init__(id, actions)
        #REQUIRED
        self.query = None
        self.process = QProcess()
        # Indices map to run_command(): 0 play/pause, 1 seek back, 2 seek fwd.
        self.commands = ['⏯', '⏪', '⏩']
        self.keyboard = None
        # Filled in by extract_attr() once a matching file is found.
        self.video_path = None
        self.video_title = None
    def extract_attr(self, intent):
        """Scan VIDEO_DIR recursively for the first video file whose name
        contains the requested title (case-insensitive substring match)."""
        query = intent.parameters['video'].lower()
        for root, dirs, files in os.walk(VIDEO_DIR):
            for f in files:
                _lower = f.lower()
                if _lower.endswith(('.avi', '.mkv', '.mp4')) and fnmatch.fnmatch(_lower, "*{}*".format(query)):
                    self.video_path = os.path.abspath(os.path.join(root, f))
                    self.video_title = f
                    #TODO: if many matches, allow the user to choose which one
                    # Stop the whole walk: the previous `break` only left the
                    # inner loop, so a match in a later directory could
                    # silently overwrite this one.
                    return
    def execute(self):
        """Launch the matched video via xdg-open, wait for its window and
        return an 'embed' action pointing at it."""
        if self.video_path is None:
            # Same failure type callers previously observed (a missing
            # attribute), but with an actionable message.
            raise AttributeError(
                'no video matched the query; call extract_attr() first')
        self.process.start("/usr/bin/xdg-open \"{}\"".format(self.video_path))
        wnd = waitForWindowByTitle(self.video_title)
        self.keyboard = PyKeyboard()
        return Action(action_type = 'embed', body = {'hwnd': wnd['hwnd'], 'commands': self.commands}, bot = self.id, keep_context = False)
    def request_missing_attr(self):
        """No-op: interactive attribute prompting is not implemented yet."""
        #TODO: Check for missing attr
        pass
    def has_missing_attr(self):
        """All attributes are resolved in extract_attr(); nothing to ask."""
        return False
    def is_long_running(self):
        """Playback keeps running until terminate() is called."""
        return True
    def run_command(self, command_index):
        """Translate a command index (see self.commands) into a keyboard tap
        delivered to the player window."""
        if command_index == 0:
            arg = self.keyboard.space        # play/pause
        elif command_index == 1:
            arg = self.keyboard.left_key     # seek backward
        elif command_index == 2:
            arg = self.keyboard.right_key    # seek forward
        else:
            # Unknown index: preserve the old behavior of tapping None.
            arg = None
        #fn = lambda: [self.keyboard.tap_key(self.keyboard.escape_key), self.keyboard.tap_key(arg)]
        fn = lambda: self.keyboard.tap_key(arg)
        return Action(action_type = 'keyboard_event', body = {'fn': fn}, bot = self.id, keep_context = False)
    def terminate(self):
        """Release the keyboard handle and kill the player process."""
        self.keyboard = None
        self.process.terminate()
        self.process = None
| 1,628 | 7 | 222 |
48d6aa01c047142b8c1df4731859c27a355a6555 | 857 | py | Python | provisioning/ubuntu-xenial/config.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | provisioning/ubuntu-xenial/config.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | provisioning/ubuntu-xenial/config.py | usnistgov/reductus | abb977f8db41975bb577597e23790b8b58b19d98 | [
"Unlicense"
] | null | null | null | #############################################################
# rename or copy this file to config.py if you make changes #
#############################################################
# change this to your fully-qualified domain name to run a
# remote server. The default value of localhost will
# only allow connections from the same computer.
#jsonrpc_servername = "h3.umd.edu"
jsonrpc_servername = "localhost"
jsonrpc_port = 8001
http_port = 8000
serve_staticfiles = False
#use_redis = True
use_diskcache = True
diskcache_params = {"size_limit": int(4*2**30), "shards": 5}
use_msgpack = True
data_sources = [
{
"name": "ncnr",
"url": "https://www.ncnr.nist.gov/pub/",
"start_path": "ncnrdata",
"file_helper_url": "https://www.ncnr.nist.gov/ipeek/listftpfiles.php"
},
]
instruments = ["refl", "ospec", "sans"]
| 32.961538 | 77 | 0.586931 | #############################################################
# rename or copy this file to config.py if you make changes #
#############################################################
# change this to your fully-qualified domain name to run a
# remote server. The default value of localhost will
# only allow connections from the same computer.
#jsonrpc_servername = "h3.umd.edu"
jsonrpc_servername = "localhost"
jsonrpc_port = 8001
http_port = 8000
serve_staticfiles = False
#use_redis = True
use_diskcache = True
diskcache_params = {"size_limit": int(4*2**30), "shards": 5}
use_msgpack = True
data_sources = [
{
"name": "ncnr",
"url": "https://www.ncnr.nist.gov/pub/",
"start_path": "ncnrdata",
"file_helper_url": "https://www.ncnr.nist.gov/ipeek/listftpfiles.php"
},
]
instruments = ["refl", "ospec", "sans"]
| 0 | 0 | 0 |
cc62f446e47f761f3ca74447a2fad3603294de87 | 870 | py | Python | mysite/mongodb/emails.py | dduong711/API_project | a30ee07d2d61af9b57b3f0e21020a45b83db2e00 | [
"MIT"
] | null | null | null | mysite/mongodb/emails.py | dduong711/API_project | a30ee07d2d61af9b57b3f0e21020a45b83db2e00 | [
"MIT"
] | null | null | null | mysite/mongodb/emails.py | dduong711/API_project | a30ee07d2d61af9b57b3f0e21020a45b83db2e00 | [
"MIT"
] | null | null | null | # emails.py
from django.template import loader
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
mongodb_notification_email = NotificationEmail()
| 30 | 75 | 0.717241 | # emails.py
from django.template import loader
from django.core.mail import EmailMultiAlternatives
from django.conf import settings
class NotificationEmail:
    """Renders and sends the MongoDB action-notification e-mail."""
    subject_template_name = "mongodb/email/action_notification_subject.txt"
    email_template_name = "mongodb/email/action_notification_email.txt"
    from_email = settings.MONGODB_FROM_EMAIL
    to_email = settings.MONGODB_TO_EMAIL
    def send_mail(self, context):
        """Render subject and body templates with *context* and send."""
        rendered_subject = loader.render_to_string(
            self.subject_template_name, context)
        # An e-mail subject header must be a single line.
        one_line_subject = ''.join(rendered_subject.splitlines())
        message_body = loader.render_to_string(
            self.email_template_name, context)
        EmailMultiAlternatives(
            one_line_subject,
            message_body,
            self.from_email,
            self.to_email,
        ).send()
| 398 | 264 | 23 |
563b0eaaa4f691314756fd4e23087ed972485419 | 18,040 | py | Python | edb/edgeql/compiler/inference/cardinality.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | edb/edgeql/compiler/inference/cardinality.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | edb/edgeql/compiler/inference/cardinality.py | haikyuu/edgedb | 73125882a4eff337692ad10af4bfdf15eef341ab | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import functools
from edb import errors
from edb.edgeql import qltypes
from edb.schema import objtypes as s_objtypes
from edb.schema import pointers as s_pointers
from edb.ir import ast as irast
from edb.ir import utils as irutils
from .. import context
if TYPE_CHECKING:
from edb.schema import constraints as s_constr
ONE = qltypes.Cardinality.ONE
MANY = qltypes.Cardinality.MANY
@functools.singledispatch
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
@_infer_cardinality.register
| 29.333333 | 79 | 0.645953 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import functools
from edb import errors
from edb.edgeql import qltypes
from edb.schema import objtypes as s_objtypes
from edb.schema import pointers as s_pointers
from edb.ir import ast as irast
from edb.ir import utils as irutils
from .. import context
if TYPE_CHECKING:
from edb.schema import constraints as s_constr
ONE = qltypes.Cardinality.ONE
MANY = qltypes.Cardinality.MANY
def _get_set_scope(
        ir_set: irast.Set,
        scope_tree: irast.ScopeTreeNode) -> irast.ScopeTreeNode:
    """Return the scope node the set is attached to, or *scope_tree*.

    Falls back to *scope_tree* when the set carries no scope id, or when
    the id cannot be resolved in the tree.
    """
    found = None
    if ir_set.path_scope_id:
        found = scope_tree.root.find_by_unique_id(ir_set.path_scope_id)
    return scope_tree if found is None else found
def _max_cardinality(
    args: Iterable[qltypes.Cardinality],
) -> qltypes.Cardinality:
    """Combine cardinalities: ONE only when every element is ONE.

    An empty iterable combines to ONE.
    """
    return ONE if all(card == ONE for card in args) else MANY
def _common_cardinality(
    args: Iterable[irast.Base],
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Infer each argument's cardinality and combine them (ONE iff all ONE)."""
    inferred = (infer_cardinality(node, scope_tree, env) for node in args)
    return _max_cardinality(inferred)
@functools.singledispatch
def _infer_cardinality(
    ir: irast.Expr,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Dispatch fallback: no handler is registered for this IR node type."""
    raise ValueError(f'infer_cardinality: cannot handle {ir!r}')
@_infer_cardinality.register
def __infer_none(
    ir: None,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Reject a None argument explicitly rather than crashing in dispatch."""
    # Here for debugging purposes.
    raise ValueError('invalid infer_cardinality(None, schema) call')
@_infer_cardinality.register
def __infer_statement(
    ir: irast.Statement,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A statement wrapper is as plural as its wrapped expression."""
    return infer_cardinality(ir.expr, scope_tree, env)
@_infer_cardinality.register
def __infer_config_insert(
    ir: irast.ConfigInsert,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """CONFIGURE ... INSERT is as plural as the inserted expression."""
    return infer_cardinality(ir.expr, scope_tree, env)
@_infer_cardinality.register
def __infer_emptyset(
    ir: irast.EmptySet,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """An empty set has at most one element, hence ONE."""
    return ONE
@_infer_cardinality.register
def __infer_typeref(
    ir: irast.TypeRef,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A type reference denotes a single schema object."""
    return ONE
@_infer_cardinality.register
def __infer_type_introspection(
    ir: irast.TypeIntrospection,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Type introspection yields a single introspection object."""
    return ONE
def _is_visible(
    ir: irast.Set,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> bool:
    """Check whether *ir*'s path is visible from the enclosing fence.

    A visible path is already bound in an outer scope and therefore
    contributes a single element (cardinality ONE) at this point.
    """
    fence = scope_tree.parent_fence
    if fence is None:
        return False
    path_id = ir.path_id
    if scope_tree.namespaces:
        path_id = path_id.strip_namespace(scope_tree.namespaces)
    return fence.is_visible(path_id)
@_infer_cardinality.register
def __infer_set(
    ir: irast.Set,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of a Set node.

    A set visible from the enclosing fence is a bound singleton (ONE).
    Otherwise the answer follows the set's pointer step (rptr), with
    special handling for type-intersection pointers, or the set's
    computed expression.
    """
    if ir.path_scope_id and _is_visible(ir, scope_tree, env):
        return ONE
    rptr = ir.rptr
    if rptr is not None:
        rptrref = rptr.ptrref
        if isinstance(rptrref, irast.TypeIntersectionPointerRef):
            # Peel off the chain of [IS Type] steps to find the set the
            # intersection applies to.
            ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir)
            new_scope = _get_set_scope(ir, scope_tree)
            if ind_prefix.rptr is None:
                return infer_cardinality(ind_prefix, new_scope, env)
            else:
                # Expression before type intersection is a path,
                # i.e Foo.<bar[IS Type].  In this case we must
                # take possible intersection specialization of the
                # link union into account.
                # We're basically restating the body of this function
                # in this block, but with extra conditions.
                if _is_visible(ind_prefix, new_scope, env):
                    return ONE
                else:
                    # Union of pointer refs the intersection narrows to.
                    rptr_spec: Set[irast.PointerRef] = set()
                    for ind_ptr in ind_ptrs:
                        rptr_spec.update(ind_ptr.ptrref.rptr_specialization)
                    if not rptr_spec:
                        # The type intersection does not narrow the
                        # pointer union (or there is no union), so
                        # use the rptr cardinality as if there was no
                        # intersection.
                        if rptrref.dir_cardinality is qltypes.Cardinality.ONE:
                            return infer_cardinality(
                                rptr.source, new_scope, env)
                        else:
                            return MANY
                    else:
                        if any(s.dir_cardinality is qltypes.Cardinality.MANY
                               for s in rptr_spec):
                            return MANY
                        else:
                            # Every specialized pointer is single, so the
                            # step is as plural as the path's source.
                            new_scope = _get_set_scope(ind_prefix, scope_tree)
                            return infer_cardinality(
                                ind_prefix.rptr.source, new_scope, env)
        elif rptrref.dir_cardinality is qltypes.Cardinality.ONE:
            # A single pointer step: as plural as its source.
            new_scope = _get_set_scope(ir, scope_tree)
            return infer_cardinality(rptr.source, new_scope, env)
        else:
            return MANY
    elif ir.expr is not None:
        # A computed set: as plural as its defining expression.
        new_scope = _get_set_scope(ir, scope_tree)
        return infer_cardinality(ir.expr, new_scope, env)
    else:
        return MANY
@_infer_cardinality.register
def __infer_func_call(
    ir: irast.FunctionCall,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of a function call.

    A SET OF-returning function is always MANY; otherwise the call is as
    plural as its non-SET OF (element-wise) arguments, or ONE when there
    are none.
    """
    set_of = qltypes.TypeModifier.SET_OF
    if ir.typemod is set_of:
        return MANY
    # Only non-SET OF arguments multiply the call's result.
    elementwise_args = [
        arg.expr
        for arg, typemod in zip(ir.args, ir.params_typemods)
        if typemod is not set_of
    ]
    if not elementwise_args:
        return ONE
    return _common_cardinality(elementwise_args, scope_tree, env)
@_infer_cardinality.register
def __infer_oper_call(
    ir: irast.OperatorCall,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of an operator call.

    UNION is unconditionally MANY.  A SET OF operator is as plural as
    all of its operands; any other operator is as plural as its
    non-SET OF operands, or ONE when there are none.
    """
    if ir.func_shortname == 'std::UNION':
        return MANY
    set_of = qltypes.TypeModifier.SET_OF
    if ir.typemod is set_of:
        relevant: List[irast.Base] = [a.expr for a in ir.args]
    else:
        relevant = [
            arg.expr
            for arg, typemod in zip(ir.args, ir.params_typemods)
            if typemod is not set_of
        ]
    if not relevant:
        return ONE
    return _common_cardinality(relevant, scope_tree, env)
@_infer_cardinality.register
def __infer_const(
    ir: irast.BaseConstant,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A literal constant is a single value."""
    return ONE
@_infer_cardinality.register
def __infer_param(
    ir: irast.Parameter,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A query parameter binds a single value."""
    return ONE
@_infer_cardinality.register
def __infer_const_set(
    ir: irast.ConstantSet,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A constant set is ONE only when it has exactly one element."""
    return ONE if len(ir.elements) == 1 else MANY
@_infer_cardinality.register
def __infer_typecheckop(
    ir: irast.TypeCheckOp,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """`x IS T` is as plural as its left operand."""
    return infer_cardinality(ir.left, scope_tree, env)
@_infer_cardinality.register
def __infer_typecast(
    ir: irast.TypeCast,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A cast is as plural as the expression being cast."""
    return infer_cardinality(ir.expr, scope_tree, env)
def _is_ptr_or_self_ref(
    ir_expr: irast.Base,
    result_expr: irast.Set,
    env: context.Environment,
) -> bool:
    """Check whether *ir_expr* refers to the result set itself or to one
    of its immediate pointers (a bare path with no computed expression)."""
    if not isinstance(ir_expr, irast.Set):
        return False
    source_type = env.set_types[result_expr]
    if not isinstance(source_type, s_objtypes.ObjectType):
        return False
    if ir_expr.expr is not None:
        # Computed sets are not plain pointer/self references.
        return False
    if env.set_types[ir_expr] == source_type:
        return True
    return (
        ir_expr.rptr is not None
        and source_type.getptr(
            env.schema,
            ir_expr.rptr.ptrref.shortname.name) is not None
    )
def extract_filters(
    result_set: irast.Set,
    filter_set: irast.Set,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> Sequence[Tuple[s_pointers.Pointer, irast.Set]]:
    """Collect (pointer, value) pairs from singleton equality filters.

    Recognizes ``ptr = value`` (or ``value = ptr``) comparisons against
    the result set or one of its direct pointers, recursing through AND.
    Only comparisons whose operands are both singletons are collected,
    since those are the ones that can constrain the result cardinality.
    """
    schema = env.schema
    scope_tree = _get_set_scope(filter_set, scope_tree)
    ptr: s_pointers.Pointer
    ptr_filters = []
    expr = filter_set.expr
    if isinstance(expr, irast.OperatorCall):
        if expr.func_shortname == 'std::=':
            left, right = (a.expr for a in expr.args)
            op_card = _common_cardinality(
                [left, right], scope_tree, env)
            result_stype = env.set_types[result_set]
            if op_card == MANY:
                # A multi comparison cannot pin down the result.
                pass
            elif _is_ptr_or_self_ref(left, result_set, env):
                if infer_cardinality(right, scope_tree, env) == ONE:
                    left_stype = env.set_types[left]
                    if left_stype == result_stype:
                        # A self-reference compares on identity, i.e. `id`.
                        assert isinstance(left_stype, s_objtypes.ObjectType)
                        _ptr = left_stype.getptr(schema, 'id')
                    else:
                        _ptr = env.schema.get(left.rptr.ptrref.name)
                    assert isinstance(_ptr, s_pointers.Pointer)
                    ptr = _ptr
                    ptr_filters.append((ptr, right))
            elif _is_ptr_or_self_ref(right, result_set, env):
                # Mirror of the branch above with the operands swapped.
                if infer_cardinality(left, scope_tree, env) == ONE:
                    right_stype = env.set_types[right]
                    if right_stype == result_stype:
                        assert isinstance(right_stype, s_objtypes.ObjectType)
                        _ptr = right_stype.getptr(schema, 'id')
                    else:
                        _ptr = env.schema.get(right.rptr.ptrref.name)
                    assert isinstance(_ptr, s_pointers.Pointer)
                    ptr = _ptr
                    ptr_filters.append((ptr, left))
        elif expr.func_shortname == 'std::AND':
            # Both conjuncts constrain the result; collect from each side.
            left, right = (a.expr for a in expr.args)
            ptr_filters.extend(
                extract_filters(result_set, left, scope_tree, env))
            ptr_filters.extend(
                extract_filters(result_set, right, scope_tree, env))
    return ptr_filters
def _analyse_filter_clause(
    result_set: irast.Set,
    filter_clause: irast.Set,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Return ONE when the filter pins the result down via an equality on
    an `id` or exclusively-constrained pointer; MANY otherwise."""
    schema = env.schema
    equality_filters = extract_filters(
        result_set, filter_clause, scope_tree, env)
    if not equality_filters:
        return MANY
    exclusive_constr: s_constr.Constraint = schema.get('std::exclusive')
    for pointer, _value in equality_filters:
        base_ptr = cast(
            s_pointers.Pointer,
            pointer.get_nearest_non_derived_parent(env.schema),
        )
        if base_ptr.is_id_pointer(schema):
            # Equality on the id pointer is always unique.
            return ONE
        constrs = base_ptr.get_constraints(schema).objects(schema)
        if any(c.issubclass(schema, exclusive_constr) for c in constrs):
            # Bingo, got an equality filter on a link with a
            # unique constraint.
            return ONE
    return MANY
def _infer_stmt_cardinality(
    result_set: irast.Set,
    filter_clause: Optional[irast.Set],
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of a statement's result, taking FILTER into account.

    A MANY result may still collapse to ONE when the filter is an
    equality on an exclusive pointer.
    """
    base_card = infer_cardinality(result_set, scope_tree, env)
    if base_card == MANY and filter_clause is not None:
        return _analyse_filter_clause(
            result_set, filter_clause, scope_tree, env)
    return base_card
@_infer_cardinality.register
def __infer_select_stmt(
    ir: irast.SelectStmt,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of a SELECT: an explicit annotation wins, then LIMIT 1,
    then the result/FILTER analysis, widened by any FOR iterator."""
    if ir.cardinality:
        return ir.cardinality
    has_limit_one = (
        ir.limit is not None
        and isinstance(ir.limit.expr, irast.IntegerConstant)
        and ir.limit.expr.value == '1'
    )
    if has_limit_one:
        # Explicit LIMIT 1 clause.
        card = ONE
    else:
        card = _infer_stmt_cardinality(ir.result, ir.where, scope_tree, env)
    if ir.iterator_stmt:
        # A FOR iterator multiplies the result.
        card = _max_cardinality(
            (card, infer_cardinality(ir.iterator_stmt, scope_tree, env)))
    return card
@_infer_cardinality.register
def __infer_insert_stmt(
    ir: irast.InsertStmt,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of an INSERT: explicit annotation wins; otherwise one
    object per iteration of an enclosing FOR, else a singleton."""
    if ir.cardinality:
        return ir.cardinality
    if ir.iterator_stmt:
        return infer_cardinality(ir.iterator_stmt, scope_tree, env)
    # INSERT without a FOR is always a singleton.
    return ONE
@_infer_cardinality.register
def __infer_update_stmt(
    ir: irast.UpdateStmt,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of an UPDATE: explicit annotation wins; otherwise the
    subject/FILTER analysis, widened by any FOR iterator."""
    if ir.cardinality:
        return ir.cardinality
    card = _infer_stmt_cardinality(ir.subject, ir.where, scope_tree, env)
    if ir.iterator_stmt:
        card = _max_cardinality(
            (card, infer_cardinality(ir.iterator_stmt, scope_tree, env)))
    return card
@_infer_cardinality.register
def __infer_delete_stmt(
    ir: irast.DeleteStmt,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Cardinality of a DELETE: explicit annotation wins; otherwise the
    subject's cardinality (no FILTER here), widened by any FOR iterator."""
    if ir.cardinality:
        return ir.cardinality
    card = _infer_stmt_cardinality(ir.subject, None, scope_tree, env)
    if ir.iterator_stmt:
        card = _max_cardinality(
            (card, infer_cardinality(ir.iterator_stmt, scope_tree, env)))
    return card
@_infer_cardinality.register
def __infer_stmt(
    ir: irast.Stmt,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Generic statement: explicit annotation, else the result's cardinality."""
    if ir.cardinality:
        return ir.cardinality
    return infer_cardinality(ir.result, scope_tree, env)
@_infer_cardinality.register
def __infer_slice(
    ir: irast.SliceIndirection,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A slice is as plural as the sliced expression and any bounds given."""
    operands = [ir.expr]
    operands.extend(
        bound for bound in (ir.start, ir.stop) if bound is not None)
    return _common_cardinality(operands, scope_tree, env)
@_infer_cardinality.register
def __infer_index(
    ir: irast.IndexIndirection,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """`expr[index]` is as plural as the base and index together."""
    # index indirection cardinality depends on both the cardinality of
    # the base expression and the index expression
    return _common_cardinality([ir.expr, ir.index], scope_tree, env)
@_infer_cardinality.register
def __infer_array(
    ir: irast.Array,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """An array literal is as plural as its elements."""
    return _common_cardinality(ir.elements, scope_tree, env)
@_infer_cardinality.register
def __infer_tuple(
    ir: irast.Tuple,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """A tuple literal is as plural as its element values."""
    return _common_cardinality(
        [el.val for el in ir.elements], scope_tree, env)
@_infer_cardinality.register
def __infer_tuple_indirection(
    ir: irast.TupleIndirection,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """`tuple.field` is as plural as the tuple itself."""
    # the cardinality of the tuple indirection is the same as the
    # cardinality of the underlying tuple
    return infer_cardinality(ir.expr, scope_tree, env)
def infer_cardinality(
    ir: irast.Base,
    scope_tree: irast.ScopeTreeNode,
    env: context.Environment,
) -> qltypes.Cardinality:
    """Infer (and memoize in *env*) the cardinality of *ir* in *scope_tree*.

    Raises:
        errors.QueryError: if inference produced something other than
            ONE or MANY.
    """
    cache_key = (ir, scope_tree)
    cached = env.inferred_cardinality.get(cache_key)
    if cached is not None:
        return cached
    inferred = _infer_cardinality(ir, scope_tree, env)
    if inferred not in {ONE, MANY}:
        raise errors.QueryError(
            'could not determine the cardinality of '
            'set produced by expression',
            context=ir.context)
    env.inferred_cardinality[cache_key] = inferred
    return inferred
| 15,371 | 0 | 757 |
c382159dff554857c86f8c0173e004dc0e7afefa | 675 | py | Python | alembic/versions/2711340c6d9d_added_orientation_column_to_pi_model.py | PeterGrace/pi_director | 217f8c504830d2e8c18f166b62b8138d3d25a167 | [
"MIT"
] | 12 | 2015-08-28T20:48:29.000Z | 2021-08-23T02:56:55.000Z | alembic/versions/2711340c6d9d_added_orientation_column_to_pi_model.py | PeterGrace/pi_director | 217f8c504830d2e8c18f166b62b8138d3d25a167 | [
"MIT"
] | 21 | 2015-08-31T19:41:04.000Z | 2016-02-17T21:42:39.000Z | alembic/versions/2711340c6d9d_added_orientation_column_to_pi_model.py | PeterGrace/pi_director | 217f8c504830d2e8c18f166b62b8138d3d25a167 | [
"MIT"
] | 7 | 2015-08-28T20:50:03.000Z | 2020-06-06T12:49:29.000Z | """added orientation column to pi model
Revision ID: 2711340c6d9d
Revises: 490d49497045
Create Date: 2015-09-25 09:43:33.202018
"""
# revision identifiers, used by Alembic.
revision = '2711340c6d9d'
down_revision = '490d49497045'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
| 23.275862 | 81 | 0.706667 | """added orientation column to pi model
Revision ID: 2711340c6d9d
Revises: 490d49497045
Create Date: 2015-09-25 09:43:33.202018
"""
# revision identifiers, used by Alembic.
revision = '2711340c6d9d'
down_revision = '490d49497045'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable integer 'orientation' column to the PiUrl table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('PiUrl', sa.Column('orientation', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the 'orientation' column from the PiUrl table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('PiUrl', 'orientation')
    ### end Alembic commands ###
| 307 | 0 | 46 |