hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acee75f7247618ee8b176671dc108d445b8664b2 | 4,829 | py | Python | nadypy/models/system_information.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | nadypy/models/system_information.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | nadypy/models/system_information.py | Nadybot/nadypy | ae6cbb886d233fde491aee501cb72bf993f3a02f | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.basic_system_information import BasicSystemInformation
from ..models.channel_info import ChannelInfo
from ..models.config_statistics import ConfigStatistics
from ..models.memory_information import MemoryInformation
from ..models.misc_system_information import MiscSystemInformation
from ..models.system_stats import SystemStats
from ..types import UNSET, Unset
T = TypeVar("T", bound="SystemInformation")
@attr.s(auto_attribs=True)
class SystemInformation:
    """API model aggregating several sub-reports about the running system.

    Every field is optional: absent values are represented by the ``UNSET``
    sentinel and omitted when serializing.  JSON keys that do not match a
    declared field are kept verbatim in ``additional_properties`` and are
    exposed through the mapping-style dunder methods below.
    """

    # Declared sub-reports; each stays UNSET until filled from a response.
    basic: Union[Unset, BasicSystemInformation] = UNSET
    memory: Union[Unset, MemoryInformation] = UNSET
    misc: Union[Unset, MiscSystemInformation] = UNSET
    config: Union[Unset, ConfigStatistics] = UNSET
    stats: Union[Unset, SystemStats] = UNSET
    channels: Union[Unset, List[ChannelInfo]] = UNSET
    # Catch-all for undeclared JSON keys (filled by from_dict, not __init__).
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, skipping every field that is UNSET."""
        # Convert each nested model to its dict form only when it is set.
        basic: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.basic, Unset):
            basic = self.basic.to_dict()
        memory: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.memory, Unset):
            memory = self.memory.to_dict()
        misc: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.misc, Unset):
            misc = self.misc.to_dict()
        config: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.config, Unset):
            config = self.config.to_dict()
        stats: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.stats, Unset):
            stats = self.stats.to_dict()
        channels: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.channels, Unset):
            channels = []
            for channels_item_data in self.channels:
                channels_item = channels_item_data.to_dict()
                channels.append(channels_item)
        # Unknown keys go in first, so declared fields win on key collision.
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})  # no always-present fields (generated no-op)
        if basic is not UNSET:
            field_dict["basic"] = basic
        if memory is not UNSET:
            field_dict["memory"] = memory
        if misc is not UNSET:
            field_dict["misc"] = misc
        if config is not UNSET:
            field_dict["config"] = config
        if stats is not UNSET:
            field_dict["stats"] = stats
        if channels is not UNSET:
            field_dict["channels"] = channels
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a response dict.

        Known keys are popped off a copy of ``src_dict`` and parsed into
        their model types; whatever remains becomes ``additional_properties``.
        """
        d = src_dict.copy()
        _basic = d.pop("basic", UNSET)
        basic: Union[Unset, BasicSystemInformation]
        if isinstance(_basic, Unset):
            basic = UNSET
        else:
            basic = BasicSystemInformation.from_dict(_basic)
        _memory = d.pop("memory", UNSET)
        memory: Union[Unset, MemoryInformation]
        if isinstance(_memory, Unset):
            memory = UNSET
        else:
            memory = MemoryInformation.from_dict(_memory)
        _misc = d.pop("misc", UNSET)
        misc: Union[Unset, MiscSystemInformation]
        if isinstance(_misc, Unset):
            misc = UNSET
        else:
            misc = MiscSystemInformation.from_dict(_misc)
        _config = d.pop("config", UNSET)
        config: Union[Unset, ConfigStatistics]
        if isinstance(_config, Unset):
            config = UNSET
        else:
            config = ConfigStatistics.from_dict(_config)
        _stats = d.pop("stats", UNSET)
        stats: Union[Unset, SystemStats]
        if isinstance(_stats, Unset):
            stats = UNSET
        else:
            stats = SystemStats.from_dict(_stats)
        # Note: unlike the fields above, a missing "channels" key yields an
        # empty list rather than UNSET.
        channels = []
        _channels = d.pop("channels", UNSET)
        for channels_item_data in _channels or []:
            channels_item = ChannelInfo.from_dict(channels_item_data)
            channels.append(channels_item)
        system_information = cls(
            basic=basic,
            memory=memory,
            misc=misc,
            config=config,
            stats=stats,
            channels=channels,
        )
        # Everything not popped above is an unknown key: keep it verbatim.
        system_information.additional_properties = d
        return system_information

    @property
    def additional_keys(self) -> List[str]:
        """Names of the undeclared JSON keys carried by this instance."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| 32.85034 | 77 | 0.621247 |
acee760ea94e4938d69274a529657a8affda9ee9 | 267 | py | Python | src/torchprune/torchprune/method/pfp/pfp_tracker.py | dani3l125/torchprune | f2589ec7514bd531ddaa7da3aed6388bb13712d3 | [
"MIT"
] | 74 | 2021-03-05T01:25:00.000Z | 2022-03-26T06:15:32.000Z | src/torchprune/torchprune/method/pfp/pfp_tracker.py | dani3l125/torchprune | f2589ec7514bd531ddaa7da3aed6388bb13712d3 | [
"MIT"
] | 4 | 2021-05-25T06:01:22.000Z | 2022-01-24T22:38:09.000Z | src/torchprune/torchprune/method/pfp/pfp_tracker.py | dani3l125/torchprune | f2589ec7514bd531ddaa7da3aed6388bb13712d3 | [
"MIT"
] | 7 | 2021-03-24T14:14:32.000Z | 2022-02-19T17:27:56.000Z | """All sensitivity-related trackers."""
import torch
from ..base_sens import BaseSensTracker
class PFPTracker(BaseSensTracker):
    """Sensitivity tracker shared by all PFP pruning variants."""

    def _reduction(self, g_sens_f, dim):
        # Reduce sensitivities by taking the maximum along ``dim``;
        # torch.max with a dim argument returns (values, indices).
        values, _ = torch.max(g_sens_f, dim=dim)
        return values
| 20.538462 | 46 | 0.71161 |
acee76284ebc64069bd1c5710a5b1ef146ffc759 | 1,470 | py | Python | setup.py | hiaselhans/pyvat | 82b9f774541c08d3b7b0830d0e27a8b6a904a675 | [
"Apache-2.0"
] | null | null | null | setup.py | hiaselhans/pyvat | 82b9f774541c08d3b7b0830d0e27a8b6a904a675 | [
"Apache-2.0"
] | null | null | null | setup.py | hiaselhans/pyvat | 82b9f774541c08d3b7b0830d0e27a8b6a904a675 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
try:
    from setuptools import setup
except ImportError:  # very old environments without setuptools
    from distutils.core import setup

import re

# The PyPI long description is taken verbatim from the README.
with open("README.rst", "r") as fh:
    long_description = fh.read()

# Single-source the package version from pyvat/version.py.  A raw string is
# used for the pattern: the old "[0-9\.]" relied on an invalid escape
# sequence that newer Python versions warn about (and will eventually
# reject); '.' needs no escaping inside a character class anyway.
with open("pyvat/version.py", "r") as fp:
    version = re.match(r"__version__ = '([0-9.]+)'", fp.read()).group(1)

packages = [
    'pyvat',
]

requires = [
    'requests>=1.0.0,<3.0',
    'pycountry',
    'enum34; python_version < "3.4"',
]

# NOTE: kept for tooling/CI reference; intentionally not passed to setup()
# because ``tests_require``/``python setup.py test`` are deprecated.
tests_require = [
    'nose',
    'rednose',
    'flake8',
    'unittest2',
]

setup(
    name='pyvat',
    version=version,
    description='VAT validation for Python',
    long_description=long_description,
    long_description_content_type="text/x-rst",
    author='Iconfinder',
    author_email='support@iconfinder.com',
    url='https://www.iconfinder.com',
    project_urls={
        'Issue Tracker': 'https://github.com/iconfinder/pyvat/issues',
    },
    packages=packages,
    install_requires=requires,
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.7',
    ),
)
| 24.915254 | 72 | 0.614286 |
acee7668f00c3d3b9cd21d29c3d76cc7f1e1a890 | 3,063 | py | Python | insights/parsers/tests/test_vdsm_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/tests/test_vdsm_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/tests/test_vdsm_conf.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | import doctest
from insights.parsers import vdsm_conf
from insights.tests import context_wrap
VDSM_CONF = '''
[vars]
ssl = true
cpu_affinity = 1
[addresses]
management_port = 54321
qq = 345
'''
VDSM_LOGGER_CONF = '''
[loggers]
keys=root,vds,storage,virt,ovirt_hosted_engine_ha,ovirt_hosted_engine_ha_config,IOProcess,devel
[handlers]
keys=console,syslog,logfile
[formatters]
keys=long,simple,none,sysform
[logger_root]
level=DEBUG
handlers=syslog,logfile
propagate=0
[logger_vds]
level=DEBUG
handlers=syslog,logfile
qualname=vds
propagate=0
[logger_storage]
level=DEBUG
handlers=logfile
qualname=storage
propagate=0
[logger_ovirt_hosted_engine_ha]
level=DEBUG
handlers=
qualname=ovirt_hosted_engine_ha
propagate=1
[logger_ovirt_hosted_engine_ha_config]
level=DEBUG
handlers=
qualname=ovirt_hosted_engine_ha.env.config
propagate=0
[logger_IOProcess]
level=DEBUG
handlers=logfile
qualname=IOProcess
propagate=0
[logger_virt]
level=DEBUG
handlers=logfile
qualname=virt
propagate=0
[logger_devel]
level=DEBUG
handlers=logfile
qualname=devel
propagate=0
[handler_syslog]
level=WARN
class=handlers.SysLogHandler
formatter=sysform
args=('/dev/log', handlers.SysLogHandler.LOG_USER)
[handler_logfile]
class=vdsm.logUtils.UserGroupEnforcingHandler
args=('vdsm', 'kvm', '/var/log/vdsm/vdsm.log',)
filters=storage.misc.TracebackRepeatFilter
level=DEBUG
formatter=long
[handler_console]
class: StreamHandler
args: []
formatter: none
[formatter_simple]
format: %(asctime)s:%(levelname)s:%(message)s
[formatter_none]
format: %(message)s
[formatter_long]
format: %(asctime)s %(levelname)-5s (%(threadName)s) [%(name)s] %(message)s (%(module)s:%(lineno)d)
class: vdsm.logUtils.TimezoneFormatter
[formatter_sysform]
format= vdsm %(name)s %(levelname)s %(message)s
datefmt=
'''
def test_vdsm_conf_ini():
    """VDSMConfIni parses both sections and typed option values."""
    conf = vdsm_conf.VDSMConfIni(context_wrap(VDSM_CONF))
    assert sorted(conf.sections()) == sorted(['addresses', 'vars'])
    # [vars] section
    assert conf.has_option('vars', 'ssl')
    assert conf.getboolean('vars', 'ssl') is True
    assert conf.getint('vars', 'cpu_affinity') == 1
    # [addresses] section
    assert conf.getint('addresses', 'management_port') == 54321
    assert conf.getint('addresses', 'qq') == 345
def test_vdsm_logger_conf():
    """Spot-check section count and representative options of the logger conf."""
    logger_conf = vdsm_conf.VDSMLoggerConf(context_wrap(VDSM_LOGGER_CONF))
    assert len(logger_conf.sections()) == 18
    assert logger_conf.has_option('loggers', 'keys')
    assert not logger_conf.getboolean('logger_root', 'propagate')
    assert logger_conf.get('logger_ovirt_hosted_engine_ha', 'level') == 'DEBUG'
    assert logger_conf.get('formatter_sysform', 'datefmt') == ''
    assert logger_conf.has_option('formatter_long', 'class')
    expected_keys = 'root,vds,storage,virt,ovirt_hosted_engine_ha,ovirt_hosted_engine_ha_config,IOProcess,devel'
    assert logger_conf.items('loggers') == {'keys': expected_keys}
def test_documentation():
    """Ensure the module's doctest examples stay accurate."""
    globs = {
        'conf': vdsm_conf.VDSMConfIni(context_wrap(VDSM_CONF)),
        'vdsm_logger_conf': vdsm_conf.VDSMLoggerConf(context_wrap(VDSM_LOGGER_CONF)),
    }
    failures, _total = doctest.testmod(vdsm_conf, globs=globs)
    assert failures == 0
| 22.858209 | 138 | 0.76461 |
acee7713c0178ae14c2d5f4f7c69beb6ef6ef1f8 | 1,140 | py | Python | ads/dataset/feature_engineering_transformer.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | 20 | 2022-02-22T19:07:09.000Z | 2022-03-16T17:21:42.000Z | ads/dataset/feature_engineering_transformer.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | ads/dataset/feature_engineering_transformer.py | oracle/accelerated-data-science | d594ed0c8c1365daf4cf9e860daebc760fa9a24b | [
"UPL-1.0",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from __future__ import print_function, absolute_import
from sklearn.base import TransformerMixin
from ads.dataset.progress import DummyProgressBar
class FeatureEngineeringTransformer(TransformerMixin):
    """Scikit-learn style transformer hook for dataset feature engineering.

    When no engineering function has been recorded (``function_`` is None,
    which is always the case after :meth:`fit`), :meth:`transform` is the
    identity and returns the input dataframe unchanged.
    """

    def __init__(self, feature_metadata=None):
        self.feature_metadata_ = feature_metadata
        # Optional callable (and its kwargs) applied via ``DataFrame.pipe``.
        self.function_ = None
        self.function_kwargs_ = None

    def __repr__(self):
        return "No feature engineering transformations"

    def fit(self, X, y=None):
        """Fit the transformer; currently records no transformation.

        The feature metadata attribute is dropped here so the fitted object
        does not keep a reference to it.
        """
        self.function_ = None
        self.function_kwargs_ = None
        del self.feature_metadata_
        return self

    def fit_transform(self, X, y=None, **fit_params):
        return self.fit(X, y=y).transform(X, fit_transform=True)

    def transform(self, df, progress=None, fit_transform=False):
        """Apply the recorded function to ``df`` (identity when none is set).

        ``progress`` previously defaulted to ``DummyProgressBar()`` evaluated
        once at class-definition time and shared across every call (the
        mutable-default-argument pitfall); a fresh instance is now created
        per call, which is backward-compatible for all callers.
        """
        if progress is None:
            progress = DummyProgressBar()
        if self.function_ is not None:
            return df.pipe(self.function_, **self.function_kwargs_)
        return df
| 31.666667 | 104 | 0.70614 |
acee77264474405e60e9b25ef66e47f751f0a7ad | 584 | py | Python | tweets/migrations/0001_initial.py | pnads/myTweetme | afd6fa28fef681e6be4e64816a4dc4c914b97620 | [
"MIT"
] | null | null | null | tweets/migrations/0001_initial.py | pnads/myTweetme | afd6fa28fef681e6be4e64816a4dc4c914b97620 | [
"MIT"
] | null | null | null | tweets/migrations/0001_initial.py | pnads/myTweetme | afd6fa28fef681e6be4e64816a4dc4c914b97620 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-09 05:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Tweet`` table.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Tweet',
            fields=[
                # Standard auto-incrementing surrogate primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Tweet body; optional so an image-only tweet is allowed.
                ('content', models.TextField(blank=True, null=True)),
                # Optional attached file, stored under MEDIA_ROOT/images/.
                ('image', models.FileField(blank=True, null=True, upload_to='images/')),
            ],
        ),
    ]
| 25.391304 | 114 | 0.577055 |
acee7789421e712e67186e78bbaaed3999390889 | 1,122 | py | Python | PythonClient/multirotor/speaker.py | altay13/AirSim | a42fb69e6a692ec154f25abd80c0b49ef45caac4 | [
"MIT"
] | 6,115 | 2019-05-07T05:29:14.000Z | 2022-03-31T12:46:36.000Z | PythonClient/multirotor/speaker.py | altay13/AirSim | a42fb69e6a692ec154f25abd80c0b49ef45caac4 | [
"MIT"
] | 2,306 | 2019-05-07T00:17:31.000Z | 2022-03-31T23:31:46.000Z | PythonClient/multirotor/speaker.py | altay13/AirSim | a42fb69e6a692ec154f25abd80c0b49ef45caac4 | [
"MIT"
] | 2,059 | 2019-05-07T03:07:43.000Z | 2022-03-31T06:31:19.000Z | ###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: speaker.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import pyaudio
class Speaker:
    """Thin wrapper around a PyAudio output stream for audio playback."""

    def __init__(self):
        self.output_stream = None
        self.audio = pyaudio.PyAudio()

    def open(self, audio_format, num_channels, rate):
        """Open the default output device so audio can be played."""
        self.output_stream = self.audio.open(
            format=audio_format,
            channels=num_channels,
            rate=rate,
            output=True,
        )

    def write(self, data):
        """Play ``data`` on the open stream; no-op when no stream is open."""
        if self.output_stream is None:
            return
        self.output_stream.write(data)

    def close(self):
        """Close and forget the output stream (safe to call repeatedly)."""
        stream = self.output_stream
        if stream is not None:
            stream.close()
            self.output_stream = None

    def is_closed(self):
        """Return True when no output stream is currently open."""
        return self.output_stream is None
| 30.324324 | 99 | 0.454545 |
acee778c5294bc3896ff2200809fca9c1a777583 | 2,283 | py | Python | watchman/test/async/AsyncWatchmanTestCase.py | 0xgpapad/watchman | 71ae8281ca0d583db7c2c8f057a489a86eded406 | [
"MIT"
] | 3 | 2022-02-10T10:48:36.000Z | 2022-02-21T23:18:10.000Z | watchman/test/async/AsyncWatchmanTestCase.py | 0xgpapad/watchman | 71ae8281ca0d583db7c2c8f057a489a86eded406 | [
"MIT"
] | null | null | null | watchman/test/async/AsyncWatchmanTestCase.py | 0xgpapad/watchman | 71ae8281ca0d583db7c2c8f057a489a86eded406 | [
"MIT"
] | 1 | 2022-02-06T10:29:46.000Z | 2022-02-06T10:29:46.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
import asyncio
import errno
import os
import os.path
import unittest
import WatchmanInstance
from pywatchman_aio import AIOClient as WatchmanClient
class AsyncWatchmanTestCase(unittest.TestCase):
    """Base TestCase that drives an asyncio watchman client against the
    shared watchman server instance."""

    def setUp(self):
        # Connect an async client over the shared instance's unix socket;
        # all async work is driven through this event loop synchronously.
        self.loop = asyncio.get_event_loop()
        sockpath = WatchmanInstance.getSharedInstance().getSockPath()
        self.client = self.loop.run_until_complete(WatchmanClient.from_socket(sockpath))

    def tearDown(self):
        # NOTE(review): assumes AIOClient.close() is synchronous — confirm.
        self.client.close()

    def run(self, result):
        # unittest permits result=None; these tests require a real result
        # collector, so fail fast instead of silently using a default.
        assert result
        super(AsyncWatchmanTestCase, self).run(result)
        return result

    def touch(self, fname, times=None):
        # Update the file's timestamps; create it first if it does not
        # exist yet (EAFP: try utime, fall back on ENOENT).
        try:
            os.utime(fname, times)
        except OSError as e:
            if e.errno == errno.ENOENT:
                with open(fname, "a"):
                    os.utime(fname, times)
            else:
                raise

    def touch_relative(self, base, *fname):
        # Touch base/fname..., creating the file if needed.
        fname = os.path.join(base, *fname)
        self.touch(fname, None)

    def watchman_command(self, *args):
        # Issue a watchman command, failing if it takes longer than 10s.
        task = asyncio.wait_for(self.client.query(*args), 10)
        return self.loop.run_until_complete(task)

    def get_file_list(self, root):
        # Names of all files watchman currently considers existing under root.
        expr = {"expression": ["exists"], "fields": ["name"]}
        res = self.watchman_command("query", root, expr)["files"]
        return res

    def assert_sub_contains_all(self, sub, what):
        # Every name in ``what`` must appear in the subscription payload.
        files = set(sub["files"])
        for obj in what:
            assert obj in files, str(obj) + " was not in subscription " + repr(sub)

    def assert_file_sets_equal(self, iter1, iter2, message=None):
        # Order-insensitive comparison of two file-name iterables.
        set1 = set(iter1)
        set2 = set(iter2)
        self.assertEqual(set1, set2, message)

    # Wait for the file list to match the input set
    def assert_root_file_set(self, root, files):
        self.assert_file_sets_equal(self.get_file_list(root), files)

    def wait_for_sub(self, name, root, timeout=10):
        # Block until the named subscription on ``root`` delivers data,
        # or ``timeout`` seconds elapse.
        client = self.client
        task = asyncio.wait_for(client.get_subscription(name, root), timeout)
        return self.loop.run_until_complete(task)
| 30.851351 | 88 | 0.648708 |
acee77f72fa859d09337fa04b871524964afcfe0 | 7,443 | py | Python | gslib/addlhelp/subdirs.py | rjschwei/gsutil | 4b5fdbafc2a4f498e7e4c755c5045c9483327f0c | [
"Apache-2.0"
] | null | null | null | gslib/addlhelp/subdirs.py | rjschwei/gsutil | 4b5fdbafc2a4f498e7e4c755c5045c9483327f0c | [
"Apache-2.0"
] | null | null | null | gslib/addlhelp/subdirs.py | rjschwei/gsutil | 4b5fdbafc2a4f498e7e4c755c5045c9483327f0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about subdirectory handling in gsutil."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
This section provides details about how subdirectories work in gsutil.
Most users probably don't need to know these details, and can simply use
the commands (like cp -r) that work with subdirectories. We provide this
additional documentation to help users understand how gsutil handles
subdirectories differently than most GUI / web-based tools (e.g., why
those other tools create "dir_$folder$" objects), and also to explain cost and
performance implications of the gsutil approach, for those interested in such
details.
gsutil provides the illusion of a hierarchical file tree atop the "flat"
name space supported by the Google Cloud Storage service. To the service,
the object gs://your-bucket/abc/def.txt is just an object that happens to
have "/" characters in its name. There is no "abc" directory; just a single
object with the given name. This diagram:
.. image:: https://cloud.google.com/storage/images/gsutil-subdirectories.svg
illustrates how gsutil provides a hierarchical view of objects in a bucket.
gsutil achieves the hierarchical file tree illusion by applying a variety of
rules, to try to make naming work the way users would expect. For example, in
order to determine whether to treat a destination URL as an object name or the
root of a directory under which objects should be copied gsutil uses these
rules:
1. If the destination object ends with a "/" gsutil treats it as a directory.
For example, if you run the command:
gsutil cp your-file gs://your-bucket/abc/
gsutil will create the object gs://your-bucket/abc/your-file.
2. If the destination object is XYZ and an object exists called XYZ_$folder$
gsutil treats XYZ as a directory. For example, if you run the command:
gsutil cp your-file gs://your-bucket/abc
and there exists an object called abc_$folder$, gsutil will create the
object gs://your-bucket/abc/your-file.
3. If you attempt to copy multiple source files to a destination URL, gsutil
treats the destination URL as a directory. For example, if you run
the command:
gsutil cp -r your-dir gs://your-bucket/abc
gsutil will create objects like gs://your-bucket/abc/your-dir/file1, etc.
(assuming file1 is a file under the source directory your-dir).
4. If none of the above rules applies, gsutil performs a bucket listing to
determine if the target of the operation is a prefix match to the
specified string. For example, if you run the command:
gsutil cp your-file gs://your-bucket/abc
gsutil will make a bucket listing request for the named bucket, using
delimiter="/" and prefix="abc". It will then examine the bucket listing
results and determine whether there are objects in the bucket whose path
starts with gs://your-bucket/abc/, to determine whether to treat the target
as an object name or a directory name. In turn this impacts the name of the
object you create: If the above check indicates there is an "abc" directory
you will end up with the object gs://your-bucket/abc/your-file; otherwise
you will end up with the object gs://your-bucket/abc. (See
"HOW NAMES ARE CONSTRUCTED" under "gsutil help cp" for more details.)
This rule-based approach stands in contrast to the way many tools work, which
create objects to mark the existence of folders (such as "dir_$folder$").
gsutil understands several conventions used by such tools but does not
require such marker objects to implement naming behavior consistent with
UNIX commands.
A downside of the gsutil subdirectory naming approach is it requires an extra
bucket listing before performing the needed cp or mv command. However those
listings are relatively inexpensive, because they use delimiter and prefix
parameters to limit result data. Moreover, gsutil makes only one bucket
listing request per cp/mv command, and thus amortizes the bucket listing cost
across all transferred objects (e.g., when performing a recursive copy of a
directory to the cloud).
<B>POTENTIAL FOR SURPRISING DESTINATION SUBDIRECTORY NAMING</B>
The above rules-based approach for determining how destination paths are
constructed can lead to the following surprise: Suppose you start by trying to
upload everything under a local directory to a bucket "subdirectory" that
doesn't yet exist:
gsutil cp -r ./your-dir/* gs://your-bucket/new
where there are directories under your-dir (say, dir1 and dir2). The first
time you run this command it will create the objects:
gs://your-bucket/new/dir1/abc
gs://your-bucket/new/dir2/abc
because gs://your-bucket/new doesn't yet exist. If you run the same command
again, because gs://your-bucket/new does now exist, it will create the
additional objects:
gs://your-bucket/new/your-dir/dir1/abc
gs://your-bucket/new/your-dir/dir2/abc
Beyond the fact that this naming behavior can surprise users, one particular
case you should be careful about is if you script gsutil uploads with a retry
loop. If you do this and the first attempt copies some but not all files,
the second attempt will encounter an already existing source subdirectory
and result in the above-described naming problem.
There are a couple of ways to avoid this problem:
1. Use gsutil rsync. Since rsync doesn't use the Unix cp-defined directory
naming rules, it will work consistently whether the destination subdirectory
exists or not.
2. If using rsync won't work for you, you can start by creating a
"placeholder" object to establish that the destination is a subdirectory, by
running a command such as:
gsutil cp some-file gs://your-bucket/new/placeholder
At this point running the gsutil cp -r command noted above will
consistently treat gs://your-bucket/new as a subdirectory. Once you have
at least one object under that subdirectory you can delete the placeholder
object and subsequent uploads to that subdirectory will continue to work
with naming working as you'd expect.
""")
class CommandOptions(HelpProvider):
  """Additional help about subdirectory handling in gsutil."""

  # Help specification. See help_provider.py for documentation.
  help_spec = HelpProvider.HelpSpec(
      help_name='subdirs',
      # Search terms under which `gsutil help` resolves to this topic.
      help_name_aliases=[
          'dirs', 'directory', 'directories', 'folder', 'folders', 'hierarchy',
          'subdir', 'subdirectory', 'subdirectories'],
      help_type='additional_help',
      help_one_line_summary='How Subdirectories Work',
      # The full topic text lives in the module-level constant above.
      help_text=_DETAILED_HELP_TEXT,
      # Pure help topic: no subcommands of its own.
      subcommand_help_text={},
  )
| 45.109091 | 80 | 0.749966 |
acee78a6298f0fe0ce33bbac841f3b54a3b90b4d | 3,965 | py | Python | sdk/python/pulumi_aws/cfg/aggregate_authorization.py | johnktims/pulumi-aws | c838bc79043f5376c66fc66275a1e012edd3ab7d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cfg/aggregate_authorization.py | johnktims/pulumi-aws | c838bc79043f5376c66fc66275a1e012edd3ab7d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/cfg/aggregate_authorization.py | johnktims/pulumi-aws | c838bc79043f5376c66fc66275a1e012edd3ab7d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class AggregateAuthorization(pulumi.CustomResource):
    # Output properties mirrored back from the provider after creation.
    account_id: pulumi.Output[str]
    """
    Account ID
    """
    arn: pulumi.Output[str]
    """
    The ARN of the authorization
    """
    region: pulumi.Output[str]
    """
    Region
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the resource.
    """
    def __init__(__self__, resource_name, opts=None, account_id=None, region=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        Manages an AWS Config Aggregate Authorization

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: Account ID
        :param pulumi.Input[str] region: Region
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
        """
        # Back-compat shims for the long-deprecated __name__/__opts__ args.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # opts.id is only set when looking up an existing resource (see
        # get() below); creating a new one validates the required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if account_id is None:
                raise TypeError("Missing required property 'account_id'")
            __props__['account_id'] = account_id
            if region is None:
                raise TypeError("Missing required property 'region'")
            __props__['region'] = region
            __props__['tags'] = tags
            # arn is computed by the provider, never supplied by the caller.
            __props__['arn'] = None
        super(AggregateAuthorization, __self__).__init__(
            'aws:cfg/aggregateAuthorization:AggregateAuthorization',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, account_id=None, arn=None, region=None, tags=None):
        """
        Get an existing AggregateAuthorization resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_id: Account ID
        :param pulumi.Input[str] arn: The ARN of the authorization
        :param pulumi.Input[str] region: Region
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
        """
        # Forcing opts.id routes __init__ down the "existing resource" path.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["account_id"] = account_id
        __props__["arn"] = arn
        __props__["region"] = region
        __props__["tags"] = tags
        return AggregateAuthorization(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase property names to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names back to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 39.257426 | 140 | 0.65372 |
acee7a3bdbaca91f51b11f6989014e9262ec0cad | 2,817 | py | Python | telestream_cloud_qc_sdk/telestream_cloud_qc/models/hdr_standard_type.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | telestream_cloud_qc_sdk/telestream_cloud_qc/models/hdr_standard_type.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | 2 | 2016-07-06T14:13:31.000Z | 2018-03-07T12:54:58.000Z | telestream_cloud_qc_sdk/telestream_cloud_qc/models/hdr_standard_type.py | Telestream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HdrStandardType(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # Allowed enum values for the HDR standard.
    GENERICHDR = "GenericHdr"
    HDR10 = "Hdr10"
    ARIBB67 = "AribB67"

    allowable_values = [GENERICHDR, HDR10, ARIBB67]  # noqa: E501

    # openapi_types maps attribute name -> declared type; attribute_map maps
    # attribute name -> JSON key.  Both are empty for a plain enum model.
    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self, local_vars_configuration=None):  # noqa: E501
        """HdrStandardType - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _as_plain(value):
            # Serialize a nested model if it supports it; pass through otherwise.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_as_plain(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _as_plain(item) for key, item in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, HdrStandardType):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, HdrStandardType):
            return self.to_dict() != other.to_dict()
        return True
| 27.349515 | 74 | 0.574015 |
acee7a94de5a3377f7abfb02d2acf99d045b6ca5 | 45,154 | py | Python | tests/test_warp.py | darrenleeweber/rasterio | b0c1989ff90353226eafb1d1c8379948ff0ca461 | [
"BSD-3-Clause"
] | null | null | null | tests/test_warp.py | darrenleeweber/rasterio | b0c1989ff90353226eafb1d1c8379948ff0ca461 | [
"BSD-3-Clause"
] | null | null | null | tests/test_warp.py | darrenleeweber/rasterio | b0c1989ff90353226eafb1d1c8379948ff0ca461 | [
"BSD-3-Clause"
] | null | null | null | import json
"""rasterio.warp module tests"""
import sys
import pytest
from affine import Affine
import numpy as np
import rasterio
from rasterio.control import GroundControlPoint
from rasterio.crs import CRS
from rasterio.enums import Resampling
from rasterio.env import GDALVersion
from rasterio.errors import (GDALBehaviorChangeException, CRSError, GDALVersionError)
from rasterio.warp import (
reproject,
transform_geom,
transform,
transform_bounds,
calculate_default_transform,
aligned_target,
SUPPORTED_RESAMPLING,
GDAL2_RESAMPLING,
)
from rasterio import windows
from .conftest import requires_gdal22, requires_gdal3, requires_gdal_lt_3
gdal_version = GDALVersion.runtime()
DST_TRANSFORM = Affine(300.0, 0.0, -8789636.708, 0.0, -300.0, 2943560.235)
def flatten_coords(coordinates):
"""Yield a flat sequence of coordinates to help testing"""
for elem in coordinates:
if isinstance(elem, (float, int)):
yield elem
else:
for x in flatten_coords(elem):
yield x
reproj_expected = (
({"CHECK_WITH_INVERT_PROJ": False}, 6644), ({"CHECK_WITH_INVERT_PROJ": True}, 6644)
)
class ReprojectParams(object):
"""Class to assist testing reprojection by encapsulating parameters."""
def __init__(self, left, bottom, right, top, width, height, src_crs, dst_crs):
self.width = width
self.height = height
src_res = float(right - left) / float(width)
self.src_transform = Affine(src_res, 0, left, 0, -src_res, top)
self.src_crs = src_crs
self.dst_crs = dst_crs
dt, dw, dh = calculate_default_transform(
src_crs, dst_crs, width, height, left, bottom, right, top
)
self.dst_transform = dt
self.dst_width = dw
self.dst_height = dh
def default_reproject_params():
return ReprojectParams(
left=-120,
bottom=30,
right=-80,
top=70,
width=80,
height=80,
src_crs=CRS.from_epsg(4326),
dst_crs=CRS.from_epsg(2163),
)
def uninvertable_reproject_params():
return ReprojectParams(
left=-120,
bottom=30,
right=-80,
top=70,
width=80,
height=80,
src_crs=CRS.from_epsg(4326),
dst_crs=CRS.from_epsg(26836),
)
WGS84_crs = CRS.from_epsg(4326)
def test_transform_src_crs_none():
with pytest.raises(CRSError):
transform(None, WGS84_crs, [], [])
def test_transform_dst_crs_none():
with pytest.raises(CRSError):
transform(WGS84_crs, None, [], [])
def test_transform_bounds_src_crs_none():
with pytest.raises(CRSError):
transform_bounds(None, WGS84_crs, 0, 0, 0, 0)
def test_transform_bounds_dst_crs_none():
with pytest.raises(CRSError):
transform_bounds(WGS84_crs, None, 0, 0, 0, 0)
def test_transform_geom_src_crs_none():
with pytest.raises(CRSError):
transform_geom(None, WGS84_crs, None)
def test_transform_geom_dst_crs_none():
with pytest.raises(CRSError):
transform_geom(WGS84_crs, None, None)
def test_reproject_src_crs_none():
with pytest.raises(CRSError):
reproject(
np.ones((2, 2)),
np.zeros((2, 2)),
src_transform=Affine.identity(),
dst_transform=Affine.identity(),
dst_crs=WGS84_crs,
)
def test_reproject_dst_crs_none():
with pytest.raises(CRSError):
reproject(
np.ones((2, 2)),
np.zeros((2, 2)),
src_transform=Affine.identity(),
dst_transform=Affine.identity(),
src_crs=WGS84_crs,
)
def test_transform():
"""2D and 3D."""
WGS84_crs = CRS.from_epsg(4326)
WGS84_points = ([12.492269], [41.890169], [48.])
ECEF_crs = CRS.from_epsg(4978)
ECEF_points = ([4642610.], [1028584.], [4236562.])
ECEF_result = transform(WGS84_crs, ECEF_crs, *WGS84_points)
assert np.allclose(np.array(ECEF_result), np.array(ECEF_points))
UTM33_crs = CRS.from_epsg(32633)
UTM33_points = ([291952], [4640623])
UTM33_result = transform(WGS84_crs, UTM33_crs, *WGS84_points[:2])
assert np.allclose(np.array(UTM33_result), np.array(UTM33_points))
def test_transform_bounds():
with rasterio.open("tests/data/RGB.byte.tif") as src:
l, b, r, t = src.bounds
assert np.allclose(
transform_bounds(src.crs, CRS.from_epsg(4326), l, b, r, t),
(
-78.95864996545055,
23.564991210854686,
-76.57492370013823,
25.550873767433984,
),
)
def test_transform_bounds__esri_wkt():
left, bottom, right, top = \
(-78.95864996545055, 23.564991210854686,
-76.57492370013823, 25.550873767433984)
dst_projection_string = (
'PROJCS["USA_Contiguous_Albers_Equal_Area_Conic_USGS_version",'
'GEOGCS["GCS_North_American_1983",DATUM["D_North_American_1983",'
'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
'PRIMEM["Greenwich",0.0],'
'UNIT["Degree",0.0174532925199433]],'
'PROJECTION["Albers"],'
'PARAMETER["false_easting",0.0],'
'PARAMETER["false_northing",0.0],'
'PARAMETER["central_meridian",-96.0],'
'PARAMETER["standard_parallel_1",29.5],'
'PARAMETER["standard_parallel_2",45.5],'
'PARAMETER["latitude_of_origin",23.0],'
'UNIT["Meter",1.0],'
'VERTCS["NAVD_1988",'
'VDATUM["North_American_Vertical_Datum_1988"],'
'PARAMETER["Vertical_Shift",0.0],'
'PARAMETER["Direction",1.0],UNIT["Centimeter",0.01]]]')
assert np.allclose(
transform_bounds(CRS.from_epsg(4326),
dst_projection_string,
left,
bottom,
right,
top),
(
1721263.7931814701,
219684.49332178483,
2002926.56696663,
479360.16562217404),
)
def test_transform_bounds_densify():
# This transform is non-linear along the edges, so densification produces
# a different result than otherwise
src_crs = CRS.from_epsg(4326)
dst_crs = CRS.from_epsg(2163)
assert np.allclose(
transform_bounds(src_crs, dst_crs, -120, 40, -80, 64, densify_pts=0),
(-1684649.41338, -350356.81377, 1684649.41338, 2234551.18559),
)
assert np.allclose(
transform_bounds(src_crs, dst_crs, -120, 40, -80, 64, densify_pts=100),
(-1684649.41338, -555777.79210, 1684649.41338, 2234551.18559),
)
def test_transform_bounds_no_change():
"""Make sure that going from and to the same crs causes no change."""
with rasterio.open("tests/data/RGB.byte.tif") as src:
l, b, r, t = src.bounds
assert np.allclose(transform_bounds(src.crs, src.crs, l, b, r, t), src.bounds)
def test_transform_bounds_densify_out_of_bounds():
with pytest.raises(ValueError):
transform_bounds(
CRS.from_epsg(4326),
CRS.from_epsg(32610),
-120,
40,
-80,
64,
densify_pts=-10,
)
def test_calculate_default_transform():
target_transform = Affine(
0.0028535715391804096,
0.0,
-78.95864996545055,
0.0,
-0.0028535715391804096,
25.550873767433984,
)
with rasterio.open("tests/data/RGB.byte.tif") as src:
wgs84_crs = CRS.from_epsg(4326)
dst_transform, width, height = calculate_default_transform(
src.crs, wgs84_crs, src.width, src.height, *src.bounds
)
assert dst_transform.almost_equals(target_transform)
assert width == 835
assert height == 696
def test_calculate_default_transform_single_resolution():
with rasterio.open("tests/data/RGB.byte.tif") as src:
target_resolution = 0.1
target_transform = Affine(
target_resolution,
0.0,
-78.95864996545055,
0.0,
-target_resolution,
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
resolution=target_resolution
)
assert dst_transform.almost_equals(target_transform)
assert width == 24
assert height == 20
def test_calculate_default_transform_multiple_resolutions():
with rasterio.open("tests/data/RGB.byte.tif") as src:
target_resolution = (0.2, 0.1)
target_transform = Affine(
target_resolution[0],
0.0,
-78.95864996545055,
0.0,
-target_resolution[1],
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
resolution=target_resolution
)
assert dst_transform.almost_equals(target_transform)
assert width == 12
assert height == 20
def test_calculate_default_transform_dimensions():
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_width, dst_height = (113, 103)
target_transform = Affine(
0.02108612597535966,
0.0,
-78.95864996545055,
0.0,
-0.0192823863230055,
25.550873767433984,
)
dst_transform, width, height = calculate_default_transform(
src.crs,
CRS.from_epsg(4326),
src.width,
src.height,
*src.bounds,
dst_width=dst_width,
dst_height=dst_height
)
assert dst_transform.almost_equals(target_transform)
assert width == dst_width
assert height == dst_height
def test_reproject_ndarray():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 438113
def test_reproject_view():
"""Source views are reprojected properly"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
window = windows.Window(100, 100, 500, 500)
# window = windows.get_data_window(source)
reduced_array = source[window.toslices()]
reduced_transform = windows.transform(window, src.transform)
# Assert that we're working with a view.
assert reduced_array.base is source
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
reduced_array,
out,
src_transform=reduced_transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 299199
def test_reproject_epsg():
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:3857"}
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert (out > 0).sum() == 438113
def test_reproject_out_of_bounds():
"""Using EPSG code is not appropriate for the transform.
Should return blank image.
"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:32619"}
out = np.zeros(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert not out.any()
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata(options, expected):
# Older combinations of GDAL and PROJ might have got this transformation wrong.
# Results look better with GDAL 3.
nodata = 215
with rasterio.Env(**options):
params = uninvertable_reproject_params()
source = np.ones((params.width, params.height), dtype=np.uint8)
out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
out.fill(120) # Fill with arbitrary value
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=nodata,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=nodata,
)
assert (out == 1).sum() == expected
assert (out == nodata).sum() == (
params.dst_width * params.dst_height - expected
)
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_nodata_nan(options, expected):
with rasterio.Env(**options):
params = uninvertable_reproject_params()
source = np.ones((params.width, params.height), dtype=np.float32)
out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
out.fill(120) # Fill with arbitrary value
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=np.nan,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=np.nan,
)
assert (out == 1).sum() == expected
assert np.isnan(out).sum() == (params.dst_width * params.dst_height - expected)
@requires_gdal3
@pytest.mark.parametrize("options, expected", reproj_expected)
def test_reproject_dst_nodata_default(options, expected):
"""If nodata is not provided, destination will be filled with 0."""
with rasterio.Env(**options):
params = uninvertable_reproject_params()
source = np.ones((params.width, params.height), dtype=np.uint8)
out = np.zeros((params.dst_width, params.dst_height), dtype=source.dtype)
out.fill(120) # Fill with arbitrary value
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
)
assert (out == 1).sum() == expected
assert (out == 0).sum() == (params.dst_width * params.dst_height - expected)
def test_reproject_invalid_dst_nodata():
"""dst_nodata must be in value range of data type."""
params = default_reproject_params()
source = np.ones((params.width, params.height), dtype=np.uint8)
out = source.copy()
with pytest.raises(ValueError):
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=999999999,
)
def test_reproject_invalid_src_nodata():
"""src_nodata must be in range for data type."""
params = default_reproject_params()
source = np.ones((params.width, params.height), dtype=np.uint8)
out = source.copy()
with pytest.raises(ValueError):
reproject(
source,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=999999999,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=215,
)
def test_reproject_init_nodata_tofile(tmpdir):
"""Test that nodata is being initialized."""
params = default_reproject_params()
tiffname = str(tmpdir.join("foo.tif"))
source1 = np.zeros((params.width, params.height), dtype=np.uint8)
source2 = source1.copy()
# fill both sources w/ arbitrary values
rows, cols = source1.shape
source1[:rows // 2, :cols // 2] = 200
source2[rows // 2:, cols // 2:] = 100
kwargs = {
"count": 1,
"width": params.width,
"height": params.height,
"dtype": np.uint8,
"driver": "GTiff",
"crs": params.dst_crs,
"transform": params.dst_transform,
}
with rasterio.open(tiffname, "w", **kwargs) as dst:
reproject(
source1,
rasterio.band(dst, 1),
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
)
# 200s should be overwritten by 100s
reproject(
source2,
rasterio.band(dst, 1),
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
)
with rasterio.open(tiffname) as src:
assert src.read().max() == 100
def test_reproject_no_init_nodata_tofile(tmpdir):
"""Test that nodata is not being initialized."""
params = default_reproject_params()
tiffname = str(tmpdir.join("foo.tif"))
source1 = np.zeros((params.width, params.height), dtype=np.uint8)
source2 = source1.copy()
# fill both sources w/ arbitrary values
rows, cols = source1.shape
source1[:rows // 2, :cols // 2] = 200
source2[rows // 2:, cols // 2:] = 100
kwargs = {
"count": 1,
"width": params.width,
"height": params.height,
"dtype": np.uint8,
"driver": "GTiff",
"crs": params.dst_crs,
"transform": params.dst_transform,
}
with rasterio.open(tiffname, "w", **kwargs) as dst:
reproject(
source1,
rasterio.band(dst, 1),
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
)
reproject(
source2,
rasterio.band(dst, 1),
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
init_dest_nodata=False,
)
# 200s should remain along with 100s
with rasterio.open(tiffname) as src:
data = src.read()
assert data.max() == 200
def test_reproject_no_init_nodata_toarray():
"""Test that nodata is being initialized."""
params = default_reproject_params()
source1 = np.zeros((params.width, params.height))
source2 = source1.copy()
out = source1.copy()
# fill both sources w/ arbitrary values
rows, cols = source1.shape
source1[:rows // 2, :cols // 2] = 200
source2[rows // 2:, cols // 2:] = 100
reproject(
source1,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
)
assert out.max() == 200
assert out.min() == 0
reproject(
source2,
out,
src_transform=params.src_transform,
src_crs=params.src_crs,
src_nodata=0.0,
dst_transform=params.dst_transform,
dst_crs=params.dst_crs,
dst_nodata=0.0,
init_dest_nodata=False,
)
# 200s should NOT be overwritten by 100s
assert out.max() == 200
assert out.min() == 0
def test_reproject_multi():
"""Ndarry to ndarray."""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read()
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
destin = np.empty(source.shape, dtype=np.uint8)
reproject(
source,
destin,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
assert destin.any()
def test_warp_from_file():
"""File to ndarray."""
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
destin = np.empty(src.shape, dtype=np.uint8)
reproject(
rasterio.band(src, 1), destin, dst_transform=DST_TRANSFORM, dst_crs=dst_crs
)
assert destin.any()
def test_warp_from_to_file(tmpdir):
"""File to file."""
tiffname = str(tmpdir.join("foo.tif"))
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
kwargs = src.meta.copy()
kwargs.update(transform=DST_TRANSFORM, crs=dst_crs)
with rasterio.open(tiffname, "w", **kwargs) as dst:
for i in (1, 2, 3):
reproject(rasterio.band(src, i), rasterio.band(dst, i))
def test_warp_from_to_file_multi(tmpdir):
"""File to file."""
tiffname = str(tmpdir.join("foo.tif"))
with rasterio.open("tests/data/RGB.byte.tif") as src:
dst_crs = dict(
proj="merc",
a=6378137,
b=6378137,
lat_ts=0.0,
lon_0=0.0,
x_0=0.0,
y_0=0,
k=1.0,
units="m",
nadgrids="@null",
wktext=True,
no_defs=True,
)
kwargs = src.meta.copy()
kwargs.update(transform=DST_TRANSFORM, crs=dst_crs)
with rasterio.open(tiffname, "w", **kwargs) as dst:
for i in (1, 2, 3):
reproject(rasterio.band(src, i), rasterio.band(dst, i), num_threads=2)
@pytest.fixture(scope="function")
def polygon_3373():
"""An EPSG:3373 polygon."""
return {
"type": "Polygon",
"coordinates": (
(
(798842.3090855901, 6569056.500655151),
(756688.2826828464, 6412397.888771972),
(755571.0617232556, 6408461.009397383),
(677605.2284582685, 6425600.39266733),
(677605.2284582683, 6425600.392667332),
(670873.3791649605, 6427248.603432341),
(664882.1106069803, 6407585.48425362),
(663675.8662823177, 6403676.990080649),
(485120.71963574126, 6449787.167760638),
(485065.55660851026, 6449802.826920689),
(485957.03982722526, 6452708.625101285),
(487541.24541826674, 6457883.292107048),
(531008.5797472061, 6605816.560367976),
(530943.7197027118, 6605834.9333479265),
(531888.5010308184, 6608940.750411527),
(533299.5981959199, 6613962.642851984),
(533403.6388841148, 6613933.172096095),
(576345.6064638699, 6761983.708069147),
(577649.6721159086, 6766698.137844516),
(578600.3589008929, 6770143.99782289),
(578679.4732294685, 6770121.638265098),
(655836.640492081, 6749376.357102599),
(659913.0791150068, 6764770.1314677475),
(661105.8478791204, 6769515.168134831),
(661929.4670843681, 6772800.8565198565),
(661929.4670843673, 6772800.856519875),
(661975.1582566603, 6772983.354777632),
(662054.7979028501, 6772962.86384242),
(841909.6014891531, 6731793.200435557),
(840726.455490463, 6727039.8672589315),
(798842.3090855901, 6569056.500655151),
),
),
}
def test_transform_geom_polygon_cutting(polygon_3373):
geom = polygon_3373
result = transform_geom("EPSG:3373", "EPSG:4326", geom, antimeridian_cutting=True)
assert result["type"] == "MultiPolygon"
assert len(result["coordinates"]) == 2
def test_transform_geom_polygon_offset(polygon_3373):
geom = polygon_3373
result = transform_geom(
"EPSG:3373", "EPSG:4326", geom, antimeridian_cutting=True, antimeridian_offset=0
)
assert result["type"] == "MultiPolygon"
assert len(result["coordinates"]) == 2
def test_transform_geom_polygon_precision(polygon_3373):
geom = polygon_3373
result = transform_geom(
"EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
)
assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision(polygon_3373):
ring = polygon_3373["coordinates"][0]
geom = {"type": "LineString", "coordinates": ring}
result = transform_geom(
"EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
)
assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision_iso(polygon_3373):
ring = polygon_3373["coordinates"][0]
geom = {"type": "LineString", "coordinates": ring}
result = transform_geom("EPSG:3373", "EPSG:3373", geom, precision=1)
assert int(result["coordinates"][0][0] * 10) == 7988423
def test_transform_geom_linearring_precision(polygon_3373):
ring = polygon_3373["coordinates"][0]
geom = {"type": "LinearRing", "coordinates": ring}
result = transform_geom(
"EPSG:3373", "EPSG:4326", geom, precision=1, antimeridian_cutting=True
)
assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
def test_transform_geom_linestring_precision_z(polygon_3373):
ring = polygon_3373["coordinates"][0]
x, y = zip(*ring)
ring = list(zip(x, y, [0.0 for i in range(len(x))]))
geom = {"type": "LineString", "coordinates": ring}
result = transform_geom("EPSG:3373", "EPSG:3373", geom, precision=1)
assert int(result["coordinates"][0][0] * 10) == 7988423
assert int(result["coordinates"][0][2] * 10) == 0
def test_transform_geom_multipolygon(polygon_3373):
geom = {"type": "MultiPolygon", "coordinates": [polygon_3373["coordinates"]]}
result = transform_geom("EPSG:3373", "EPSG:4326", geom, precision=1)
assert all(round(x, 1) == x for x in flatten_coords(result["coordinates"]))
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling(path_rgb_byte_tif, method):
# Expected count of nonzero pixels for each resampling method, based
# on running rasterio with each of the following configurations
expected = {
Resampling.nearest: 438113,
Resampling.bilinear: 439280,
Resampling.cubic: 437888,
Resampling.cubic_spline: 440475,
Resampling.lanczos: 436001,
Resampling.average: 439419,
Resampling.mode: 437298,
Resampling.max: 439464,
Resampling.min: 436397,
Resampling.med: 437194,
Resampling.q1: 436397,
Resampling.q3: 438948,
}
with rasterio.open(path_rgb_byte_tif) as src:
source = src.read(1)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs={"init": "epsg:3857"},
resampling=method,
)
assert np.count_nonzero(out) == expected[method]
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_reproject_resampling_alpha(method):
"""Reprojection of a source with alpha band succeeds"""
# Expected count of nonzero pixels for each resampling method, based
# on running rasterio with each of the following configurations
expected = {
Resampling.nearest: 438113,
Resampling.bilinear: 439280,
Resampling.cubic: 437888,
Resampling.cubic_spline: 440475,
Resampling.lanczos: 436001,
Resampling.average: 439419,
Resampling.mode: 437298,
Resampling.max: 439464,
Resampling.min: 436397,
Resampling.med: 437194,
Resampling.q1: 436397,
Resampling.q3: 438948,
}
with rasterio.open("tests/data/RGBA.byte.tif") as src:
source = src.read(1)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs={"init": "epsg:3857"},
resampling=method,
)
assert np.count_nonzero(out) == expected[method]
@pytest.mark.skipif(
gdal_version.at_least("2.0"), reason="Tests only applicable to GDAL < 2.0"
)
@pytest.mark.parametrize("method", GDAL2_RESAMPLING)
def test_reproject_not_yet_supported_resampling(method):
"""Test resampling methods not yet supported by this version of GDAL"""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:32619"}
out = np.empty(src.shape, dtype=np.uint8)
with pytest.raises(GDALVersionError):
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=method,
)
def test_reproject_unsupported_resampling():
"""Values not in enums. Resampling are not supported."""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:32619"}
out = np.empty(src.shape, dtype=np.uint8)
with pytest.raises(ValueError):
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=99,
)
def test_reproject_unsupported_resampling_guass():
"""Resampling.gauss is unsupported."""
with rasterio.open("tests/data/RGB.byte.tif") as src:
source = src.read(1)
dst_crs = {"init": "epsg:32619"}
out = np.empty(src.shape, dtype=np.uint8)
with pytest.raises(ValueError):
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=DST_TRANSFORM,
dst_crs=dst_crs,
resampling=Resampling.gauss,
)
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_default_invert_proj(method):
"""Nearest and bilinear should produce valid results
with the default Env
"""
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile.copy()
dst_crs = {"init": "epsg:32619"}
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=method,
)
assert out.mean() > 0
def test_target_aligned_pixels():
"""Issue 853 has been resolved"""
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile.copy()
dst_crs = {"init": "epsg:3857"}
with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
dst_affine, dst_width, dst_height = aligned_target(
dst_affine, dst_width, dst_height, 100000.0
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=Resampling.nearest,
)
# Check that there is no black borders
assert out[:, 0].all()
assert out[:, -1].all()
assert out[0, :].all()
assert out[-1, :].all()
@pytest.mark.parametrize("method", SUPPORTED_RESAMPLING)
def test_resample_no_invert_proj(method):
"""Nearest and bilinear should produce valid results with
CHECK_WITH_INVERT_PROJ = False
"""
if method in (
Resampling.bilinear,
Resampling.cubic,
Resampling.cubic_spline,
Resampling.lanczos,
):
pytest.xfail(
reason="Some resampling methods succeed but produce blank images. "
"See https://github.com/mapbox/rasterio/issues/614"
)
with rasterio.Env(CHECK_WITH_INVERT_PROJ=False):
with rasterio.open("tests/data/world.rgb.tif") as src:
source = src.read(1)
profile = src.profile.copy()
dst_crs = {"init": "epsg:32619"}
# Calculate the ideal dimensions and transformation in the new crs
dst_affine, dst_width, dst_height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
profile["height"] = dst_height
profile["width"] = dst_width
out = np.empty(shape=(dst_height, dst_width), dtype=np.uint8)
# see #614, some resampling methods succeed but produce blank images
out = np.empty(src.shape, dtype=np.uint8)
reproject(
source,
out,
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst_affine,
dst_crs=dst_crs,
resampling=method,
)
assert out.mean() > 0
def test_reproject_crs_none():
"""Reproject with crs is None should not cause segfault"""
src = np.random.random(25).reshape((1, 5, 5))
srcaff = Affine(1.1, 0.0, 0.0, 0.0, 1.1, 0.0)
srccrs = None
dst = np.empty(shape=(1, 11, 11))
dstaff = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
dstcrs = None
with pytest.raises(ValueError):
reproject(
src,
dst,
src_transform=srcaff,
src_crs=srccrs,
dst_transform=dstaff,
dst_crs=dstcrs,
resampling=Resampling.nearest,
)
def test_reproject_identity_src():
"""Reproject with an identity like source matrices."""
src = np.random.random(25).reshape((1, 5, 5))
dst = np.empty(shape=(1, 10, 10))
dstaff = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
crs = {"init": "epsg:3857"}
src_affines = [
Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), # Identity both positive
Affine(1.0, 0.0, 0.0, 0.0, -1.0, 0.0), # Identity with negative e
]
for srcaff in src_affines:
# reproject expected to not raise any error in any of the srcaff
reproject(
src,
dst,
src_transform=srcaff,
src_crs=crs,
dst_transform=dstaff,
dst_crs=crs,
resampling=Resampling.nearest,
)
def test_reproject_identity_dst():
"""Reproject with an identity like destination matrices."""
src = np.random.random(100).reshape((1, 10, 10))
srcaff = Affine(0.5, 0.0, 0.0, 0.0, 0.5, 0.0)
dst = np.empty(shape=(1, 5, 5))
crs = {"init": "epsg:3857"}
dst_affines = [
Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), # Identity both positive
Affine(1.0, 0.0, 0.0, 0.0, -1.0, 0.0), # Identity with negative e
]
for dstaff in dst_affines:
# reproject expected to not raise any error in any of the dstaff
reproject(
src,
dst,
src_transform=srcaff,
src_crs=crs,
dst_transform=dstaff,
dst_crs=crs,
resampling=Resampling.nearest,
)
@pytest.fixture(scope="function")
def rgb_byte_profile():
with rasterio.open("tests/data/RGB.byte.tif") as src:
return src.profile
def test_reproject_gcps_transform_exclusivity():
"""gcps and transform can't be used together."""
with pytest.raises(ValueError):
reproject(1, 1, gcps=[0], src_transform=[0])
def test_reproject_gcps(rgb_byte_profile):
    """Reproject using ground control points for the source"""
    source = np.ones((3, 800, 800), dtype=np.uint8) * 255
    out = np.zeros(
        (3, rgb_byte_profile["height"], rgb_byte_profile["height"]), dtype=np.uint8
    )
    # Four corner GCPs pin the 800x800 source onto a rotated footprint in
    # EPSG:32618.
    src_gcps = [
        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),
        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),
        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),
        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),
    ]
    reproject(
        source,
        out,
        src_crs="epsg:32618",
        gcps=src_gcps,
        dst_transform=rgb_byte_profile["transform"],
        dst_crs=rgb_byte_profile["crs"],
        resampling=Resampling.nearest,
    )
    # Some data landed (not every pixel set), and because the warped
    # footprint is rotated, none of the four output corners received data.
    assert not out.all()
    assert not out[:, 0, 0].any()
    assert not out[:, 0, -1].any()
    assert not out[:, -1, -1].any()
    assert not out[:, -1, 0].any()
@requires_gdal22(
    reason="GDAL 2.2.0 and newer has different antimeridian cutting behavior."
)
def test_transform_geom_gdal22():
    """Enabling `antimeridian_cutting` has no effect on GDAL 2.2.0 or newer
    where antimeridian cutting is always enabled. This could produce
    unexpected geometries, so an exception is raised.
    """
    geom = {"type": "Point", "coordinates": [0, 0]}
    # Explicitly disabling the always-on cutting behavior must be rejected.
    with pytest.raises(GDALVersionError):
        transform_geom("EPSG:4326", "EPSG:3857", geom, antimeridian_cutting=False)
def test_issue1056():
    """Warp successfully from RGB's upper bands to an array"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = {"init": "EPSG:3857"}
        out = np.zeros(src.shape, dtype=np.uint8)
        # Band 2 (not the default band 1) exercises the upper-band code path.
        reproject(
            rasterio.band(src, 2),
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
        )
def test_reproject_dst_nodata():
    """Affirm resolution of issue #1395"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        source = src.read(1)
        dst_crs = {"init": "epsg:3857"}
        out = np.empty(src.shape, dtype=np.float32)
        reproject(
            source,
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs=dst_crs,
            src_nodata=0,
            dst_nodata=np.nan,
            resampling=Resampling.nearest,
        )
        # Known-good count of valid (non-NaN, positive) pixels for this warp.
        assert (out[~np.isnan(out)] > 0.0).sum() == 438113
        # Source nodata (0) must be mapped to NaN, not left as 0.
        assert out[0, 0] != 0
        assert np.isnan(out[0, 0])
def test_issue1401():
    """The warp_mem_limit keyword argument is in effect"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = {"init": "epsg:3857"}
        out = np.zeros(src.shape, dtype=np.uint8)
        # Only checks the warp completes when a memory limit (in MB) is
        # supplied; there is no observable result value to assert on.
        reproject(
            rasterio.band(src, 2),
            out,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs=dst_crs,
            resampling=Resampling.nearest,
            warp_mem_limit=4000,
        )
def test_reproject_dst_alpha(path_rgb_msk_byte_tif):
    """Materialization of external mask succeeds"""
    with rasterio.open(path_rgb_msk_byte_tif) as src:
        nrows, ncols = src.shape
        # One extra band to receive the alpha channel (band 4).
        dst_arr = np.zeros((src.count + 1, nrows, ncols), dtype=np.uint8)
        reproject(
            rasterio.band(src, src.indexes),
            dst_arr,
            src_transform=src.transform,
            src_crs=src.crs,
            dst_transform=DST_TRANSFORM,
            dst_crs={"init": "epsg:3857"},
            dst_alpha=4,
        )
        # The alpha band (index 3, i.e. band 4) must contain nonzero data,
        # proving the external mask was materialized.
        assert dst_arr[3].any()
@pytest.mark.xfail(
    rasterio.__gdal_version__ in ["2.2.0", "2.2.1", "2.2.2", "2.2.3"],
    reason=(
        "GDAL had regression in 2.2.X series, fixed in 2.2.4,"
        " reproject used dst index instead of src index when destination was single band"
    ),
)
def test_issue1350():
    """Warp bands other than 1 or All"""
    with rasterio.open("tests/data/RGB.byte.tif") as src:
        dst_crs = {"init": "epsg:3857"}
        reprojected = []
        # Warp each band individually into its own single-band destination.
        for dtype, idx in zip(src.dtypes, src.indexes):
            out = np.zeros((1,) + src.shape, dtype=dtype)
            reproject(
                rasterio.band(src, idx),
                out,
                resampling=Resampling.nearest,
                dst_transform=DST_TRANSFORM,
                dst_crs=dst_crs,
            )
            reprojected.append(out)
        # If the src index were ignored (the 2.2.x regression), all outputs
        # would be identical copies of band 1.
        for i in range(1, len(reprojected)):
            assert not (reprojected[0] == reprojected[i]).all()
def test_issue_1446():
    """Confirm resolution of #1446"""
    point = {"type": "Point", "coordinates": (-122.51403808499907, 38.06106733107932)}
    # WGS84 -> UTM zone 10N; the projected coordinates are pinned to known
    # good values (to one decimal place).
    projected = transform_geom(CRS.from_epsg(4326), CRS.from_epsg(32610), point)
    x, y = projected["coordinates"]
    assert round(x, 1) == 542630.9
    assert round(y, 1) == 4212702.1
@requires_gdal_lt_3
def test_issue_1446_b():
    """Confirm that lines aren't thrown as reported in #1446"""
    src_crs = CRS.from_epsg(4326)
    # Sinusoidal projection on a Mars-radius sphere, centered far from 0.
    dst_crs = CRS(
        {
            "proj": "sinu",
            "lon_0": 350.85607029556,
            "x_0": 0,
            "y_0": 0,
            "a": 3396190,
            "b": 3396190,
            "units": "m",
            "no_defs": True,
        }
    )
    # Use a context manager so the fixture's file handle is closed; the
    # original ``json.load(open(...))`` leaked it.
    with open("tests/data/issue1446.geojson") as geojson_file:
        collection = json.load(geojson_file)
    geoms = {f["properties"]["fid"]: f["geometry"] for f in collection["features"]}
    transformed_geoms = {
        k: transform_geom(src_crs, dst_crs, g) for k, g in geoms.items()
    }
    # Before the fix, this geometry was thrown eastward of 0.0. It should be between -350 and -250.
    assert all([-350 < x < -150 for x, y in transformed_geoms[183519]["coordinates"]])
def test_issue_1076():
    """Confirm fix of #1076"""
    arr = (np.random.random((20, 30)) * 100).astype('int32')
    fill_value = 42
    newarr = np.full((200, 300), fill_value=fill_value, dtype='int32')
    src_crs = CRS.from_epsg(32632)
    src_transform = Affine(600.0, 0.0, 399960.0, 0.0, -600.0, 6100020.0)
    dst_transform = Affine(60.0, 0.0, 399960.0, 0.0, -60.0, 6100020.0)
    # FIX: the keyword is ``resampling`` -- the original passed ``resample=``,
    # which was silently absorbed by reproject's **kwargs and never applied.
    reproject(arr, newarr,
              src_transform=src_transform,
              dst_transform=dst_transform,
              src_crs=src_crs,
              dst_crs=src_crs,
              resampling=Resampling.nearest)
    # After upsampling 10x into the larger grid, the fill value must have
    # been overwritten somewhere.
    assert not (newarr == fill_value).all()
def test_reproject_init_dest_nodata():
    """No pixels should transfer over"""
    crs = CRS.from_epsg(4326)
    transform = Affine.identity()
    source = np.zeros((1, 100, 100))
    destination = np.ones((1, 100, 100))
    # Every source pixel equals src_nodata (0), and init_dest_nodata=False
    # preserves the destination's existing values, so the ones must survive.
    reproject(
        source, destination, src_crs=crs, src_transform=transform,
        dst_crs=crs, dst_transform=transform,
        src_nodata=0, init_dest_nodata=False
    )
    assert destination.all()
| 29.80462 | 99 | 0.599725 |
acee7aa27d099663ec21d2ace6d25f349e75478c | 9,068 | py | Python | wrapanapi/entities/base.py | ManageIQ/mgmtsystem | 1a0ee5b99ef3770e119c6264f4e452640c4275bf | [
"MIT"
] | 13 | 2016-09-13T07:30:02.000Z | 2019-05-22T09:14:27.000Z | wrapanapi/entities/base.py | ManageIQ/mgmtsystem | 1a0ee5b99ef3770e119c6264f4e452640c4275bf | [
"MIT"
] | 228 | 2016-06-15T10:23:38.000Z | 2020-01-13T13:49:31.000Z | wrapanapi/entities/base.py | ManageIQ/mgmtsystem | 1a0ee5b99ef3770e119c6264f4e452640c4275bf | [
"MIT"
] | 61 | 2016-07-21T15:59:52.000Z | 2019-09-23T11:03:41.000Z | """
wrapanapi.entities.base
Provides method/class definitions for handling any entity on a provider
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from reprlib import aRepr
from wrapanapi.utils import LoggerMixin
from wrapanapi.exceptions import NotFoundError
class Entity(LoggerMixin, metaclass=ABCMeta):
    """
    Base class to represent any object on a provider system as well
    as methods for manipulating that entity (deleting, renaming, etc.)

    Provides properties/methods that should be applicable
    across all entities on all systems.
    """
    def __init__(self, system, raw=None, **kwargs):
        """
        Constructor for an entity

        An entity is always tied to a specific system

        Args:
            system -- the implementation of wrapanapi.systems.System this entity "resides on"
            raw -- the raw representation of this entity, if already known. This can be an instance
                object as returned by the underlying API/library we use to communicate with
                'system', or it may simply be a dict of JSON data.
            kwargs -- kwargs that are required to uniquely identify this entity

        An entity can be instantiated in two ways:
        1) passing in the 'raw' data
        2) passing in the 'minimal params' (via the kwargs) needed to be able to get the
           correct 'raw' data from the API

        Sometimes kwargs may be required even with method #1 if the 'raw' data which represents
        this entity doesn't provide all the info necessary to look it up (for example, the
        'azure.storage.models.Blob' class does not contain info on 'container', which is needed
        to look up the blob)

        'kwargs' should be the smallest set of args we can use to pull the right info for this
        entity from the system using self.refresh(). These 'unique kwargs' correlate to the
        self._identifying_attrs property below. For many systems, this may be just a uuid, or in
        cases of systems on which names cannot be duplicated, this may be just the 'name' itself.

        'raw' may optionally be passed in at instantiation in cases where we already have obtained
        the raw data for an entity. If this is the case, instance variables will need to be set for
        the 'unique kwargs' based on the given raw data and 'unique kwargs' are not required.

        Whether an instance is created using 'raw', or created using 'kwargs', if it is the same
        entity, the self._identifying_attrs property MUST be equal.
        """
        self.system = system
        self._raw = raw
        self._kwargs = kwargs

    @abstractproperty
    def _identifying_attrs(self):
        """
        Return the list of attributes that make this instance uniquely identifiable without
        needing to query the API for updated data. This should be a dict of kwarg_name, kwarg_value
        for the **kwargs that self.__init__() requires.
        """

    @property
    def _log_id(self):
        """
        Return an str which identifies this VM quickly in logs. Uses _identifying_attrs so that
        API doesn't need to be queried repeatedly.
        """
        # Build "key=value key2=value2" once with join instead of repeated
        # string concatenation in a loop.
        pairs = " ".join(
            "{}={}".format(key, val) for key, val in self._identifying_attrs.items()
        )
        return "<{}>".format(pairs)

    def __eq__(self, other):
        """
        Define a method for asserting if this instance is equal to another instances of
        the same type.

        This should validate that the system and 'unique identifiers' are equal.
        The unique identifying attributes that are passed in at init such as uuid or name
        are used to assert equality, not raw data, since certain params of the raw data
        are subject to change.
        """
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 (__hash__ is implicitly set to None);
        # presumably intended -- confirm before using entities as dict keys.
        if not isinstance(other, self.__class__):
            return False
        try:
            return (self.system == other.system and
                    self._identifying_attrs == other._identifying_attrs)
        except AttributeError:
            return False

    def __repr__(self):
        """Represent object.

        Example:
            <wrapanapi.systems.msazure.AzureInstance system=<AzureSystem>
            raw=<azure.mgmt.compute.v2017_03_30.models.virtual_machine.VirtualMachine>,
            kwargs['name']=u'ansinha_test', kwargs['resource_group']=u'Automation'
            >
        """
        from reprlib import Repr

        # Show object type for system and raw
        params_repr = (
            "system=<{sys_obj_cls}> raw=<{raw_obj_mod}.{raw_obj_cls}>"
            .format(
                sys_obj_cls=self.system.__class__.__name__,
                raw_obj_mod=self._raw.__class__.__module__,
                raw_obj_cls=self._raw.__class__.__name__
            )
        )
        # Use a private Repr instance; the original mutated the module-global
        # ``reprlib.aRepr`` (maxstring/maxother), a process-wide side effect
        # for every other user of reprlib.
        a_repr = Repr()
        a_repr.maxstring = 100
        a_repr.maxother = 100
        # Show kwarg key/value for each unique kwarg
        for key, val in self._identifying_attrs.items():
            params_repr = (
                "{existing_params_repr}, kwargs['{kwarg_key}']={kwarg_val}"
                .format(
                    existing_params_repr=params_repr,
                    kwarg_key=key,
                    kwarg_val=a_repr.repr(val),
                )
            )
        return "<{mod_name}.{class_name} {params_repr}>".format(
            mod_name=self.__class__.__module__,
            class_name=self.__class__.__name__,
            params_repr=params_repr,
        )

    def __str__(self):
        # Prefer the human-friendly name; fall back to uuid if name lookup
        # fails for any reason (e.g. entity not found on refresh).
        try:
            return self.name
        except Exception:
            return self.uuid

    @abstractproperty
    def name(self):
        """
        Returns name from most recent raw data.

        If you need the most up-to-date name, you must call self.refresh() before accessing
        this property.
        """

    @abstractproperty
    def uuid(self):
        """
        Returns uuid from most recent raw data.

        If you need the most up-to-date uuid, you must call self.refresh() before accessing
        this property.

        If the system has no concept of a 'uuid' then some other string value can be used here
        that guarantees uniqueness. This should not return 'None'
        """

    @classmethod
    def get_all_subclasses(cls):
        """
        Return all subclasses that inherit from this class
        """
        # Depth-first over the subclass tree; yields every descendant.
        for subclass in cls.__subclasses__():
            for nested_subclass in subclass.get_all_subclasses():
                yield nested_subclass
            yield subclass

    @abstractmethod
    def refresh(self):
        """
        Re-pull the data for this entity using the system's API and update
        this instance's attributes.

        This method should be called any time the most up-to-date info needs to be
        returned

        This method should re-set self.raw with fresh data for this entity

        Returns:
            New value of self.raw

        Raises:
            NotFoundError if this entity is not found on the system
        """

    @abstractmethod
    def delete(self):
        """
        Removes the entity on the provider
        """

    @abstractmethod
    def cleanup(self):
        """
        Removes the entity on the provider and any of its associated resources

        This should be more than a simple delete, though if that takes care of
        the job and cleans up everything, simply calling "self.delete()" works
        """

    def rename(self):
        """
        Rename entity.

        May not be implemented for all entities.

        This should update self.raw (via self.refresh() or other) to ensure that
        the self.name property is correct after a successful rename.
        """
        raise NotImplementedError

    @property
    def exists(self):
        """
        Checks if this entity exists on the system

        Catches NotFoundError to return False if the entity does not exist
        """
        try:
            self.refresh()
        except NotFoundError:
            return False
        return True

    @property
    def raw(self):
        """
        Returns the raw data returned by this system's underlying API/library

        Can be an object instance, or a dict
        """
        # Lazily fetch from the API the first time raw data is requested.
        if not self._raw:
            self.refresh()
        return self._raw

    @raw.setter
    def raw(self, value):
        """
        Sets the raw data
        """
        self._raw = value
class EntityMixin(object):
    """
    Usually an Entity also provides a mixin which defines methods/properties that should
    be defined by a wrapanapi.systems.System that manages that type of entity

    For example, for a 'Vm' entity, example abstract methods would be:
    get_vm, list_vm, find_vm, create_vm

    These methods should return instances (or a list of instances) which describe the entity

    However, methods for operating on a retrieved entity should be defined in the Entity class
    """
    # There may be some common methods/properties that apply at the base level in future...
    # Intentionally empty for now: serves purely as a marker base class.
    pass
| 34.348485 | 99 | 0.629135 |
acee7b59c97996c78ba409b2c25faff1ee383f50 | 593 | py | Python | src/back/kite/lockfile.py | khamidou/kite | c049faf8522c8346c22c70f2a35a35db6b4a155d | [
"BSD-3-Clause"
] | 136 | 2015-01-06T01:14:35.000Z | 2022-01-20T17:04:52.000Z | src/back/kite/lockfile.py | khamidou/kite | c049faf8522c8346c22c70f2a35a35db6b4a155d | [
"BSD-3-Clause"
] | 3 | 2016-01-14T21:37:10.000Z | 2019-04-17T02:44:08.000Z | src/back/kite/lockfile.py | khamidou/kite | c049faf8522c8346c22c70f2a35a35db6b4a155d | [
"BSD-3-Clause"
] | 38 | 2015-02-28T14:12:26.000Z | 2021-01-17T21:01:02.000Z | from fcntl import flock, LOCK_EX, LOCK_UN
class LockingException(Exception):
    # Module-specific error type for lock failures. Not raised in this file
    # as shown; presumably reserved for callers -- confirm before removing.
    pass
class FileLock(object):
    """Advisory, blocking file lock built on ``fcntl.flock``.

    Usable either via explicit acquire()/release() calls or as a context
    manager (``with FileLock(path): ...``).
    """

    def __init__(self, name, mode="a+"):
        # FIX: the historical default mode "rw+" is rejected by Python 3's
        # open() ("must have exactly one of create/read/write/append mode").
        # "a+" creates the lock file if missing and never truncates it,
        # which is the safe choice for a lock file.
        self.name = name
        self.fd = open(name, mode)

    def acquire(self):
        """Block until an exclusive lock on the file is obtained."""
        flock(self.fd.fileno(), LOCK_EX)

    def release(self):
        """Release the lock and close the file handle (idempotent)."""
        # Guard so a second release() (or release() followed by __exit__)
        # does not blow up on the already-cleared handle.
        if self.fd is not None:
            flock(self.fd.fileno(), LOCK_UN)
            self.fd.close()
            self.fd = None

    def __enter__(self):
        self.acquire()  # flock is blocking, so there's no need to handle a timeout
        return self

    def __exit__(self, type, value, traceback):
        self.release()
| 23.72 | 82 | 0.615514 |
acee7c359a811a22d03519138db7f9ae7550d953 | 88 | py | Python | output/models/ms_data/regex/re_i13_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_i13_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_i13_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.regex.re_i13_xsd.re_i13 import Doc
__all__ = [
"Doc",
]
| 14.666667 | 61 | 0.715909 |
acee7e66579d30b59968b22d63e87595d430ba49 | 930 | py | Python | fn_aws_utilities/fn_aws_utilities/util/aws_config.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_aws_utilities/fn_aws_utilities/util/aws_config.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_aws_utilities/fn_aws_utilities/util/aws_config.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
class AWSConfig:
    """Holds the AWS settings read from app.config and validates that all
    required keys are present."""

    def __init__(self, opts):
        # Pull each expected setting out of the options mapping.
        self.my_aws_secret_access_key = opts.get("aws_secret_access_key")
        self.my_aws_access_key_id = opts.get("aws_access_key_id")
        self.aws_region_name = opts.get("aws_region_name")
        self.aws_sms_topic_name = opts.get("aws_sms_topic_name")

        # Validate in the same order as before so the first missing key
        # produces the same error message.
        required = (
            ("aws_region_name", self.aws_region_name),
            ("aws_access_key_id", self.my_aws_access_key_id),
            ("aws_secret_access_key", self.my_aws_secret_access_key),
            ("aws_sms_topic_name", self.aws_sms_topic_name),
        )
        for key, value in required:
            if value is None:
                raise Exception("{} undefined in app.config".format(key))
acee7f26d35dd90dbb4fb70503cd283c9c94d9d5 | 89 | py | Python | noa_class.py | noah18-meet/meet201617YL1cs-chat | e8e445971d79d798efe940bab4c60538f5cfb572 | [
"MIT"
] | null | null | null | noa_class.py | noah18-meet/meet201617YL1cs-chat | e8e445971d79d798efe940bab4c60538f5cfb572 | [
"MIT"
] | null | null | null | noa_class.py | noah18-meet/meet201617YL1cs-chat | e8e445971d79d798efe940bab4c60538f5cfb572 | [
"MIT"
] | null | null | null | class Cat ():
def make_sound(self):
print ("miau")
# Demo: create one Cat and make it vocalize when the module is run/imported.
c = Cat()
c.make_sound()
| 12.714286 | 25 | 0.550562 |
acee7f9bd761a3690a122233b0eb89d1ba733a05 | 2,597 | py | Python | examples/dfp/v201505/exchange_rate_service/update_exchange_rates.py | coxmediagroup/googleads-python-lib | f85d5d8ab771e93b03b616ef65e2d3082aeef484 | [
"Apache-2.0"
] | 1 | 2015-08-12T14:47:40.000Z | 2015-08-12T14:47:40.000Z | examples/dfp/v201505/exchange_rate_service/update_exchange_rates.py | coxmediagroup/googleads-python-lib | f85d5d8ab771e93b03b616ef65e2d3082aeef484 | [
"Apache-2.0"
] | 1 | 2020-07-24T15:10:10.000Z | 2020-07-24T15:10:10.000Z | examples/dfp/v201505/exchange_rate_service/update_exchange_rates.py | coxmediagroup/googleads-python-lib | f85d5d8ab771e93b03b616ef65e2d3082aeef484 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the value of an exchange rate.
To create exchange rates, run create_exchange_rates.py.
Tags: ExchangeRateService.getExchangeRatesByStatement
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
EXCHANGE_RATE_ID = 'INSERT_EXCHANGE_RATE_ID_HERE'
def main(client, exchange_rate_id):
  """Fetch one exchange rate by ID and update its rate to 1.5 (Python 2).

  Args:
    client: an initialized dfp.DfpClient.
    exchange_rate_id: ID of the ExchangeRate to update.
  """
  # Initialize appropriate service.
  exchange_rate_service = client.GetService('ExchangeRateService',
                                            version='v201505')

  # Create a statement to get an exchange rate by its ID.
  values = [{
      'key': 'id',
      'value': {
          'xsi_type': 'NumberValue',
          'value': exchange_rate_id
      }
  }]
  query = 'WHERE id = :id'

  # Create a filter statement.
  statement = dfp.FilterStatement(query, values, 1)

  # Get rate cards by statement.
  response = exchange_rate_service.getExchangeRatesByStatement(
      statement.ToStatement())

  if 'results' in response:
    exchange_rate = response['results'][0]
    # Update the exchange rate value to 1.5.
    # Rates are fixed-point with 10 decimal places, so 1.5 == 15000000000.
    # ``long`` is the Python 2 integer type; this module is Python 2 only.
    exchange_rate['exchangeRate'] = long(15000000000)

    exchange_rates = exchange_rate_service.updateExchangeRates([exchange_rate])

    if exchange_rates:
      for exchange_rate in exchange_rates:
        print ('Exchange rate with id \'%s,\' currency code \'%s,\' '
               'direction \'%s,\' and exchange rate \'%.2f\' '
               'was updated.' % (exchange_rate['id'],
                                 exchange_rate['currencyCode'],
                                 exchange_rate['direction'],
                                 (float(exchange_rate['exchangeRate']) /
                                  10000000000)))
    else:
      print 'No exchange rates were updated.'
  else:
    print 'No exchange rates found to update.'
if __name__ == '__main__':
  # Initialize client object.
  # Credentials are read from the googleads.yaml storage file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, EXCHANGE_RATE_ID)
| 32.4625 | 79 | 0.664613 |
acee80babd5b34483bc4b89996c0bd2b7345c303 | 411 | py | Python | pyp/wrapper.py | liying2008/PYP_Manager | fd006912e071a97c5682c402b16d42c15844fe95 | [
"MIT"
] | null | null | null | pyp/wrapper.py | liying2008/PYP_Manager | fd006912e071a97c5682c402b16d42c15844fe95 | [
"MIT"
] | null | null | null | pyp/wrapper.py | liying2008/PYP_Manager | fd006912e071a97c5682c402b16d42c15844fe95 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'liying'
import time
from functools import wraps
def fn_timer(func):
    """Decorator that prints the wall-clock time taken by each call.

    Returns the wrapped function's result unchanged; ``functools.wraps``
    preserves the wrapped function's metadata (__name__, __doc__, ...).
    """
    @wraps(func)
    def function_timer(*args, **kwargs):
        t0 = time.time()
        result = func(*args, **kwargs)
        t1 = time.time()
        # FIX: ``func.func_name`` is Python-2-only and raises AttributeError
        # on Python 3; ``func.__name__`` is available on both.
        print ("[ time running -> %s: %s seconds ]" % (func.__name__, str(t1 - t0)))
        return result
    return function_timer
| 19.571429 | 85 | 0.583942 |
acee817c4f7c0468d47aa644b0d9c53ac941d015 | 7,507 | py | Python | kubernetes/client/models/v1_flex_persistent_volume_source.py | carloscastrojumo/python | f461dd42d48650a4ae1b41d630875cad9fcb68ad | [
"Apache-2.0"
] | 2 | 2021-03-09T12:42:05.000Z | 2021-03-09T13:27:50.000Z | kubernetes/client/models/v1_flex_persistent_volume_source.py | carloscastrojumo/python | f461dd42d48650a4ae1b41d630875cad9fcb68ad | [
"Apache-2.0"
] | 7 | 2021-04-13T03:04:42.000Z | 2022-03-02T03:10:18.000Z | kubernetes/client/models/v1_flex_persistent_volume_source.py | carloscastrojumo/python | f461dd42d48650a4ae1b41d630875cad9fcb68ad | [
"Apache-2.0"
] | 1 | 2021-06-13T09:21:37.000Z | 2021-06-13T09:21:37.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.17
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1FlexPersistentVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'driver': 'str',
        'fs_type': 'str',
        'options': 'dict(str, str)',
        'read_only': 'bool',
        'secret_ref': 'V1SecretReference'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'driver': 'driver',
        'fs_type': 'fsType',
        'options': 'options',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef'
    }

    def __init__(self, driver=None, fs_type=None, options=None, read_only=None, secret_ref=None, local_vars_configuration=None):  # noqa: E501
        """V1FlexPersistentVolumeSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._driver = None
        self._fs_type = None
        self._options = None
        self._read_only = None
        self._secret_ref = None
        self.discriminator = None

        # ``driver`` is always assigned (it is required); the setter may
        # raise if client-side validation is on and the value is None.
        self.driver = driver
        if fs_type is not None:
            self.fs_type = fs_type
        if options is not None:
            self.options = options
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref

    @property
    def driver(self):
        """Gets the driver of this V1FlexPersistentVolumeSource.  # noqa: E501
        Driver is the name of the driver to use for this volume.  # noqa: E501
        :return: The driver of this V1FlexPersistentVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._driver

    @driver.setter
    def driver(self, driver):
        """Sets the driver of this V1FlexPersistentVolumeSource.
        Driver is the name of the driver to use for this volume.  # noqa: E501
        :param driver: The driver of this V1FlexPersistentVolumeSource.  # noqa: E501
        :type: str
        """
        # ``driver`` is a required field; reject None when validating.
        if self.local_vars_configuration.client_side_validation and driver is None:  # noqa: E501
            raise ValueError("Invalid value for `driver`, must not be `None`")  # noqa: E501
        self._driver = driver

    @property
    def fs_type(self):
        """Gets the fs_type of this V1FlexPersistentVolumeSource.  # noqa: E501
        Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.  # noqa: E501
        :return: The fs_type of this V1FlexPersistentVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1FlexPersistentVolumeSource.
        Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.  # noqa: E501
        :param fs_type: The fs_type of this V1FlexPersistentVolumeSource.  # noqa: E501
        :type: str
        """
        self._fs_type = fs_type

    @property
    def options(self):
        """Gets the options of this V1FlexPersistentVolumeSource.  # noqa: E501
        Optional: Extra command options if any.  # noqa: E501
        :return: The options of this V1FlexPersistentVolumeSource.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._options

    @options.setter
    def options(self, options):
        """Sets the options of this V1FlexPersistentVolumeSource.
        Optional: Extra command options if any.  # noqa: E501
        :param options: The options of this V1FlexPersistentVolumeSource.  # noqa: E501
        :type: dict(str, str)
        """
        self._options = options

    @property
    def read_only(self):
        """Gets the read_only of this V1FlexPersistentVolumeSource.  # noqa: E501
        Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
        :return: The read_only of this V1FlexPersistentVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1FlexPersistentVolumeSource.
        Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.  # noqa: E501
        :param read_only: The read_only of this V1FlexPersistentVolumeSource.  # noqa: E501
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Gets the secret_ref of this V1FlexPersistentVolumeSource.  # noqa: E501
        :return: The secret_ref of this V1FlexPersistentVolumeSource.  # noqa: E501
        :rtype: V1SecretReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Sets the secret_ref of this V1FlexPersistentVolumeSource.
        :param secret_ref: The secret_ref of this V1FlexPersistentVolumeSource.  # noqa: E501
        :type: V1SecretReference
        """
        self._secret_ref = secret_ref

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict),
        # including models inside lists and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: compare the fully-serialized dicts.
        if not isinstance(other, V1FlexPersistentVolumeSource):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1FlexPersistentVolumeSource):
            return True

        return self.to_dict() != other.to_dict()
| 32.081197 | 199 | 0.617957 |
acee81bafa8abbaed058895281da2d3cade257b6 | 1,264 | py | Python | audio/hmm/audio_hmm_single.py | scottshepard/MScA-Robotics-Capstone | 29762ef87274fcd4d86a69918edc44f2a9f99ed5 | [
"MIT"
] | 3 | 2019-11-16T20:38:10.000Z | 2020-04-11T01:24:36.000Z | audio/hmm/audio_hmm_single.py | scottshepard/MScA-Robotics-Capstone | 29762ef87274fcd4d86a69918edc44f2a9f99ed5 | [
"MIT"
] | 1 | 2019-12-05T01:57:28.000Z | 2019-12-05T01:57:28.000Z | audio/hmm/audio_hmm_single.py | MScA-Robotics/capstone-project-3 | 29762ef87274fcd4d86a69918edc44f2a9f99ed5 | [
"MIT"
] | 2 | 2020-05-17T19:56:12.000Z | 2020-06-23T02:09:30.000Z | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import os
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GroupKFold, GridSearchCV
import pickle
from UrbanHMM import UrbanHMMClassifier
import multiprocessing
# Report available parallelism (informational only).
print(multiprocessing.cpu_count())

# Ensure the output directory for trained models exists.
modelspath = 'models'
if not os.path.exists(modelspath):
    os.makedirs(modelspath)

fulldatasetpath = '../downsampled/'
metadata = pd.read_csv('../UrbanSound8K.csv')

# Encode class names as integers and keep the name -> code mapping for the model.
le = LabelEncoder()
le.fit(metadata['class'])
class_mapping = dict(zip(le.classes_, le.transform(le.classes_)))

Nstates = 5
Ncoef = 20

urban_hmm = UrbanHMMClassifier(class_map=class_mapping, num_states=Nstates, num_cep_coef=Ncoef)

# Train on folds 1-9; hold out fold 10 for evaluation.
urban_hmm.fit(X=list(fulldatasetpath + metadata[metadata['fold'] != 10]['slice_file_name'].astype(str)),
              y=le.transform(metadata[metadata['fold'] != 10]['class']))

scored = urban_hmm.score(X=list(fulldatasetpath + metadata[metadata['fold'] == 10]['slice_file_name'].astype(str)),
                         y=le.transform(metadata[metadata['fold'] == 10]['class']))
print("Fold 10 Score")
print(scored)

# FIX: the original dumped ``grid_search.best_estimator_`` to ``open(name, ...)``;
# both ``grid_search`` and ``name`` are undefined here (leftovers from a
# removed grid-search variant) and the file handle was never closed.
# Persist the model actually trained above, closing the file deterministically.
fname = "./models/hmm_fold10test_s{}_c{}.pkl".format(Nstates, Ncoef)
with open(fname, "wb") as model_file:
    pickle.dump(urban_hmm, model_file)
| 27.478261 | 118 | 0.720728 |
acee837b0c5f13a6aa51003aad4e4ecf0ca45289 | 77,353 | py | Python | tastypie/resources.py | bisio/django-tastypie | 5591d043269cff8f1d12e68780ecd1c5b12bd7c7 | [
"BSD-3-Clause"
] | null | null | null | tastypie/resources.py | bisio/django-tastypie | 5591d043269cff8f1d12e68780ecd1c5b12bd7c7 | [
"BSD-3-Clause"
] | null | null | null | tastypie/resources.py | bisio/django-tastypie | 5591d043269cff8f1d12e68780ecd1c5b12bd7c7 | [
"BSD-3-Clause"
] | null | null | null | import logging
import warnings
import django
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS, LOOKUP_SEP
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.cache import patch_cache_control
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
    from django.views.decorators.csrf import csrf_exempt
except ImportError:
    def csrf_exempt(func):
        # No-op fallback for Django versions that lack the CSRF decorator:
        # returns the view unchanged so decorated code still imports.
        return func
class NOT_AVAILABLE:
    """Sentinel used where field data could not be retrieved."""

    def __str__(self):
        # Human-readable marker; the exact text is part of the API surface.
        return 'No such data is available.'
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.

    Provides sane defaults and the logic needed to augment these settings
    with the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    serializer = Serializer()
    authentication = Authentication()
    authorization = ReadOnlyAuthorization()
    cache = NoCache()
    throttle = BaseThrottle()
    validation = Validation()
    paginator_class = Paginator
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch', 'head']
    list_allowed_methods = None
    detail_allowed_methods = None
    limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    api_name = None
    resource_name = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False

    def __new__(cls, meta=None):
        # Collect every non-private attribute off the supplied ``class Meta``
        # as an override of the defaults above.
        overrides = {}
        if meta:
            overrides = dict(
                (attr_name, getattr(meta, attr_name))
                for attr_name in dir(meta)
                if not attr_name.startswith('_')
            )
        # The list-level/detail-level method lists fall back to the general
        # ``allowed_methods`` when not given explicitly.
        fallback_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch', 'head'])
        for key in ('list_allowed_methods', 'detail_allowed_methods'):
            if overrides.get(key, None) is None:
                overrides[key] = fallback_methods
        # Build a one-off subclass carrying the merged settings.
        return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
    """
    Metaclass that collects declared fields onto ``base_fields`` and attaches
    a ``ResourceOptions`` instance (``_meta``) built from ``class Meta``.
    """
    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = {}
        declared_fields = {}
        # Inherit any fields from parent(s).
        try:
            parents = [b for b in bases if issubclass(b, Resource)]
            # Simulate the MRO.
            parents.reverse()
            for p in parents:
                parent_fields = getattr(p, 'base_fields', {})
                for field_name, field_object in parent_fields.items():
                    # Deep-copy so subclasses never share field instances.
                    attrs['base_fields'][field_name] = deepcopy(field_object)
        except NameError:
            # ``Resource`` itself is being constructed right now; there are
            # no parents to inherit from yet.
            pass
        # NOTE: popping from ``attrs`` while iterating is safe here only
        # because Python 2's ``items()`` returns a list snapshot.
        for field_name, obj in attrs.items():
            # Look for ``dehydrated_type`` instead of doing ``isinstance``,
            # which can break down if Tastypie is re-namespaced as something
            # else.
            if hasattr(obj, 'dehydrated_type'):
                field = attrs.pop(field_name)
                declared_fields[field_name] = field
        attrs['base_fields'].update(declared_fields)
        attrs['declared_fields'] = declared_fields
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        opts = getattr(new_class, 'Meta', None)
        new_class._meta = ResourceOptions(opts)
        if not getattr(new_class._meta, 'resource_name', None):
            # No ``resource_name`` provided. Attempt to auto-name the resource.
            class_name = new_class.__name__
            name_bits = [bit for bit in class_name.split('Resource') if bit]
            resource_name = ''.join(name_bits).lower()
            new_class._meta.resource_name = resource_name
        if getattr(new_class._meta, 'include_resource_uri', True):
            if not 'resource_uri' in new_class.base_fields:
                new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
        elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
            # ``include_resource_uri`` was switched off: drop the inherited
            # field unless this class declared it explicitly.
            del(new_class.base_fields['resource_uri'])
        for field_name, field_object in new_class.base_fields.items():
            if hasattr(field_object, 'contribute_to_class'):
                field_object.contribute_to_class(new_class, field_name)
        return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
    def wrap_view(self, view):
        """
        Wraps methods so they can be called in a more functional way as well
        as handling exceptions better.

        Note that if ``BadRequest`` or an exception with a ``response`` attr
        are seen, there is special handling to either present a message back
        to the user or return the response traveling with the exception.

        ``view`` is the *name* of a method on this resource, resolved with
        ``getattr`` on every request so overrides take effect.
        """
        @csrf_exempt
        def wrapper(request, *args, **kwargs):
            try:
                # Resolve the target view late so subclass overrides win.
                callback = getattr(self, view)
                response = callback(request, *args, **kwargs)
                if request.is_ajax():
                    # IE excessively caches XMLHttpRequests, so we're disabling
                    # the browser cache here.
                    # See http://www.enhanceie.com/ie/bugs.asp for details.
                    patch_cache_control(response, no_cache=True)
                return response
            except (BadRequest, fields.ApiFieldError), e:
                # Client-caused errors surface as a 400 with the message body.
                return http.HttpBadRequest(e.args[0])
            except ValidationError, e:
                # Collapse all Django validation messages into one 400 body.
                return http.HttpBadRequest(', '.join(e.messages))
            except Exception, e:
                # Exceptions carrying a prebuilt ``response`` attribute
                # (e.g. ``ImmediateHttpResponse``) are returned as-is.
                if hasattr(e, 'response'):
                    return e.response
                # A real, non-expected exception.
                # Handle the case where the full traceback is more helpful
                # than the serialized error.
                if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
                    raise
                # Rather than re-raising, we're going to things similar to
                # what Django does. The difference is returning a serialized
                # error message.
                return self._handle_500(request, e)
        return wrapper
    def _handle_500(self, request, exception):
        """
        Build a serialized error response for an unhandled exception.

        ``NotFound``/``ObjectDoesNotExist`` map to a 404; everything else to
        an application error (500). With ``DEBUG`` on, the message and
        traceback are returned to the client; otherwise a canned message is
        sent, the error is logged, and (on Django < 1.3) optionally mailed
        to the admins.
        """
        import traceback
        import sys
        the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
        response_class = http.HttpApplicationError
        # Missing-object errors are reported as 404s rather than 500s.
        if isinstance(exception, (NotFound, ObjectDoesNotExist)):
            response_class = HttpResponseNotFound
        if settings.DEBUG:
            data = {
                "error_message": unicode(exception),
                "traceback": the_trace,
            }
            desired_format = self.determine_format(request)
            serialized = self.serialize(request, data, desired_format)
            return response_class(content=serialized, content_type=build_content_type(desired_format))
        # When DEBUG is False, send an error message to the admins (unless it's
        # a 404, in which case we check the setting).
        if not isinstance(exception, (NotFound, ObjectDoesNotExist)):
            log = logging.getLogger('django.request.tastypie')
            log.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(), extra={'status_code': 500, 'request':request})
            # Django >= 1.3 mails admins via its own logging handlers; only
            # older versions need this manual mail path.
            if django.VERSION < (1, 3, 0) and getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False):
                from django.core.mail import mail_admins
                subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
                try:
                    request_repr = repr(request)
                except:
                    request_repr = "Request repr() unavailable"
                message = "%s\n\n%s" % (the_trace, request_repr)
                mail_admins(subject, message, fail_silently=True)
        # Prep the data going out.
        data = {
            "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
        }
        desired_format = self.determine_format(request)
        serialized = self.serialize(request, data, desired_format)
        return response_class(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
# Due to the way Django parses URLs, ``get_multiple`` won't work without
# a trailing slash.
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<pk_list>\w[\w/;-]*)/$" % self._meta.resource_name, self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
A hook for adding your own URLs or overriding the default URLs.
"""
return []
    @property
    def urls(self):
        """
        The endpoints this ``Resource`` responds to.

        Mostly a standard URLconf, this is suitable for either automatic use
        when registered with an ``Api`` class or for including directly in
        a URLconf should you choose to.
        """
        # User overrides come first so they win over the default patterns.
        urls = self.override_urls() + self.base_urls()
        urlpatterns = patterns('',
            *urls
        )
        return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
    def deserialize(self, request, data, format='application/json'):
        """
        Given a request, data and a format, deserializes the given data.

        It relies on the request properly sending a ``CONTENT_TYPE`` header,
        falling back to ``application/json`` if not provided.

        Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
        """
        # NOTE(review): the ``format`` argument is never used below — the
        # serializer is always driven by the request's CONTENT_TYPE header
        # (defaulting to JSON). Confirm before relying on the parameter.
        deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
        return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
    def dispatch(self, request_type, request, **kwargs):
        """
        Handles the common operations (allowed HTTP method, authentication,
        throttling, method lookup) surrounding most CRUD interactions.

        ``request_type`` is ``'list'`` or ``'detail'`` and selects both the
        allowed-methods list and the handler name (e.g. ``get_list``,
        ``post_detail``).
        """
        allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
        request_method = self.method_check(request, allowed=allowed_methods)
        # Handler name is "<http method>_<list|detail>".
        method = getattr(self, "%s_%s" % (request_method, request_type), None)
        if method is None:
            raise ImmediateHttpResponse(response=http.HttpNotImplemented())
        self.is_authenticated(request)
        self.is_authorized(request)
        self.throttle_check(request)
        # All clear. Process the request.
        request = convert_post_to_put(request)
        response = method(request, **kwargs)
        # Add the throttled request.
        self.log_throttled_access(request)
        # If what comes back isn't a ``HttpResponse``, assume that the
        # request was accepted and that some action occurred. This also
        # prevents Django from freaking out.
        if not isinstance(response, HttpResponse):
            return http.HttpNoContent()
        return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
    def method_check(self, request, allowed=None):
        """
        Ensures that the HTTP method used on the request is allowed to be
        handled by the resource.

        Takes an ``allowed`` parameter, which should be a list of lowercase
        HTTP methods to check against. Usually, this looks like::

            # The most generic lookup.
            self.method_check(request, self._meta.allowed_methods)

            # A lookup against what's allowed for list-type methods.
            self.method_check(request, self._meta.list_allowed_methods)

            # A useful check when creating a new endpoint that only handles
            # GET.
            self.method_check(request, ['get'])

        Returns the lowercased request method on success; raises
        ``ImmediateHttpResponse`` otherwise.
        """
        if allowed is None:
            allowed = []
        request_method = request.method.lower()
        # OPTIONS short-circuits with an ``Allow`` header listing the
        # permitted methods.
        if request_method == "options":
            allows = ','.join(map(str.upper, allowed))
            response = HttpResponse(allows)
            response['Allow'] = allows
            raise ImmediateHttpResponse(response=response)
        if not request_method in allowed:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
        return request_method
    def is_authorized(self, request, object=None):
        """
        Handles checking of permissions to see if the user has authorization
        to GET, POST, PUT, or DELETE this resource.  If ``object`` is provided,
        the authorization backend can apply additional row-level permissions
        checking.

        Raises ``ImmediateHttpResponse`` (with the backend's response, or a
        401) unless the backend returns exactly ``True``.
        """
        # NOTE: the ``object`` parameter shadows the builtin, but renaming it
        # would break callers passing it by keyword.
        auth_result = self._meta.authorization.is_authorized(request, object)
        # The backend may hand back a full HttpResponse (e.g. a challenge).
        if isinstance(auth_result, HttpResponse):
            raise ImmediateHttpResponse(response=auth_result)
        if not auth_result is True:
            raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpForbidden())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def build_bundle(self, obj=None, data=None, request=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(obj=obj, data=data, request=request)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.'
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
# URL-related methods.
def get_resource_uri(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
A ``return reverse("api_dispatch_detail", kwargs={'resource_name':
self.resource_name, 'pk': object.id})`` should be all that would
be needed.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
    def get_resource_list_uri(self):
        """
        Returns a URL specific to this resource's list endpoint.

        Returns ``None`` when the URL cannot be reversed (e.g. the resource
        is not hooked into a URLconf).
        """
        kwargs = {
            'resource_name': self._meta.resource_name,
        }
        if self._meta.api_name is not None:
            kwargs['api_name'] = self._meta.api_name
        try:
            return self._build_reverse_url("api_dispatch_list", kwargs=kwargs)
        except NoReverseMatch:
            return None
    def get_via_uri(self, uri, request=None):
        """
        This pulls apart the salient bits of the URI and populates the
        resource via a ``obj_get``.

        Optionally accepts a ``request``.

        If you need custom behavior based on other portions of the URI,
        simply override this method.
        """
        prefix = get_script_prefix()
        chomped_uri = uri
        # Strip the script prefix but keep its trailing slash so the
        # remainder still begins with '/' for ``resolve``.
        if prefix and chomped_uri.startswith(prefix):
            chomped_uri = chomped_uri[len(prefix)-1:]
        try:
            view, args, kwargs = resolve(chomped_uri)
        except Resolver404:
            raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
        return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))
# Data preparation.
    def full_dehydrate(self, bundle):
        """
        Given a bundle with an object instance, extract the information from it
        to populate the resource.

        Returns the same bundle with ``bundle.data`` filled in, after running
        per-field ``dehydrate_FOO`` overrides and the final ``dehydrate``
        hook.
        """
        # Dehydrate each field.
        for field_name, field_object in self.fields.items():
            # A touch leaky but it makes URI resolution work.
            if getattr(field_object, 'dehydrated_type', None) == 'related':
                field_object.api_name = self._meta.api_name
                field_object.resource_name = self._meta.resource_name
            bundle.data[field_name] = field_object.dehydrate(bundle)
            # Check for an optional method to do further dehydration.
            method = getattr(self, "dehydrate_%s" % field_name, None)
            if method:
                # A ``dehydrate_FOO`` method overrides the field's own value.
                bundle.data[field_name] = method(bundle)
        bundle = self.dehydrate(bundle)
        return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
    def full_hydrate(self, bundle):
        """
        Given a populated bundle, distill it and turn it back into
        a full-fledged object instance.

        Runs the ``hydrate`` hook, then per-field ``hydrate_FOO`` overrides,
        then copies hydrated values onto ``bundle.obj``. M2M data is
        deliberately NOT populated here (see ``hydrate_m2m``).
        """
        if bundle.obj is None:
            bundle.obj = self._meta.object_class()
        bundle = self.hydrate(bundle)
        for field_name, field_object in self.fields.items():
            # Read-only fields never write back to the object.
            if field_object.readonly is True:
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                if value is not None or field_object.null:
                    # We need to avoid populating M2M data here as that will
                    # cause things to blow up.
                    if not getattr(field_object, 'is_related', False):
                        # Plain field: assign the hydrated value directly.
                        setattr(bundle.obj, field_object.attribute, value)
                    elif not getattr(field_object, 'is_m2m', False):
                        # To-one related field: assign the related object
                        # (or None, when allowed) rather than the bundle.
                        if value is not None:
                            setattr(bundle.obj, field_object.attribute, value.obj)
                        elif field_object.blank:
                            continue
                        elif field_object.null:
                            setattr(bundle.obj, field_object.attribute, value)
        return bundle
def hydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
    def hydrate_m2m(self, bundle):
        """
        Populate the ManyToMany data on the instance.

        Only ``bundle.data`` is filled in here — the object itself is left
        unmodified; persisting the M2M data is the caller's job (see
        ``ModelResource`` for a working baseline).
        """
        if bundle.obj is None:
            raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            if field_object.attribute:
                # Note that we only hydrate the data, leaving the instance
                # unmodified. It's up to the user's code to handle this.
                # The ``ModelResource`` provides a working baseline
                # in this regard.
                bundle.data[field_name] = field_object.hydrate_m2m(bundle)
        # Second pass: per-field ``hydrate_FOO`` overrides for M2M fields.
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                method(bundle)
        return bundle
    def build_schema(self):
        """
        Returns a dictionary of all the fields on the resource and some
        properties about those fields.

        Used by the ``schema/`` endpoint to describe what will be available.
        """
        data = {
            'fields': {},
            'default_format': self._meta.default_format,
            'allowed_list_http_methods': self._meta.list_allowed_methods,
            'allowed_detail_http_methods': self._meta.detail_allowed_methods,
            'default_limit': self._meta.limit,
        }
        # Ordering/filtering are only advertised when configured.
        if self._meta.ordering:
            data['ordering'] = self._meta.ordering
        if self._meta.filtering:
            data['filtering'] = self._meta.filtering
        for field_name, field_object in self.fields.items():
            data['fields'][field_name] = {
                'default': field_object.default,
                'type': field_object.dehydrated_type,
                'nullable': field_object.null,
                'blank': field_object.blank,
                'readonly': field_object.readonly,
                'help_text': field_object.help_text,
                'unique': field_object.unique,
            }
        return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
A hook to allow making returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Allows the ``Authorization`` class to further limit the object list.
Also a hook to customize per ``Resource``.
"""
if hasattr(self._meta.authorization, 'apply_limits'):
object_list = self._meta.authorization.apply_limits(request, object_list)
return object_list
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, request=None, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(request=request, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, request=None, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
bundle = self._meta.cache.get(cache_key)
if bundle is None:
bundle = self.obj_get(request=request, **kwargs)
self._meta.cache.set(cache_key, bundle)
return bundle
def obj_create(self, bundle, request=None, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, request=None, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, request=None, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, request=None, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
    def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
        """
        Extracts the common "which-format/serialize/return-response" cycle.

        Mostly a useful shortcut/hook. Extra ``response_kwargs`` (e.g.
        ``location``) are forwarded to the ``response_class`` constructor.
        """
        desired_format = self.determine_format(request)
        serialized = self.serialize(request, data, desired_format)
        return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
    def is_valid(self, bundle, request=None):
        """
        Handles checking if the data provided by the user is valid.

        Mostly a hook, this uses class assigned to ``validation`` from
        ``Resource._meta``.

        If validation fails, an error is raised with the error messages
        serialized inside it (as an ``ImmediateHttpResponse`` carrying a
        400 response).
        """
        errors = self._meta.validation.is_valid(bundle, request)
        if len(errors):
            # Without a request we cannot content-negotiate, so fall back to
            # the resource's default format.
            if request:
                desired_format = self.determine_format(request)
            else:
                desired_format = self._meta.default_format
            serialized = self.serialize(request, errors, desired_format)
            response = http.HttpBadRequest(content=serialized, content_type=build_content_type(desired_format))
            raise ImmediateHttpResponse(response=response)
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
    def get_list(self, request, **kwargs):
        """
        Returns a serialized list of resources.

        Calls ``obj_get_list`` to provide the data, then handles that result
        set and serializes it.

        Should return a HttpResponse (200 OK).
        """
        # TODO: Uncached for now. Invalidation that works for everyone may be
        # impossible.
        objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
        sorted_objects = self.apply_sorting(objects, options=request.GET)
        # Paginate the sorted results before dehydrating (only the current
        # page's objects get bundled).
        paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_list_uri(), limit=self._meta.limit)
        to_be_serialized = paginator.page()
        # Dehydrate the bundles in preparation for serialization.
        bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]
        to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles]
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized)
    def get_detail(self, request, **kwargs):
        """
        Returns a single serialized resource.

        Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
        set and serializes it.

        Should return a HttpResponse (200 OK); 404 when missing, 300 when
        the kwargs match more than one object.
        """
        try:
            obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource is found at this URI.")
        bundle = self.build_bundle(obj=obj, request=request)
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        return self.create_response(request, bundle)
    def head_list(self, request, **kwargs):
        """
        Returns an empty response with the correct response type based on the
        existence of the resource.

        Indicates the existence of the resource without providing any
        serialized data.
        """
        # if we make it this far, the resource exists.
        return HttpResponse()
    def head_detail(self, request, **kwargs):
        """
        Returns an empty response with the correct response type.

        Indicates the existence of the resource without providing any
        serialized data: 200 when found, 404 when missing, 300 when the
        kwargs match more than one object.
        """
        try:
            obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            return http.HttpMultipleChoices()
        return HttpResponse()
    def put_list(self, request, **kwargs):
        """
        Replaces a collection of resources with another collection.

        Calls ``delete_list`` to clear out the collection then ``obj_create``
        with the provided the data to create the new collection.

        Return ``HttpNoContent`` (204 No Content) if
        ``Meta.always_return_data = False`` (default).

        Return ``HttpAccepted`` (202 Accepted) if
        ``Meta.always_return_data = True``.
        """
        deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_list_data(request, deserialized)
        # The incoming payload must wrap the new collection in an
        # ``objects`` key.
        if not 'objects' in deserialized:
            raise BadRequest("Invalid data sent.")
        # Wipe the existing collection before recreating it.
        self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
        bundles_seen = []
        for object_data in deserialized['objects']:
            bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
            # Attempt to be transactional, deleting any previously created
            # objects if validation fails.
            try:
                self.is_valid(bundle, request)
            except ImmediateHttpResponse:
                self.rollback(bundles_seen)
                raise
            self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
            bundles_seen.append(bundle)
        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            to_be_serialized = {}
            to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles_seen]
            to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
            return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
    """
    Either updates an existing resource or creates a new one with the
    provided data.

    Calls ``obj_update`` with the provided data first, but falls back to
    ``obj_create`` if the object does not already exist.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.

    If an existing resource is modified and
    ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
    (204 No Content).
    If an existing resource is modified and
    ``Meta.always_return_data = True``, return ``HttpAccepted`` (202
    Accepted).
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
    self.is_valid(bundle, request)

    try:
        # Update path: an unambiguous existing object matched the kwargs.
        updated_bundle = self.obj_update(bundle, request=request, **self.remove_api_resource_names(kwargs))

        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
    except (NotFound, MultipleObjectsReturned):
        # Create-via-PUT path: no (single) existing object, so make one.
        updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)

        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_list(self, request, **kwargs):
    """
    Creates a new resource/object with the provided data.

    Calls ``obj_create`` with the provided data and returns a response
    with the new resource's location.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.
    """
    content_type = request.META.get('CONTENT_TYPE', 'application/json')
    payload = self.deserialize(request, request.raw_post_data, format=content_type)
    payload = self.alter_deserialized_detail_data(request, payload)

    bundle = self.build_bundle(data=dict_strip_unicode_keys(payload), request=request)
    self.is_valid(bundle, request)

    created_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
    location = self.get_resource_uri(created_bundle)

    if not self._meta.always_return_data:
        return http.HttpCreated(location=location)

    # Client asked for the created representation back in the body.
    created_bundle = self.full_dehydrate(created_bundle)
    created_bundle = self.alter_detail_data_to_serialize(request, created_bundle)
    return self.create_response(request, created_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
    """
    Creates a new subcollection of the resource under a resource.

    Not implemented by default because most people's data models
    aren't self-referential.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    """
    # Subclasses with self-referential models may override this hook.
    return http.HttpNotImplemented()
def delete_list(self, request, **kwargs):
    """
    Destroys a collection of resources/objects.

    Calls ``obj_delete_list``.

    If the resources are deleted, return ``HttpNoContent`` (204 No Content).
    """
    lookup = self.remove_api_resource_names(kwargs)
    self.obj_delete_list(request=request, **lookup)
    return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
    """
    Destroys a single resource/object.

    Calls ``obj_delete``.

    If the resource is deleted, return ``HttpNoContent`` (204 No Content).
    If the resource did not exist, return ``Http404`` (404 Not Found).
    """
    lookup = self.remove_api_resource_names(kwargs)

    try:
        self.obj_delete(request=request, **lookup)
    except NotFound:
        return http.HttpNotFound()

    return http.HttpNoContent()
def patch_list(self, request, **kwargs):
    """
    Updates a collection in-place.

    The exact behavior of ``PATCH`` to a list resource is still the matter of
    some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
    behavior this method implements (described below) is something of a
    stab in the dark. It's mostly cribbed from GData, with a smattering
    of ActiveResource-isms and maybe even an original idea or two.

    The ``PATCH`` format is one that's similar to the response returned from
    a ``GET`` on a list resource::

        {
            "objects": [{object}, {object}, ...],
            "deleted_objects": ["URI", "URI", "URI", ...],
        }

    For each object in ``objects``:

        * If the dict does not have a ``resource_uri`` key then the item is
          considered "new" and is handled like a ``POST`` to the resource list.
        * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
          to an existing resource then the item is a update; it's treated
          like a ``PATCH`` to the corresponding resource detail.
        * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
          then this is considered to be a create-via-``PUT``.

    Each entry in ``deleted_objects`` refers to a resource URI of an existing
    resource to be deleted; each is handled like a ``DELETE`` to the relevant
    resource.

    In any case:

        * If there's a resource URI it *must* refer to a resource of this
          type. It's an error to include a URI of a different resource.
        * ``PATCH`` is all or nothing. If a single sub-operation fails, the
          entire request will fail and all resources will be rolled back.
    """
    request = convert_post_to_patch(request)
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))

    if "objects" not in deserialized:
        raise BadRequest("Invalid data sent.")

    # Creates/updates map onto detail PUT, so require 'put' permission.
    if len(deserialized["objects"]) and 'put' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())

    for data in deserialized["objects"]:
        # If there's a resource_uri then this is either an
        # update-in-place or a create-via-PUT.
        if "resource_uri" in data:
            uri = data.pop('resource_uri')

            try:
                obj = self.get_via_uri(uri, request=request)

                # The object does exist, so this is an update-in-place.
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle)
                bundle = self.alter_detail_data_to_serialize(request, bundle)
                self.update_in_place(request, bundle, data)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # The object referenced by resource_uri doesn't exist,
                # so this is a create-by-PUT equivalent.
                # BUGFIX: the previous code did ``bundle.obj.pk = obj.pk``
                # here, but when ``get_via_uri`` raises, ``obj`` is unbound
                # (or stale from a prior loop iteration), which produced a
                # NameError / wrong pk instead of creating the resource.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
                self.is_valid(bundle, request)
                self.obj_create(bundle, request=request)
        else:
            # There's no resource URI, so this is a create call just
            # like a POST to the list resource.
            data = self.alter_deserialized_detail_data(request, data)
            bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
            self.is_valid(bundle, request)
            self.obj_create(bundle, request=request)

    # Deletions map onto detail DELETE, so require 'delete' permission.
    if len(deserialized.get('deleted_objects', [])) and 'delete' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())

    for uri in deserialized.get('deleted_objects', []):
        obj = self.get_via_uri(uri, request=request)
        self.obj_delete(request=request, _obj=obj)

    return http.HttpAccepted()
def patch_detail(self, request, **kwargs):
    """
    Updates a resource in-place.

    Calls ``obj_update``.

    If the resource is updated, return ``HttpAccepted`` (202 Accepted).
    If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
    """
    request = convert_post_to_patch(request)

    # We want to be able to validate the update, but we can't just pass
    # the partial data into the validator since all data needs to be
    # present. Instead, we basically simulate a PUT by pulling out the
    # original data and updating it in-place.

    # So first pull out the original object. This is essentially
    # ``get_detail``.
    try:
        obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")

    # Dehydrate the existing object so the partial payload can be merged
    # over a complete representation.
    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)

    # Now update the bundle in-place with the partial payload.
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    self.update_in_place(request, bundle, deserialized)
    return http.HttpAccepted()
def update_in_place(self, request, original_bundle, new_data):
    """
    Update the object in ``original_bundle`` in-place using ``new_data``.

    ``original_bundle`` carries the fully dehydrated existing data;
    ``new_data`` is the (possibly partial) payload merged over it.
    Returns the result of ``obj_update``.
    """
    original_bundle.data.update(**dict_strip_unicode_keys(new_data))

    # Now we've got a bundle with the new data sitting in it and we're
    # basically in the same spot as a PUT request. So the rest of this
    # function is cribbed from put_detail.
    self.alter_deserialized_detail_data(request, original_bundle.data)
    self.is_valid(original_bundle, request)
    return self.obj_update(original_bundle, request=request, pk=original_bundle.obj.pk)
def get_schema(self, request, **kwargs):
    """
    Returns a serialized form of the schema of the resource.

    Calls ``build_schema`` to generate the data. This method only responds
    to HTTP GET.

    Should return a HttpResponse (200 OK).
    """
    # Standard request gatekeeping: method, auth, throttle.
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    self.log_throttled_access(request)

    schema = self.build_schema()
    return self.create_response(request, schema)
def get_multiple(self, request, **kwargs):
    """
    Returns a serialized list of resources based on the identifiers
    from the URL.

    Calls ``obj_get`` to fetch only the objects requested. This method
    only responds to HTTP GET.

    Should return a HttpResponse (200 OK).
    """
    self.method_check(request, allowed=['get', 'head'])
    self.is_authenticated(request)
    self.throttle_check(request)

    found = []
    missing = []

    # The URL carries a ';'-separated list of primary keys.
    for pk in kwargs.get('pk_list', '').split(';'):
        try:
            fetched = self.obj_get(request, pk=pk)
            bundle = self.build_bundle(obj=fetched, request=request)
            found.append(self.full_dehydrate(bundle))
        except ObjectDoesNotExist:
            missing.append(pk)

    object_list = {
        'objects': found,
    }

    if missing:
        object_list['not_found'] = missing

    self.log_throttled_access(request)
    return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    # Metaclass that extends ``DeclarativeMetaclass`` with Django model
    # introspection: derives ``object_class`` from ``Meta.queryset``,
    # applies ``fields``/``excludes`` filtering, auto-generates API fields
    # from the model, and optionally exposes ``absolute_url``.

    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')

        # Infer the object class from the queryset so users only have to
        # declare ``Meta.queryset``.
        if meta and hasattr(meta, 'queryset'):
            setattr(meta, 'object_class', meta.queryset.model)

        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()

        # NOTE(review): deleting from ``base_fields`` while iterating its
        # ``keys()`` is only safe where ``keys()`` returns a list (Python 2);
        # this module appears to target Python 2 (``__metaclass__``,
        # ``request.raw_post_data``) — confirm before porting.
        for field_name in field_names:
            if field_name == 'resource_uri':
                # Always keep the auto-generated URI field.
                continue
            if field_name in new_class.declared_fields:
                # Explicitly declared fields always win.
                continue
            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])
            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])

        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))

        # Optionally expose the model's ``get_absolute_url`` as a read-only
        # ``absolute_url`` field; remove an inherited one if disabled.
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])

        return new_class
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
    """
    Given a Django model field, return if it should be included in the
    contributed ApiFields.
    """
    # Relational fields (those with a ``rel``) are not auto-introspected;
    # they require an explicitly declared resource field.
    return bool(getattr(field, 'rel'))
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
    """
    Returns the field type that would likely be associated with each
    Django type.

    Unrecognized internal types fall back to ``default`` (``CharField``).
    """
    internal = f.get_internal_type()

    if internal in ('DateField', 'DateTimeField'):
        return fields.DateTimeField
    if internal in ('BooleanField', 'NullBooleanField'):
        return fields.BooleanField
    if internal in ('FloatField',):
        return fields.FloatField
    if internal in ('DecimalField',):
        return fields.DecimalField
    if internal in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'):
        return fields.IntegerField
    if internal in ('FileField', 'ImageField'):
        return fields.FileField
    if internal == 'TimeField':
        return fields.TimeField

    # TODO: Perhaps enable these via introspection. The reason they're not enabled
    #       by default is the very different ``__init__`` they have over
    #       the other fields.
    # if internal == 'ForeignKey':
    #     return ForeignKey
    # if internal == 'ManyToManyField':
    #     return ManyToManyField

    return default
@classmethod
def get_fields(cls, fields=None, excludes=None):
    """
    Given any explicit fields to include and fields to exclude, add
    additional fields based on the associated model.

    Returns a dict mapping model field names to instantiated API field
    objects for every non-relational model field that passes the
    include/exclude filters and isn't already declared on the resource.
    """
    final_fields = {}
    fields = fields or []
    excludes = excludes or []

    if not cls._meta.object_class:
        return final_fields

    for f in cls._meta.object_class._meta.fields:
        # If the field name is already present, skip
        if f.name in cls.base_fields:
            continue

        # If field is not present in explicit field listing, skip
        if fields and f.name not in fields:
            continue

        # If field is in exclude list, skip
        if excludes and f.name in excludes:
            continue

        # Relational fields are skipped (see ``should_skip_field``).
        if cls.should_skip_field(f):
            continue

        api_field_class = cls.api_field_from_django_field(f)

        kwargs = {
            'attribute': f.name,
            'help_text': f.help_text,
        }

        if f.null is True:
            kwargs['null'] = True

        kwargs['unique'] = f.unique

        # Blankable (but non-null) fields and TextFields get an empty
        # string default so hydration doesn't fail on missing data.
        if not f.null and f.blank is True:
            kwargs['default'] = ''

        if f.get_internal_type() == 'TextField':
            kwargs['default'] = ''

        # An explicit model default always wins over the fallbacks above.
        if f.has_default():
            kwargs['default'] = f.default

        final_fields[f.name] = api_field_class(**kwargs)
        final_fields[f.name].instance_name = f.name

    return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
    """
    Given a field name, an optional filter type and an optional list of
    additional relations, determine if a field can be filtered on.

    If a filter does not meet the needed conditions, it should raise an
    ``InvalidFilterError``.

    If the filter meets the conditions, a list of attribute names (not
    field names) will be returned.
    """
    if filter_bits is None:
        filter_bits = []

    if not field_name in self._meta.filtering:
        raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)

    # Check to see if it's an allowed lookup type.
    if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
        # Must be an explicit whitelist.
        if not filter_type in self._meta.filtering[field_name]:
            raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))

    if self.fields[field_name].attribute is None:
        raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)

    # Check to see if it's a relational lookup and if that's allowed.
    if len(filter_bits):
        if not getattr(self.fields[field_name], 'is_related', False):
            raise InvalidFilterError("The '%s' field does not support relations." % field_name)

        if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
            raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)

        # Recursively descend through the remaining lookups in the filter,
        # if any. We should ensure that all along the way, we're allowed
        # to filter on that field by the related resource.
        related_resource = self.fields[field_name].get_related_resource(None)
        return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])

    return [self.fields[field_name].attribute]
def build_filters(self, filters=None):
    """
    Given a dictionary of filters, create the necessary ORM-level filters.

    Keys should be resource fields, **NOT** model fields.

    Valid values are either a list of Django filter types (i.e.
    ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
    ``ALL_WITH_RELATIONS`` constant.
    """
    # At the declarative level:
    #     filtering = {
    #         'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
    #         'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
    #         'resource_field_name_3': ALL,
    #         'resource_field_name_4': ALL_WITH_RELATIONS,
    #         ...
    #     }
    # Accepts the filters as a dict. None by default, meaning no filters.
    if filters is None:
        filters = {}

    qs_filters = {}

    for filter_expr, value in filters.items():
        # e.g. "author__name__startswith" -> ['author', 'name', 'startswith']
        filter_bits = filter_expr.split(LOOKUP_SEP)
        field_name = filter_bits.pop(0)
        filter_type = 'exact'

        if not field_name in self.fields:
            # It's not a field we know about. Move along citizen.
            continue

        # A trailing bit that is a recognized lookup becomes the filter
        # type; the rest are relational traversals.
        if len(filter_bits) and filter_bits[-1] in QUERY_TERMS.keys():
            filter_type = filter_bits.pop()

        lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)

        # Coerce common string spellings of booleans/None from query strings.
        if value in ['true', 'True', True]:
            value = True
        elif value in ['false', 'False', False]:
            value = False
        elif value in ('nil', 'none', 'None', None):
            value = None

        # Split on ',' if not empty string and either an in or range filter.
        if filter_type in ('in', 'range') and len(value):
            if hasattr(filters, 'getlist'):
                # QueryDict: honor repeated parameters.
                value = filters.getlist(filter_expr)
            else:
                value = value.split(',')

        db_field_name = LOOKUP_SEP.join(lookup_bits)
        qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
        qs_filters[qs_filter] = value

    return dict_strip_unicode_keys(qs_filters)
def apply_sorting(self, obj_list, options=None):
    """
    Given a dictionary of options, apply some ORM-level sorting to the
    provided ``QuerySet``.

    Looks for the ``order_by`` key and handles either ascending (just the
    field name) or descending (the field name with a ``-`` in front).

    The field name should be the resource field, **NOT** model field.
    """
    if options is None:
        options = {}

    parameter_name = 'order_by'

    if not 'order_by' in options:
        if not 'sort_by' in options:
            # Nothing to alter the order. Return what we've got.
            return obj_list
        else:
            # Legacy spelling still accepted, with a deprecation warning.
            warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
            parameter_name = 'sort_by'

    order_by_args = []

    if hasattr(options, 'getlist'):
        # QueryDict: honor repeated order_by parameters.
        order_bits = options.getlist(parameter_name)
    else:
        order_bits = options.get(parameter_name)

        if not isinstance(order_bits, (list, tuple)):
            order_bits = [order_bits]

    for order_by in order_bits:
        order_by_bits = order_by.split(LOOKUP_SEP)

        field_name = order_by_bits[0]
        order = ''

        # A leading '-' means descending order.
        if order_by_bits[0].startswith('-'):
            field_name = order_by_bits[0][1:]
            order = '-'

        if not field_name in self.fields:
            # It's not a field we know about. Move along citizen.
            raise InvalidSortError("No matching '%s' field for ordering on." % field_name)

        if not field_name in self._meta.ordering:
            raise InvalidSortError("The '%s' field does not allow ordering." % field_name)

        if self.fields[field_name].attribute is None:
            raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)

        # Translate the resource field name to the model attribute and
        # re-append any relational lookup bits.
        order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))

    return obj_list.order_by(*order_by_args)
def apply_filters(self, request, applicable_filters):
    """
    An ORM-specific implementation of ``apply_filters``.

    The default simply applies the ``applicable_filters`` as ``**kwargs``,
    but should make it possible to do more advanced things.
    """
    object_list = self.get_object_list(request)
    return object_list.filter(**applicable_filters)
def get_object_list(self, request):
    """
    An ORM-specific implementation of ``get_object_list``.

    Returns a queryset that may have been limited by other overrides.
    """
    # Clone so callers can chain filters freely without mutating the
    # shared ``Meta.queryset``.
    base_queryset = self._meta.queryset
    return base_queryset._clone()
def obj_get_list(self, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_get_list``.

    Takes an optional ``request`` object, whose ``GET`` dictionary can be
    used to narrow the query.
    """
    filters = {}

    if hasattr(request, 'GET'):
        # Grab a mutable copy.
        filters = request.GET.copy()

    # Update with the provided kwargs.
    filters.update(kwargs)
    applicable_filters = self.build_filters(filters=filters)

    try:
        base_object_list = self.apply_filters(request, applicable_filters)
        # Trim the result down to what this request is authorized to see.
        return self.apply_authorization_limits(request, base_object_list)
    except ValueError:
        # The ORM raises ValueError on type mismatches (e.g. a string
        # where an int pk is expected); surface it as a client error.
        raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_get``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance.

    Raises ``DoesNotExist``/``MultipleObjectsReturned`` when the lookup
    does not identify exactly one authorized object, and ``NotFound`` on
    mismatched lookup types.
    """
    try:
        base_object_list = self.get_object_list(request).filter(**kwargs)
        object_list = self.apply_authorization_limits(request, base_object_list)
        # Human-readable rendering of the lookup for error messages.
        stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])

        if len(object_list) <= 0:
            raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
        elif len(object_list) > 1:
            raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))

        return object_list[0]
    except ValueError:
        raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_create``.

    Builds a fresh model instance, applies any ``kwargs`` overrides,
    hydrates it from the bundle, then saves the object along with its
    FK and M2M relations. Returns the bundle.
    """
    bundle.obj = self._meta.object_class()

    for attr_name, attr_value in kwargs.items():
        setattr(bundle.obj, attr_name, attr_value)

    bundle = self.full_hydrate(bundle)

    # Save FKs just in case.
    self.save_related(bundle)

    # Save the main object.
    bundle.obj.save()

    # Now pick up the M2M bits.
    self.save_m2m(self.hydrate_m2m(bundle))
    return bundle
def obj_update(self, bundle, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_update``.

    Looks up the existing model instance (hydrating ``kwargs`` into
    richer lookup values when possible), re-hydrates it with the
    bundle's data, then saves the object plus its FK and M2M relations.

    Raises ``NotFound`` if no matching instance exists.
    """
    if not bundle.obj or not bundle.obj.pk:
        # Attempt to hydrate data from kwargs before doing a lookup for the object.
        # This step is needed so certain values (like datetime) will pass model validation.
        try:
            bundle.obj = self.get_object_list(request).model()
            bundle.data.update(kwargs)
            bundle = self.full_hydrate(bundle)
            lookup_kwargs = kwargs.copy()

            for key in kwargs.keys():
                if key == 'pk':
                    continue
                elif getattr(bundle.obj, key, NOT_AVAILABLE) is not NOT_AVAILABLE:
                    lookup_kwargs[key] = getattr(bundle.obj, key)
                else:
                    del lookup_kwargs[key]
        except Exception:
            # FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. The intent is best-effort: if
            # there is trouble hydrating the data, fall back to just using
            # kwargs by itself (usually it only contains a "pk" key and
            # this will work fine).
            lookup_kwargs = kwargs

        try:
            bundle.obj = self.obj_get(request, **lookup_kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    bundle = self.full_hydrate(bundle)

    # Save FKs just in case.
    self.save_related(bundle)

    # Save the main object.
    bundle.obj.save()

    # Now pick up the M2M bits.
    m2m_bundle = self.hydrate_m2m(bundle)
    self.save_m2m(m2m_bundle)
    return bundle
def obj_delete_list(self, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete_list``.

    Takes optional ``kwargs``, which can be used to narrow the query.
    """
    candidates = self.get_object_list(request).filter(**kwargs)
    authorized = self.apply_authorization_limits(request, candidates)

    if hasattr(authorized, 'delete'):
        # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
        authorized.delete()
    else:
        for item in authorized:
            item.delete()
def obj_delete(self, request=None, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance. A pre-fetched object may be supplied via ``_obj``.
    """
    target = kwargs.pop('_obj', None)

    if not hasattr(target, 'delete'):
        # No usable pre-fetched object; look one up from the kwargs.
        try:
            target = self.obj_get(request, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    target.delete()
def patch_list(self, request, **kwargs):
    """
    An ORM-specific implementation of ``patch_list``.

    Necessary because PATCH should be atomic (all-success or all-fail)
    and the only way to do this neatly is at the database level.
    """
    # Wrap the generic PATCH handling in a transaction so any failing
    # sub-operation rolls the whole batch back.
    with transaction.commit_on_success():
        return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
    """
    A ORM-specific implementation of ``rollback``.

    Given the list of bundles, delete all models pertaining to those
    bundles.
    """
    for bundle in bundles:
        obj = bundle.obj

        # Only delete objects that actually made it into the database
        # (i.e. have a primary key).
        if obj and getattr(obj, 'pk', None):
            obj.delete()
def save_related(self, bundle):
    """
    Handles the saving of related non-M2M data.

    Calling assigning ``child.parent = parent`` & then calling
    ``Child.save`` isn't good enough to make sure the ``parent``
    is saved.

    To get around this, we go through all our related fields &
    call ``save`` on them if they have related, non-M2M data.
    M2M data is handled by the ``ModelResource.save_m2m`` method.
    """
    for field_name, field_object in self.fields.items():
        # Only FK-style fields: related, but not many-to-many.
        if not getattr(field_object, 'is_related', False):
            continue

        if getattr(field_object, 'is_m2m', False):
            continue

        if not field_object.attribute:
            continue

        # Blank fields are optional; nothing to force-save.
        if field_object.blank:
            continue

        # Get the object.
        try:
            related_obj = getattr(bundle.obj, field_object.attribute)
        except ObjectDoesNotExist:
            related_obj = None

        # Because sometimes it's ``None`` & that's OK.
        if related_obj:
            related_obj.save()
            # Re-assign so the FK id on ``bundle.obj`` picks up the
            # freshly-saved related object's pk.
            setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
    """
    Handles the saving of related M2M data.

    Due to the way Django works, the M2M data must be handled after the
    main instance, which is why this isn't a part of the main ``save`` bits.

    Currently slightly inefficient in that it will clear out the whole
    relation and recreate the related data as needed.
    """
    for field_name, field_object in self.fields.items():
        if not getattr(field_object, 'is_m2m', False):
            continue

        if not field_object.attribute:
            continue

        # Read-only fields were never hydrated; nothing to persist.
        if field_object.readonly:
            continue

        # Get the manager.
        related_mngr = getattr(bundle.obj, field_object.attribute)

        if hasattr(related_mngr, 'clear'):
            # Clear it out, just to be safe.
            related_mngr.clear()

        related_objs = []

        # Each related bundle must be saved before it can be attached.
        for related_bundle in bundle.data[field_name]:
            related_bundle.obj.save()
            related_objs.append(related_bundle.obj)

        related_mngr.add(*related_objs)
def get_resource_uri(self, bundle_or_obj):
    """
    Handles generating a resource URI for a single resource.

    Uses the model's ``pk`` in order to create the URI.
    """
    # Accept either a ``Bundle`` wrapping the object or the object itself.
    if isinstance(bundle_or_obj, Bundle):
        pk = bundle_or_obj.obj.pk
    else:
        pk = bundle_or_obj.id

    url_kwargs = {
        'resource_name': self._meta.resource_name,
        'pk': pk,
    }

    if self._meta.api_name is not None:
        url_kwargs['api_name'] = self._meta.api_name

    return self._build_reverse_url("api_dispatch_detail", kwargs=url_kwargs)
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Qualify the URL name with the configured namespace before
        # reversing, e.g. "v1:api_dispatch_detail".
        namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
        return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.

    Django only parses form bodies for POST, so we temporarily disguise
    the request as a POST, let Django load the body, then restore the
    original method and mirror the parsed data onto ``request.<verb>``.
    """
    if request.method == verb:
        if hasattr(request, '_post'):
            # Drop any previously-cached parse so we re-read the body.
            del(request._post)
            del(request._files)

        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            # Older/newer Django: ``method`` may not be assignable, so go
            # through ``request.META`` instead.
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb

        # Expose the parsed data as e.g. ``request.PUT`` / ``request.PATCH``.
        setattr(request, verb, request.POST)

    return request
def convert_post_to_put(request):
    """Coerce Django into parsing a ``PUT`` body (see ``convert_post_to_VERB``)."""
    return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
    """Coerce Django into parsing a ``PATCH`` body (see ``convert_post_to_VERB``)."""
    return convert_post_to_VERB(request, verb='PATCH')
| 37.880999 | 174 | 0.623647 |
acee83bf632effd295db8290336787578d6c4859 | 2,284 | py | Python | uproot/write/TFree.py | riga/uproot | 78de42f849079c35fd05ae22033e56f02492b6c1 | [
"BSD-3-Clause"
] | 1 | 2021-03-18T23:33:35.000Z | 2021-03-18T23:33:35.000Z | uproot/write/TFree.py | riga/uproot | 78de42f849079c35fd05ae22033e56f02492b6c1 | [
"BSD-3-Clause"
] | 17 | 2020-01-28T22:33:27.000Z | 2021-06-10T21:05:49.000Z | sparse/repos/chnzhangrui/SgTopWorkshop/binder/uproot/write/TFree.py | yuvipanda/mybinder.org-analytics | 7b654e3e21dea790505c626d688aa15640ea5808 | [
"BSD-3-Clause"
] | 1 | 2021-07-17T12:55:22.000Z | 2021-07-17T12:55:22.000Z | #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import struct
import numpy
class TFree(object):
    """
    Represents a ROOT ``TFree`` record describing the free region at the
    end of a file: the span from ``fEND`` up to the next 2 GB boundary.
    """

    # Big-offset layout: version (int16) + two int64 offsets.
    _format_big = struct.Struct(">hqq")
    # Small-offset layout: version (int16) + two int32 offsets.
    _format_small = struct.Struct(">hii")

    def __init__(self, fEND):
        self.fFirst = fEND
        # Round up to the next multiple of 2,000,000,000 bytes.
        self.fLast = int(math.ceil(fEND / 2000000000.0)) * 2000000000

    def _needs_big(self):
        # 64-bit offsets are required once fLast exceeds int32 range.
        return self.fLast > numpy.iinfo(numpy.int32).max

    def write(self, cursor, sink):
        # Version 1001 flags the 64-bit (big) layout; version 1 is 32-bit.
        if self._needs_big():
            cursor.write_fields(sink, self._format_big, 1001, self.fFirst, self.fLast)
        else:
            cursor.write_fields(sink, self._format_small, 1, self.fFirst, self.fLast)

    def size(self):
        fmt = TFree._format_big if self._needs_big() else TFree._format_small
        return fmt.size
| 41.527273 | 86 | 0.737303 |
acee84066a925a300ed01c2422e3aec544521213 | 213 | py | Python | services/users/serializers.py | mrubio-chavarria/nonSpot | 1bae76dffc1a8ae124537230bcc5599b7b0d30c2 | [
"MIT"
] | 1 | 2020-04-13T11:58:49.000Z | 2020-04-13T11:58:49.000Z | services/users/serializers.py | mrubio-chavarria/nonSpot | 1bae76dffc1a8ae124537230bcc5599b7b0d30c2 | [
"MIT"
] | 3 | 2020-04-09T20:33:54.000Z | 2020-04-09T20:34:01.000Z | services/users/serializers.py | mrubio-chavarria/nonSpot | 1bae76dffc1a8ae124537230bcc5599b7b0d30c2 | [
"MIT"
] | 1 | 2020-04-18T17:07:31.000Z | 2020-04-18T17:07:31.000Z | import requests
from rest_framework import serializers
from services.users.models import User
class UserSerializer(serializers.ModelSerializer):
    # DRF serializer auto-generating fields for every column of ``User``.
    class Meta:
        model = User
        fields = '__all__'
| 17.75 | 50 | 0.741784 |
acee840e4ad60017fd765815ec0f73a5030712ac | 241 | py | Python | lambdata/wrangle_example.py | thecodinguru/lambdata | ce6ed504caf51dd83c27e2225d2fdf99942be5a6 | [
"MIT"
] | null | null | null | lambdata/wrangle_example.py | thecodinguru/lambdata | ce6ed504caf51dd83c27e2225d2fdf99942be5a6 | [
"MIT"
] | null | null | null | lambdata/wrangle_example.py | thecodinguru/lambdata | ce6ed504caf51dd83c27e2225d2fdf99942be5a6 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
# Load the raw GlobalTemperatures dataset straight from the repository.
df = pd.read_csv("https://raw.githubusercontent.com/thecodinguru/lambdata/master/lambdata/data/GlobalTemperatures.csv")
# NOTE(review): import placed mid-script; conventionally it belongs at the top of the file.
from lambdata import wrangle_df
# Clean the raw frame with the project's wrangle helper.
df2 = wrangle_df.wrangle(df)
print(df2.head()) | 24.1 | 119 | 0.79668 |
acee846dada96eea6dfcbde2d7eab953910b9849 | 5,577 | py | Python | generalfile/path.py | ManderaGeneral/generalfile | 4591c8ff7da9be3d75aa6684f7b9bd405eca5089 | [
"MIT"
] | 1 | 2021-02-10T11:22:16.000Z | 2021-02-10T11:22:16.000Z | generalfile/path.py | ManderaGeneral/generalfile | 4591c8ff7da9be3d75aa6684f7b9bd405eca5089 | [
"MIT"
] | null | null | null | generalfile/path.py | ManderaGeneral/generalfile | 4591c8ff7da9be3d75aa6684f7b9bd405eca5089 | [
"MIT"
] | null | null | null |
import pathlib
import os
from generallibrary import VerInfo, TreeDiagram, Recycle, classproperty, deco_cache
from generalfile.errors import InvalidCharacterError
from generalfile.path_lock import Path_ContextManager
from generalfile.path_operations import Path_Operations
from generalfile.path_strings import Path_Strings
from generalfile.optional_dependencies.path_spreadsheet import Path_Spreadsheet
from generalfile.optional_dependencies.path_text import Path_Text
from generalfile.optional_dependencies.path_cfg import Path_Cfg
class Path(TreeDiagram, Recycle, Path_ContextManager, Path_Operations, Path_Strings, Path_Spreadsheet, Path_Text, Path_Cfg):
    """ Immutable cross-platform Path.
        Built on pathlib and TreeDiagram.
        Implements rules to ensure cross-platform compatability.
        Adds useful methods.
        Todo: Binary extension. """
    verInfo = VerInfo()  # Platform information shared by all instances.
    _path_delimiter = verInfo.pathDelimiter  # OS-specific delimiter.
    Path = ...  # Placeholder; rebound to this class by the setattr() at module level so methods can build new Paths.
    _recycle_keys = {"path": lambda path: Path.scrub("" if path is None else str(path))}  # Recycle identity: the scrubbed path string.
    _alternative_chars = {_path_delimiter: "/", ":": ":", ".": "."}
    def __init__(self, path=None): # Don't have parent here because of Recycle
        # scrub() normalizes delimiters, validates characters and trims the string.
        self.path = self.scrub(str_path="" if path is None else str(path))
        self._path = pathlib.Path(self.path)
        self._latest_listdir = set()
    copy_node = NotImplemented # Maybe something like this to disable certain methods
    @classproperty
    def path_delimiter(cls):
        # Read-only access to the platform delimiter.
        return cls._path_delimiter
    def spawn_parents(self):
        """Lazily create this node's parent Path from the substring before the last delimiter."""
        if not self.get_parent(spawn=False) and self.path and not self.is_root():
            try:
                index = self.path.rindex(self.path_delimiter) + 1
            except ValueError:
                # No delimiter present - parent is the empty (working dir) path.
                index = 0
            self.set_parent(Path(path=self.path[:index]))
    def spawn_children(self):
        """Lazily sync child Path nodes against the current directory listing."""
        if self.is_folder():
            old_children = {path.name() for path in self.get_children(spawn=False)}
            try:
                new_children = set(os.listdir(self.path if self.path else "."))
            except PermissionError:
                # An unreadable folder is treated as empty.
                new_children = set()
            # Only names that appeared or disappeared need their parent link updated.
            for name in old_children.symmetric_difference(new_children):
                path = Path(path=self / name)
                path.set_parent(self if name in new_children else None)
    def __str__(self):
        return getattr(self, "path", "<Path not loaded yet>")
        # return self.path
    def __repr__(self):
        return self.name()
    def __fspath__(self):
        # os.fspath()/open() support.
        return self.path
    def __format__(self, format_spec):
        return self.path.__format__(format_spec)
    def __truediv__(self, other):
        """ :rtype: generalfile.Path """
        # print("here", self._recycle_instances)
        return self.Path(self._path / str(other))
    def __eq__(self, other):
        # Compare against another Path or anything convertible to a scrubbed path string.
        if isinstance(other, Path):
            other = other.path
        else:
            other = self._scrub("" if other is None else str(other))
        return self.path == other
    def __hash__(self):
        return hash(self.path)
    def __contains__(self, item):
        return self.path.__contains__(item)
    @classmethod
    def _scrub(cls, str_path):
        """Normalize and validate a raw path string (uncached pipeline)."""
        str_path = cls._replace_delimiters(str_path=str_path)
        str_path = cls._invalid_characters(str_path=str_path)
        str_path = cls._trim(str_path=str_path)
        str_path = cls._delimiter_suffix_if_root(str_path=str_path)
        return str_path
    @classmethod
    @deco_cache()
    def scrub(cls, str_path):
        """Cached public wrapper around _scrub()."""
        return cls._scrub(str_path=str_path)
    @classmethod
    @deco_cache()
    def _replace_delimiters(cls, str_path):
        """Convert both '/' and '\\' to the platform delimiter."""
        str_path = str_path.replace("/", cls.path_delimiter)
        str_path = str_path.replace("\\", cls.path_delimiter)
        return str_path
    @classmethod
    @deco_cache()
    def _invalid_characters(cls, str_path):
        # Simple invalid characters testing from Windows
        for character in '<>"|?*':
            if character in str_path:
                raise InvalidCharacterError(f"Invalid character '{character}' in '{str_path}'")
        if ":" in str_path:
            # A colon is only legal as a drive designator at index 1 (e.g. 'C:').
            if not cls.verInfo.pathRootHasColon:
                raise InvalidCharacterError(f"Path has a colon but '{cls.verInfo.os}' doesn't use colon for path root: '{str_path}'")
            if str_path[1] != ":":
                raise InvalidCharacterError(f"Path has a colon but there's no colon at index 1: '{str_path}'")
            if len(str_path) >= 3 and str_path[2] != cls.path_delimiter:
                raise InvalidCharacterError(f"Path has a colon but index 2 is not a delimiter: '{str_path}'")
            if ":" in str_path[2:]:
                raise InvalidCharacterError(f"Path has a colon that's not at index 1: '{str_path}'")
        if str_path.endswith("."):
            raise InvalidCharacterError(f"Path cannot end with a dot ('.').")
        return str_path
    @classmethod
    @deco_cache()
    def _trim(cls, str_path):
        """Strip a leading delimiter (when roots aren't delimiters) and any trailing delimiter."""
        if not cls.verInfo.pathRootIsDelimiter and str_path.startswith(cls.path_delimiter):
            str_path = str_path[1:]
        if str_path.endswith(cls.path_delimiter) and len(str_path) > 1:
            str_path = str_path[0:-1]
        return str_path
    @classmethod
    @deco_cache()
    def _delimiter_suffix_if_root(cls, str_path):
        """Give a bare drive root like 'C:' its trailing delimiter."""
        if len(str_path) == 2 and str_path[1] == ":":
            return f"{str_path}{cls.path_delimiter}"
        return str_path
# Rebind the placeholder so methods can reference the class itself via Path.Path.
setattr(Path, "Path", Path)
| 28.025126 | 133 | 0.645329 |
acee85027050f0b3e03ff9162d0338d8a38dcb3d | 793 | py | Python | impl/recommender/dependency.py | dustywind/bachelor-thesis | be06aaeb1b4d73f727a19029a3416a9b8043194d | [
"MIT"
] | null | null | null | impl/recommender/dependency.py | dustywind/bachelor-thesis | be06aaeb1b4d73f727a19029a3416a9b8043194d | [
"MIT"
] | null | null | null | impl/recommender/dependency.py | dustywind/bachelor-thesis | be06aaeb1b4d73f727a19029a3416a9b8043194d | [
"MIT"
] | null | null | null | """Some comment
"""
#from . import DatabaseManager
class Dependency(object):
    """Base class for objects that must set up prerequisites before use.

    Subclasses override :meth:`build_dependencies` to create whatever the
    class needs (for example database tables); it is invoked automatically
    during construction.

    :param database_manager: manager used to build the dependencies; must
        provide whatever the inheriting class's build step requires.
    :type database_manager: recommender.DatabaseManager
    """
    def __init__(self, database_manager):
        self._database_manager = database_manager
        self.build_dependencies()
    def build_dependencies(self):
        """Prepare everything the inheriting class requires; must be overridden."""
        raise NotImplementedError()
| 37.761905 | 175 | 0.735183 |
acee87269de38c5afcc9577b696b2d9e96852134 | 149 | py | Python | Questoes/b1_q09_piso.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Questoes/b1_q09_piso.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | Questoes/b1_q09_piso.py | viniciusm0raes/python | c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce | [
"MIT"
] | null | null | null | metros = float(input('Quantos metros de piso vc deseja? '))
preco = 70  # price per square meter of flooring (R$)
total = metros*preco
# Report the total cost of the order formatted to two decimal places.
print('O preço total do pedido é: R$ %.2f' % (total))
| 18.625 | 59 | 0.66443 |
acee88352d2797acee6c95cb2b8986663d2481f7 | 3,168 | py | Python | speedtest.py | jsouthin/speedtest | 42d6bc4304102111f5a5504a522046880c14ce66 | [
"Apache-2.0"
] | null | null | null | speedtest.py | jsouthin/speedtest | 42d6bc4304102111f5a5504a522046880c14ce66 | [
"Apache-2.0"
] | null | null | null | speedtest.py | jsouthin/speedtest | 42d6bc4304102111f5a5504a522046880c14ce66 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
pd.options.mode.chained_assignment = None  # default='warn'; silence SettingWithCopyWarning for the dl/ul slices below
# Input log produced by speedtest-cli and the chart image this script writes.
logfile = "/home/pi/Projects/speedtest/speedtest.log"
chart_file = "/home/pi/Projects/speedtest/speedtest.png"
df = pd.read_table(logfile, header=None)
# Each log line looks like "[timestamp] message"; split it into those parts.
# you might need to update this if you have a slightly different setup that I do
df['time'] = df[0].apply(lambda x: x.split("[")[1].split("]")[0])
df['message'] = df[0].apply(lambda x: x.split("]")[1])
dl = df[df['message'].str.contains('Download')]
ul = df[df['message'].str.contains('Upload')]
# The Mbps value is the third whitespace-separated token of the message.
ul['upload speed'] = ul['message'].apply(lambda x: x.split(" ")[2])
dl['download speed'] = dl['message'].apply(lambda x: x.split(" ")[2])
out = dl[['time','download speed']].merge(ul[['time','upload speed']])
out['time'] = pd.to_datetime(out['time'])
out['download speed'] = out['download speed'].astype(float)
out['upload speed'] = out['upload speed'].astype(float)
out['date'] = out['time'].dt.floor("D")
# Daily p90/p50/p10 quantiles of both speeds, joined back onto every sample.
qt90 = out.groupby(pd.Grouper(key='time',freq='D')).quantile(q=0.9).reset_index().drop('time',axis=1)
qt50 = out.groupby(pd.Grouper(key='time',freq='D')).quantile(q=0.5).reset_index().drop('time',axis=1)
qt10 = out.groupby(pd.Grouper(key='time',freq='D')).quantile(q=0.1).reset_index().drop('time',axis=1)
# merge suffixes _x/_y are renamed back to the originals plus the quantile columns.
out = out.merge(qt90, how='inner', on='date').rename(columns={"download speed_x": "download speed", "upload speed_x": "upload speed","download speed_y":"p90 download","upload speed_y":"p90 upload"})
out = out.merge(qt10, how='inner', on='date').rename(columns={"download speed_x": "download speed", "upload speed_x": "upload speed","download speed_y":"p10 download","upload speed_y":"p10 upload"})
out = out.merge(qt50, how='inner', on='date').drop('date',axis=1).rename(columns={"download speed_x": "download speed", "upload speed_x": "upload speed","download speed_y":"p50 download","upload speed_y":"p50 upload"})
out['rolling dl'] = out['download speed'].rolling(window=12).mean()
fig, ax = plt.subplots(figsize=(10,6))
ax.scatter(out.time, out['download speed'], label='Download', marker='.', s=1, alpha = 0.5)
ax.scatter(out.time, out['upload speed'] , label='Upload', marker = '.', s=1, alpha = 0.5)
ax.plot(out.time, out['p50 download'], linestyle='--',linewidth=0.5, label='p50 download')
ax.plot(out.time, out['p50 upload'], linestyle='--',linewidth=0.5, label='p50 upload')
# BUG FIX: these two series are the p10 quantiles but were labelled "p90",
# which mislabelled the legend; label them as what they actually plot.
ax.plot(out.time, out['p10 download'], linestyle='--',linewidth=0.5, label='p10 download')
ax.plot(out.time, out['p10 upload'], linestyle='--',linewidth=0.5, label='p10 upload')
ax.plot(out.time, out['rolling dl'] ,linewidth=1, label='rolling 1hr m.a. dl')
ax.axhline(y=34,linestyle='dotted',linewidth=2,c='grey',label="Stay Fast Guarantee = 34 Mbps", alpha = 0.5)
ax.set_title("Speedtest.net internet speeds")
ax.set_xlabel("Datetime")
ax.set_ylabel("Mbps")
ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d %H:%M"))
ax.xaxis.set_minor_formatter(mdates.DateFormatter("%H:%M"))
plt.xticks(rotation=90)
#ax.set_ylim(4,40)
#ax.set_yscale('log')
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.savefig(chart_file, dpi=300, bbox_inches = "tight") | 56.571429 | 218 | 0.692866 |
acee885c1d9f4852b32e24ee6000ba792cd01775 | 2,421 | py | Python | pascal.py | ChuanqiTan/DeepLabv3.pytorch | 260db5812ae3c85f0aacd5ec9bc0e3d8c5d2d067 | [
"BSD-3-Clause"
] | null | null | null | pascal.py | ChuanqiTan/DeepLabv3.pytorch | 260db5812ae3c85f0aacd5ec9bc0e3d8c5d2d067 | [
"BSD-3-Clause"
] | null | null | null | pascal.py | ChuanqiTan/DeepLabv3.pytorch | 260db5812ae3c85f0aacd5ec9bc0e3d8c5d2d067 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import torch.utils.data as data
import os
from PIL import Image
from utils import preprocess
class VOCSegmentation(data.Dataset):
    """Pascal VOC 2012 dataset for semantic segmentation.

    Reads image/mask path pairs from ``<root>/VOC2012/list/train_aug.txt``
    (training) or ``val.txt`` (validation) and yields preprocessed
    (image, target) pairs.
    """
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
        "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
        "horse", "motorbike", "person", "potted-plant", "sheep", "sofa",
        "train", "tv/monitor",
    ]
    def __init__(self, root, train=True, transform=None,
                 target_transform=None, download=False, crop_size=None):
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.train = train
        self.crop_size = crop_size
        voc_root = os.path.join(self.root, "VOC2012")
        if download:
            self.download()
        # Pick the split file: augmented train list vs. validation list.
        split_name = "train_aug.txt" if self.train else "val.txt"
        list_file = os.path.join(voc_root, "list", split_name)
        self.images = []
        self.masks = []
        with open(list_file, "r") as lines:
            for line in lines:
                fields = line.split()
                image_path = voc_root + fields[0]
                mask_path = voc_root + fields[1]
                assert os.path.isfile(image_path)
                assert os.path.isfile(mask_path)
                self.images.append(image_path)
                self.masks.append(mask_path)
    def __getitem__(self, index):
        """Return the preprocessed (image, target) pair at *index*."""
        image = Image.open(self.images[index]).convert("RGB")
        target = Image.open(self.masks[index])
        image, target = preprocess(
            image,
            target,
            flip=bool(self.train),
            scale=(0.5, 2.0) if self.train else None,
            crop=(self.crop_size, self.crop_size),
            is_train=self.train,
        )
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target
    def __len__(self):
        """Number of image/mask pairs in the split."""
        return len(self.images)
    def download(self):
        """Not implemented; obtain the VOC data manually."""
        raise NotImplementedError("Automatic download not yet implemented.")
| 25.484211 | 76 | 0.534077 |
acee885f1442a9a211c78bc8309fcadd9aed6bcb | 1,456 | py | Python | ceilometer/publisher/__init__.py | orbitfp7/ceilometer | 9905da14bbdf06f95e1e056c9ca0e18087214d0f | [
"Apache-2.0"
] | 2 | 2015-09-07T09:15:26.000Z | 2015-09-30T02:13:23.000Z | ceilometer/publisher/__init__.py | orbitfp7/ceilometer | 9905da14bbdf06f95e1e056c9ca0e18087214d0f | [
"Apache-2.0"
] | null | null | null | ceilometer/publisher/__init__.py | orbitfp7/ceilometer | 9905da14bbdf06f95e1e056c9ca0e18087214d0f | [
"Apache-2.0"
] | 1 | 2019-09-16T02:11:41.000Z | 2019-09-16T02:11:41.000Z | #
# Copyright 2013 Intel Corp.
# Copyright 2013-2014 eNovance
#
# Author: Yunhong Jiang <yunhong.jiang@intel.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo.utils import netutils
import six
from stevedore import driver
def get_publisher(url, namespace='ceilometer.publisher'):
    """Load and instantiate the publisher driver selected by *url*'s scheme.

    :param url: URL identifying the publisher; its scheme picks the driver.
    :param namespace: stevedore namespace searched for driver entry points.
    """
    split_url = netutils.urlsplit(url)
    manager = driver.DriverManager(namespace, split_url.scheme)
    return manager.driver(split_url)
@six.add_metaclass(abc.ABCMeta)
class PublisherBase(object):
    """Abstract base for plugins that publish collected samples."""
    def __init__(self, parsed_url):
        pass
    @abc.abstractmethod
    def publish_samples(self, context, samples):
        """Push *samples* into their final conduit; implemented by concrete drivers."""
| 30.333333 | 75 | 0.736264 |
acee893a0d2a10962d69430173a294d40f837876 | 1,164 | py | Python | api/urls.py | Kraloz/ngen | c20d64819af2f3a9ba7893d8e2be98a86c7ea8f3 | [
"MIT"
] | null | null | null | api/urls.py | Kraloz/ngen | c20d64819af2f3a9ba7893d8e2be98a86c7ea8f3 | [
"MIT"
] | null | null | null | api/urls.py | Kraloz/ngen | c20d64819af2f3a9ba7893d8e2be98a86c7ea8f3 | [
"MIT"
] | null | null | null | """tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from api import views
# The router auto-generates list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'plantas', views.PlantaViewSet)
router.register(r'cultivos', views.CultivoViewSet)
router.register(r'germinaciones', views.GerminacionViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] | 37.548387 | 80 | 0.745704 |
acee895951325eec6aeb19310c89824a66d2880f | 9,517 | py | Python | Model.py | CODEJIN/listen_attend_spell | d7629458c95dfab77091f4e127cf1cd074e44a50 | [
"MIT"
] | 1 | 2020-07-20T17:01:15.000Z | 2020-07-20T17:01:15.000Z | Model.py | CODEJIN/listen_attend_spell | d7629458c95dfab77091f4e127cf1cd074e44a50 | [
"MIT"
] | null | null | null | Model.py | CODEJIN/listen_attend_spell | d7629458c95dfab77091f4e127cf1cd074e44a50 | [
"MIT"
] | 1 | 2020-02-15T23:28:11.000Z | 2020-02-15T23:28:11.000Z | import tensorflow as tf
import numpy as np
import json, os, time
from threading import Thread
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from datetime import datetime
from Feeder import Feeder
import Modules
with open('Hyper_Parameters.json', 'r') as f:
hp_Dict = json.load(f)
class LAS:
    """Listen, Attend and Spell model: builds the listener/speller Keras graphs
    and drives training, checkpointing and greedy inference. All hyperparameters
    come from the module-level hp_Dict."""
    def __init__(self, is_Training= False):
        self.feeder = Feeder(is_Training= is_Training)
        self.Model_Generate()
    def Model_Generate(self):
        """Build the train/inference models and the Adam optimizer."""
        layer_Dict = {}
        layer_Dict['Mel'] = tf.keras.layers.Input(shape=[None, hp_Dict['Sound']['Mel_Dim']], dtype= tf.float32)
        layer_Dict['Mel_Length'] = tf.keras.layers.Input(shape=[], dtype= tf.int32)
        layer_Dict['Token'] = tf.keras.layers.Input(shape=[None,], dtype= tf.int32)
        # Inference takes precomputed listener outputs (bi-directional, hence * 2).
        layer_Dict['Inference_Listener'] = tf.keras.layers.Input(shape=[None, hp_Dict['Listener']['Uni_Direction_Cell_Size'][-1] * 2], dtype= tf.float32)
        layer_Dict['Listener'] = Modules.Listner()(layer_Dict['Mel'])
        layer_Dict['Speller'] = Modules.Speller()
        layer_Dict['Train', 'Speller'], _ = layer_Dict['Speller']([
            layer_Dict['Token'],
            layer_Dict['Listener'],
            layer_Dict['Mel_Length']
            ])
        layer_Dict['Inference', 'Speller'], layer_Dict['Inference', 'Attention'] = layer_Dict['Speller']([
            layer_Dict['Token'],
            layer_Dict['Inference_Listener'],
            layer_Dict['Mel_Length']
            ])
        self.model_Dict = {
            'Train': tf.keras.Model(
                inputs=[layer_Dict['Mel'], layer_Dict['Mel_Length'], layer_Dict['Token']],
                outputs= layer_Dict['Train', 'Speller']
                ),
            ('Inference', 'Listener'): tf.keras.Model( # Prevents repeated computation of the encoder
                inputs= layer_Dict['Mel'],
                outputs= layer_Dict['Listener']
                ),
            ('Inference', 'Speller'): tf.keras.Model(
                inputs= [layer_Dict['Inference_Listener'], layer_Dict['Mel_Length'], layer_Dict['Token']],
                outputs= [layer_Dict['Inference', 'Speller'], layer_Dict['Inference', 'Attention']]
                ),
            }
        self.model_Dict['Train'].summary()
        self.model_Dict['Inference', 'Listener'].summary()
        self.model_Dict['Inference', 'Speller'].summary()
        # The optimizer must be created outside of @tf.function.
        self.optimizer = tf.keras.optimizers.Adam(
            learning_rate= hp_Dict['Train']['Learning_Rate'],
            beta_1= hp_Dict['Train']['ADAM']['Beta1'],
            beta_2= hp_Dict['Train']['ADAM']['Beta2'],
            epsilon= hp_Dict['Train']['ADAM']['Epsilon'],
            )
    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, hp_Dict['Sound']['Mel_Dim']], dtype=tf.float32),
            tf.TensorSpec(shape=[None,], dtype=tf.int32),
            tf.TensorSpec(shape=[None, None], dtype=tf.int32),
            tf.TensorSpec(shape=[None,], dtype=tf.int32)
            ],
        autograph= True,
        experimental_relax_shapes= True
        )
    def Train_Step(self, mels, mel_lengths, tokens, token_lengths):
        """One optimizer step: teacher-forced cross-entropy on shifted tokens, masked by token length."""
        with tf.GradientTape() as tape:
            logits = self.model_Dict['Train'](inputs= [mels, mel_lengths, tokens[:, :-1]], training= True)
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels= tokens[:, 1:],
                logits= logits
                )
            loss *= tf.sequence_mask(
                lengths= token_lengths,
                maxlen= tf.shape(loss)[-1],
                dtype= tf.float32
                )
            loss = tf.reduce_mean(loss)
        gradients = tape.gradient(loss, self.model_Dict['Train'].trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.model_Dict['Train'].trainable_variables))
        return loss
    # @tf.function
    def Inference_Listener_Step(self, mels):
        """Run only the listener (encoder) so its output can be reused across decode steps."""
        return self.model_Dict['Inference', 'Listener'](inputs= mels, training= False)
    # Don't use @tf.function here. it makes slower.
    def Inference_Speller_Step(self, listeners, mel_lengths, initial_tokens):
        """Greedy decode: re-run the speller on the growing token sequence for Max_Length steps."""
        tokens = tf.zeros(shape=[tf.shape(listeners)[0], 0], dtype= tf.int32)
        for _ in range(hp_Dict['Speller']['Max_Length']):
            # Re-prepend the start token every step; argmax replaces the sequence.
            tokens = tf.concat([initial_tokens, tokens], axis=-1)
            logits, attention_History = self.model_Dict['Inference', 'Speller'](inputs= [listeners, mel_lengths, tokens], training= False)
            tokens = tf.argmax(logits, axis=-1, output_type= tf.int32)
        return tokens, attention_History
    def Restore(self):
        """Load the training checkpoint if one exists; otherwise leave weights untouched."""
        checkpoint_File_Path = os.path.join(hp_Dict['Checkpoint_Path'], 'CHECKPOINT.H5').replace('\\', '/')
        if not os.path.exists('{}.index'.format(checkpoint_File_Path)):
            print('There is no checkpoint.')
            return
        self.model_Dict['Train'].load_weights(checkpoint_File_Path)
        print('Checkpoint \'{}\' is loaded.'.format(checkpoint_File_Path))
    def Train(self):
        """Infinite training loop with periodic checkpoint saves and inference runs."""
        def Run_Inference():
            # Wav paths to monitor are listed one per line in this text file.
            wav_Path_List = []
            with open('Inference_Wav_Path_in_Train.txt', 'r') as f:
                for line in f.readlines():
                    wav_Path_List.append(line.strip())
            self.Inference(wav_Path_List)
        step = 0
        Run_Inference()
        while True:
            start_Time = time.time()
            loss = self.Train_Step(**self.feeder.Get_Train_Pattern())
            step += 1
            display_List = [
                'Time: {:0.3f}'.format(time.time() - start_Time),
                'Step: {}'.format(step),
                'Loss: {:0.5f}'.format(loss)
                ]
            print('\t\t'.join(display_List))
            if step % hp_Dict['Train']['Checkpoint_Save_Timing'] == 0:
                os.makedirs(os.path.join(hp_Dict['Checkpoint_Path']).replace("\\", "/"), exist_ok= True)
                self.model_Dict['Train'].save_weights(os.path.join(hp_Dict['Checkpoint_Path'], 'CHECKPOINT.H5').replace('\\', '/'))
            if step % hp_Dict['Train']['Inference_Timing'] == 0:
                Run_Inference()
    def Inference(self, wav_Path_List, label= None):
        """Decode the given wavs and export attention plots on a daemon thread."""
        print('Inference running...')
        inference_Pattern = self.feeder.Get_Inference_Pattern(wav_Path_List)
        listeners = self.Inference_Listener_Step(mels= inference_Pattern['mels'])
        tokens, attention_History = self.Inference_Speller_Step(
            listeners= listeners,
            mel_lengths= inference_Pattern['mel_lengths'],
            initial_tokens=inference_Pattern['initial_tokens']
            )
        export_Inference_Thread = Thread(
            target= self.Export_Inference,
            args= [
                wav_Path_List,
                inference_Pattern['mels'],
                inference_Pattern['mel_lengths'],
                tokens.numpy(),
                attention_History.numpy(),
                label or datetime.now().strftime("%Y%m%d.%H%M%S")
                ]
            )
        export_Inference_Thread.daemon = True
        export_Inference_Thread.start()
    def Export_Inference(self, wav_Path_List, mel_List, mel_Length_List, token_List, attention_History_List, label):
        """Render one mel + attention-history figure per wav into <Inference_Path>/Plot."""
        os.makedirs(os.path.join(hp_Dict['Inference_Path'], 'Plot').replace("\\", "/"), exist_ok= True)
        index_Token_Dict = {index: token for token, index in self.feeder.token_Index_Dict.items()}
        for index, (wav_Path, mel, mel_Length, token, attention_History) in enumerate(zip(wav_Path_List, mel_List, mel_Length_List, token_List, attention_History_List)):
            mel = mel[:mel_Length]
            # Attention time axis is downsampled by the listener's pyramid; trim padding accordingly.
            attention_History = attention_History[:, :(mel_Length + (mel_Length % 2)) // (2 ** (len(hp_Dict['Listener']['Uni_Direction_Cell_Size']) - 1))]
            # Cut everything after the first end-of-sequence token, if any.
            if len(np.where(token == self.feeder.token_Index_Dict['<E>'])[0]) > 0:
                stop_Index = np.where(token == self.feeder.token_Index_Dict['<E>'])[0][0]
                token = token[:stop_Index]
                attention_History = attention_History[:stop_Index]
            token = [index_Token_Dict[x] for x in token]
            if len(token) == 0:
                print('The exported token length of \'{}\' is zero. It is skipped.'.format(wav_Path))
                continue
            new_Figure = plt.figure(figsize=(24, 24), dpi=100)
            plt.subplot2grid((3, 1), (0, 0))
            plt.imshow(np.transpose(mel), aspect='auto', origin='lower')
            plt.title('Mel Path: {}'.format(wav_Path))
            plt.colorbar()
            plt.subplot2grid((3, 1), (1, 0), rowspan=2)
            plt.imshow(attention_History, aspect='auto', origin='lower')
            plt.title('Attention history Inference: {}'.format(''.join(token if token[-1] != self.feeder.token_Index_Dict['<E>'] else token[:-1])))
            plt.yticks(
                range(attention_History.shape[0]),
                token,
                fontsize = 10
                )
            plt.colorbar()
            plt.tight_layout()
            plt.savefig(
                os.path.join(hp_Dict['Inference_Path'], 'Plot', '{}.IDX_{}.PNG'.format(label, index)).replace("\\", "/")
                )
            plt.close(new_Figure)
# Script entry point: build the model, restore the latest checkpoint, then train.
if __name__ == '__main__':
    new_Model = LAS(is_Training= True)
    new_Model.Restore()
new_Model.Train() | 43.259091 | 169 | 0.579384 |
acee895c92e08679a70abfa486cf7bad6ea7c82c | 4,988 | py | Python | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeApplySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeApplySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeApplySendModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MonitorInfo import MonitorInfo
from alipay.aop.api.domain.PayeeInfo import PayeeInfo
class AlipayCommerceEducateTuitioncodeApplySendModel(object):
    """Request model for the educate tuition-code apply-send API.

    Plain data holder: each business field is exposed as a property, and
    ``to_alipay_dict``/``from_alipay_dict`` convert to and from the wire
    dictionary format. Nested objects (``monitor_info``, ``payee_info``)
    are converted through their own model classes.
    """

    # Single source of truth for the serialized field names, in wire order.
    # Replaces the previous copy-pasted per-field blocks in
    # to_alipay_dict/from_alipay_dict (identical behavior, no duplication).
    _FIELDS = (
        'complain_url',
        'fund_type',
        'monitor_info',
        'out_apply_id',
        'payee_info',
        'scene_type',
        'smid',
        'sys_service_provider_id',
    )

    def __init__(self):
        self._complain_url = None
        self._fund_type = None
        self._monitor_info = None
        self._out_apply_id = None
        self._payee_info = None
        self._scene_type = None
        self._smid = None
        self._sys_service_provider_id = None

    @property
    def complain_url(self):
        return self._complain_url

    @complain_url.setter
    def complain_url(self, value):
        self._complain_url = value

    @property
    def fund_type(self):
        return self._fund_type

    @fund_type.setter
    def fund_type(self, value):
        self._fund_type = value

    @property
    def monitor_info(self):
        return self._monitor_info

    @monitor_info.setter
    def monitor_info(self, value):
        # Accept either a MonitorInfo instance or its dict representation.
        if isinstance(value, MonitorInfo):
            self._monitor_info = value
        else:
            self._monitor_info = MonitorInfo.from_alipay_dict(value)

    @property
    def out_apply_id(self):
        return self._out_apply_id

    @out_apply_id.setter
    def out_apply_id(self, value):
        self._out_apply_id = value

    @property
    def payee_info(self):
        return self._payee_info

    @payee_info.setter
    def payee_info(self, value):
        # Accept either a PayeeInfo instance or its dict representation.
        if isinstance(value, PayeeInfo):
            self._payee_info = value
        else:
            self._payee_info = PayeeInfo.from_alipay_dict(value)

    @property
    def scene_type(self):
        return self._scene_type

    @scene_type.setter
    def scene_type(self, value):
        self._scene_type = value

    @property
    def smid(self):
        return self._smid

    @smid.setter
    def smid(self, value):
        self._smid = value

    @property
    def sys_service_provider_id(self):
        return self._sys_service_provider_id

    @sys_service_provider_id.setter
    def sys_service_provider_id(self, value):
        self._sys_service_provider_id = value

    def to_alipay_dict(self):
        """Return this model as a plain dict, omitting falsy fields."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Falsy values (None, '', 0, ...) are skipped, matching the
                # original per-field `if self.<field>:` checks.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayCommerceEducateTuitioncodeApplySendModel()
        for name in AlipayCommerceEducateTuitioncodeApplySendModel._FIELDS:
            if name in d:
                # Property setters convert nested dicts (monitor_info,
                # payee_info) into their model objects.
                setattr(o, name, d[name])
        return o
| 32.38961 | 97 | 0.61267 |
acee8b227df116b60e78f874a48e32cce89eb2c7 | 5,505 | py | Python | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/show_cluster_response.py | handsome-baby/huaweicloud-sdk-python-v3 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/show_cluster_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/show_cluster_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowClusterResponse(SdkResponse):
    """Response model for the CCE "show cluster" API (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    # Python attribute name -> OpenAPI type name (informational; used by SDK tooling).
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'ShowClusterMetadata',
        'spec': 'V3ClusterSpec',
        'status': 'ClusterStatus'
    }
    # Python attribute name -> JSON key in the wire payload.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """ShowClusterResponse - a model defined in huaweicloud sdk

        All parameters are optional; only the ones provided are set.
        """
        super().__init__()
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status
    @property
    def api_version(self):
        """Gets the api_version of this ShowClusterResponse.

        API版本,固定值“v3”,该值不可修改。

        :return: The api_version of this ShowClusterResponse.
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this ShowClusterResponse.

        API版本,固定值“v3”,该值不可修改。

        :param api_version: The api_version of this ShowClusterResponse.
        :type: str
        """
        self._api_version = api_version
    @property
    def kind(self):
        """Gets the kind of this ShowClusterResponse.

        API类型,固定值“Cluster”或“cluster”,该值不可修改。

        :return: The kind of this ShowClusterResponse.
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this ShowClusterResponse.

        API类型,固定值“Cluster”或“cluster”,该值不可修改。

        :param kind: The kind of this ShowClusterResponse.
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this ShowClusterResponse.

        :return: The metadata of this ShowClusterResponse.
        :rtype: ShowClusterMetadata
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this ShowClusterResponse.

        :param metadata: The metadata of this ShowClusterResponse.
        :type: ShowClusterMetadata
        """
        self._metadata = metadata
    @property
    def spec(self):
        """Gets the spec of this ShowClusterResponse.

        :return: The spec of this ShowClusterResponse.
        :rtype: V3ClusterSpec
        """
        return self._spec
    @spec.setter
    def spec(self, spec):
        """Sets the spec of this ShowClusterResponse.

        :param spec: The spec of this ShowClusterResponse.
        :type: V3ClusterSpec
        """
        self._spec = spec
    @property
    def status(self):
        """Gets the status of this ShowClusterResponse.

        :return: The status of this ShowClusterResponse.
        :rtype: ClusterStatus
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this ShowClusterResponse.

        :param status: The status of this ShowClusterResponse.
        :type: ClusterStatus
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of emitting their values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Attribute-wise comparison via the instance __dict__.
        if not isinstance(other, ShowClusterResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.84507 | 91 | 0.565668 |
acee8cbe3672e87c9d780a1ae9d661b1642331ac | 28,283 | py | Python | subgrounds/query.py | originalpkbims/subgrounds-pkbims | 03271135d985bc4a53129edb0cb2391555012270 | [
"Apache-2.0"
] | null | null | null | subgrounds/query.py | originalpkbims/subgrounds-pkbims | 03271135d985bc4a53129edb0cb2391555012270 | [
"Apache-2.0"
] | null | null | null | subgrounds/query.py | originalpkbims/subgrounds-pkbims | 03271135d985bc4a53129edb0cb2391555012270 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from functools import partial, reduce
from re import L
from typing import Any, Callable, Optional, Tuple
from pipe import map, traverse, where, take, take_while
import math
import logging
logger = logging.getLogger('subgrounds')
from subgrounds.schema import (
TypeMeta,
SchemaMeta,
TypeRef,
typeref_of_input_field
)
from subgrounds.utils import extract_data, filter_none, identity, rel_complement, union
# ================================================================
# Query definitions, data structures and types
# ================================================================
class InputValue:
    """Namespace grouping the AST node types used to represent GraphQL input values."""
    @dataclass(frozen=True)
    class T(ABC):
        """Abstract base class of all GraphQL input value nodes."""
        @property
        @abstractmethod
        def graphql(self) -> str:
            """ Returns a GraphQL string representation of the input value

            Returns:
              str: The GraphQL string representation of the input value
            """
            pass
        @property
        def is_variable(self) -> bool:
            """ Returns True i.f.f. the input value is of type Variable

            Returns:
              bool: True i.f.f. the input value is of type Variable, otherwise False
            """
            return False
        @property
        def is_number(self) -> bool:
            """ Returns True i.f.f. the input value is of type Float or Int

            Returns:
              bool: True i.f.f. the input value is of type Float or Int, otherwise False
            """
            return False
    @dataclass(frozen=True)
    class Null(T):
        # The GraphQL ``null`` literal.
        @property
        def graphql(self) -> str:
            return "null"
    @dataclass(frozen=True)
    class Int(T):
        # Integer literal; rendered unquoted.
        value: int
        @property
        def graphql(self) -> str:
            return str(self.value)
        @property
        def is_number(self) -> bool:
            return True
    @dataclass(frozen=True)
    class Float(T):
        # Float literal; rendered unquoted.
        value: float
        @property
        def graphql(self) -> str:
            return str(self.value)
        @property
        def is_number(self) -> bool:
            return True
    @dataclass(frozen=True)
    class String(T):
        # String literal; rendered with surrounding double quotes.
        value: str
        @property
        def graphql(self) -> str:
            return f"\"{self.value}\""
    @dataclass(frozen=True)
    class Boolean(T):
        # Boolean literal; rendered lowercase (``true``/``false``).
        value: bool
        @property
        def graphql(self) -> str:
            return str(self.value).lower()
    @dataclass(frozen=True)
    class Enum(T):
        # Enum value; rendered bare (no quotes).
        value: str
        @property
        def graphql(self) -> str:
            return self.value
    @dataclass(frozen=True)
    class Variable(T):
        # Reference to a query variable; rendered as ``$name``.
        name: str
        @property
        def graphql(self) -> str:
            return f'${self.name}'
        @property
        def is_variable(self) -> bool:
            return True
    @dataclass(frozen=True)
    class List(T):
        # List of input values; rendered as ``[v1, v2, ...]``.
        value: list[InputValue.T]
        @property
        def graphql(self) -> str:
            return f"[{', '.join([val.graphql for val in self.value])}]"
    @dataclass(frozen=True)
    class Object(T):
        # Input object; rendered as ``{key: value, ...}``.
        value: dict[str, InputValue.T]
        @property
        def graphql(self) -> str:
            return f"{{{', '.join([f'{key}: {value.graphql}' for key, value in self.value.items()])}}}"
@dataclass(frozen=True)
class VariableDefinition:
    """A GraphQL operation variable definition, e.g. ``$first: Int = 100``."""
    name: str
    type_: TypeRef.T
    default: Optional[InputValue.T] = None

    @property
    def graphql(self) -> str:
        """Render this definition as GraphQL (``$name: Type`` plus an optional default)."""
        declaration = f'${self.name}: {TypeRef.graphql(self.type_)}'
        if self.default is None:
            return declaration
        return f'{declaration} = {self.default.graphql}'
@dataclass(frozen=True)
class Argument:
    """A named GraphQL field argument, e.g. ``first: 100``."""
    name: str
    value: InputValue.T

    @property
    def graphql(self) -> str:
        """Render as ``name: value`` GraphQL syntax."""
        return '{}: {}'.format(self.name, self.value.graphql)
@dataclass(frozen=True)
class Selection:
"""
Raises:
Exception: [description]
Returns:
[type]: [description]
"""
fmeta: TypeMeta.FieldMeta
alias: Optional[str] = None
arguments: list[Argument] = field(default_factory=list)
selection: list[Selection] = field(default_factory=list)
@property
def key(self):
if self.alias:
return self.alias
else:
return self.fmeta.name
@property
def args_graphql(self) -> str:
if self.arguments:
return f'({", ".join([arg.graphql for arg in self.arguments])})'
else:
return ""
def graphql(self, level: int = 0) -> str:
indent = " " * level
if self.alias:
alias_str = f'{self.alias}: '
else:
alias_str = ''
match (self.selection):
case None | []:
return f"{indent}{alias_str}{self.fmeta.name}{self.args_graphql}"
case inner_selection:
inner_str = "\n".join(
[f.graphql(level=level + 1) for f in inner_selection]
)
return f"{indent}{alias_str}{self.fmeta.name}{self.args_graphql} {{\n{inner_str}\n{indent}}}"
@property
def data_path(self) -> list[str]:
match self:
case Selection(TypeMeta.FieldMeta(name), None, _, []) | Selection(TypeMeta.FieldMeta(_), name, _, []):
return [name]
case Selection(TypeMeta.FieldMeta(name), None, _, [inner_select, *_]) | Selection(TypeMeta.FieldMeta(_), name, _, [inner_select, *_]):
return [name] + inner_select.data_path
@property
def data_paths(self) -> list[list[str]]:
def f(select: Selection, keys: list[str] = []):
match select:
case Selection(TypeMeta.FieldMeta(name), None, _, []) | Selection(TypeMeta.FieldMeta(_), name, _, []):
yield [*keys, name]
case Selection(TypeMeta.FieldMeta(name), None, _, inner) | Selection(TypeMeta.FieldMeta(_), name, _, inner):
for select in inner:
yield from f(select, keys=[*keys, name])
return list(f(self))
def contains_list(self: Selection) -> bool:
if self.fmeta.type_.is_list:
return True
else:
return any(self.selection | map(Selection.contains_list))
@staticmethod
def split(select: Selection) -> list[Selection]:
match select:
case Selection(_, _, _, [] | None):
return [select]
case Selection(fmeta, alias, args, inner_select):
return list(inner_select | map(Selection.split) | traverse | map(lambda inner_select: Selection(fmeta, alias, args, inner_select)))
def extract_data(self, data: dict | list[dict]) -> list[Any] | Any:
return extract_data(self.data_path, data)
def add_selections(self: Selection, new_selections: list[Selection]) -> Selection:
return Selection(
fmeta=self.fmeta,
alias=self.alias,
selection=union(
self.selection,
new_selections,
key=lambda select: select.fmeta.name,
combine=Selection.combine
)
)
def add_selection(self: Selection, new_selection: Selection) -> Selection:
return self.add_selections([new_selection])
@staticmethod
def remove_selections(select: Selection, selections_to_remove: list[Selection]) -> Selection:
def combine(select: Selection, selection_to_remove: Selection) -> Optional[Selection]:
if selection_to_remove.selection == []:
return None
else:
return Selection.remove_selections(select, selection_to_remove.selection)
return Selection(
fmeta=select.fmeta,
alias=select.alias,
arguments=select.arguments,
selection=filter_none(union(
select.selection,
selections_to_remove,
key=lambda s: s.fmeta.name,
combine=combine
))
)
@staticmethod
def remove_selection(select: Selection, selection_to_remove: Selection) -> Selection:
return Selection.remove_selections(select, [selection_to_remove])
@staticmethod
def combine(select: Selection, other: Selection) -> Selection:
if select.key != select.key:
raise Exception(f"Selection.combine: {select.key} != {select.key}")
return Selection(
fmeta=select.fmeta,
alias=select.alias,
arguments=select.arguments,
selection=filter_none(union(
select.selection,
other.selection,
key=lambda select: select.fmeta.name,
combine=Selection.combine
))
)
@staticmethod
def consolidate(selections: list[Selection]) -> list[Selection]:
def f(selections: list[Selection], other: Selection) -> list[Selection]:
try:
next(selections | where(lambda select: select.key == other.key))
return list(selections | map(lambda select: Selection.combine(select, other) if select.key == other.key else select))
except StopIteration:
return selections + [other]
return reduce(f, selections, [])
@staticmethod
def contains(select: Selection, other: Selection) -> bool:
if (select.fmeta == other.fmeta and rel_complement(other.selection, select.selection, key=lambda s: s.fmeta.name) == []):
return all(
other.selection
| map(lambda s: Selection.contains(next(filter(lambda s_: s.fmeta.name == s_.fmeta.name, select.selection)), s))
)
else:
return False
@staticmethod
def contains_argument(select: Selection, arg_name: str) -> bool:
try:
next(filter(lambda arg: arg.name == arg_name, select.arguments))
return True
except StopIteration:
return any(select.selection | map(partial(Selection.contains_argument, arg_name=arg_name)))
@staticmethod
def get_argument(select: Selection, target: str) -> Optional[Argument]:
try:
return next(select.arguments | where(lambda arg: arg.name == target))
except StopIteration:
try:
return next(
select.selection
| map(partial(Selection.get_argument, target=target))
| where(lambda x: x is not None)
)
except StopIteration:
return None
@staticmethod
def substitute_arg(select: Selection, arg_name: str, replacement: Argument | list[Argument]) -> Selection:
return Selection(
fmeta=select.fmeta,
alias=select.alias,
arguments=list(
select.arguments
| map(lambda arg: replacement if arg.name == arg_name else arg)
| traverse
),
selection=list(
select.selection
| map(partial(Selection.substitute_arg, arg_name=arg_name, replacement=replacement))
)
)
def select(self: Selection, other: Selection) -> Selection:
if other.selection == []:
return self
else:
return Selection(
fmeta=self.fmeta,
alias=self.alias,
arguments=self.arguments,
selection=list(
other.selection
| map(lambda s: next(
self.selection
| where(lambda s_: s_.fmeta.name == s.fmeta.name)
| map(lambda s_: Selection.select(s_, s))
| take(1)
))
)
)
# TODO: Function to recover an approximate selection from a JSON data object
@staticmethod
def of_json(data: dict) -> Selection:
pass
@dataclass(frozen=True)
class Query:
    """A GraphQL query operation: an optional name, root selections and
    variable definitions. Instances are immutable; all "mutators" below
    return new ``Query`` objects."""
    name: Optional[str] = None
    selection: list[Selection] = field(default_factory=list)
    # Variables as query arguments, not the values of those variables
    # NOTE: Temporarily add the values with the definitions
    variables: list[VariableDefinition] = field(default_factory=list)
    @property
    def graphql(self) -> str:
        """ Returns a string containing a GraphQL query matching the current query

        Returns:
          str: The string containing the GraphQL query
        """
        selection_str = "\n".join(
            [select.graphql(level=1) for select in self.selection]
        )
        if len(self.variables) > 0:
            args_str = f'({", ".join([vardef.graphql for vardef in self.variables])})'
        else:
            args_str = ''
        return f'query{args_str} {{\n{selection_str}\n}}'
    def add_selections(self: Query, new_selections: list[Selection]) -> Query:
        """ Returns a new Query containing all selections in 'query' along with
        the new selections in `new_selections`

        NOTE(review): the returned Query is built without ``variables=self.variables``,
        so variable definitions are dropped here -- confirm whether intentional.

        Args:
          self (Query): The query to which new selections are to be added
          new_selections (list[Selection]): The new selections to be added to the query

        Returns:
          Query: A new `Query` objects containing all selections
        """
        return Query(
            name=self.name,
            selection=union(
                self.selection,
                new_selections,
                key=lambda select: select.key,
                combine=Selection.combine
            )
        )
    @staticmethod
    def add_selection(query: Query, new_selection: Selection) -> Query:
        """ Same as `add_selections`, but for a single `new_selection`.

        Args:
          query (Query): The query to which new selections are to be added
          new_selection (Selection): The new selection to be added to the query

        Returns:
          Query: A new `Query` objects containing all selections
        """
        return Query.add_selections(query, [new_selection])
    @staticmethod
    def remove_selections(query: Query, selections_to_remove: list[Selection]) -> Query:
        """ Returns a new `Query` object containing all selections in `query` minus the selections
        sepcified in `selections_to_remove`.

        Note: Selections in `selections_to_remove` do not need to be "full" selections (i.e.: a selections all the way to
        leaves of the GraphQL schema).

        Args:
          query (Query): The query to which selections have to be removed
          selections_to_remove (list[Selection]): The selections to remove from the query

        Returns:
          Query: A new `Query` object containing the original query selections without the
          selections in `selections_to_remove`
        """
        def combine(select: Selection, selection_to_remove: Selection) -> Optional[Selection]:
            # An empty subtree means "remove this whole branch".
            if selection_to_remove.selection == []:
                return None
            else:
                return Selection.remove_selections(select, selection_to_remove.selection)
        return Query(
            name=query.name,
            selection=filter_none(union(
                query.selection,
                selections_to_remove,
                key=lambda s: s.fmeta.name,
                combine=combine
            )),
            variables=query.variables
        )
    @staticmethod
    def remove_selection(query: Query, selection_to_remove: Selection) -> Query:
        """ Same as `remove_selections` but for a single selection

        Note: `selection_to_remove` does not need to be a "full" selection (i.e.: a selection all the way to
        leaves of the GraphQL schema).

        Example:

        ```python
        expected = Selection(TypeMeta.FieldMeta('pair', '', [], TypeRef.non_null_list('Pair')), None, [], [])
        og_selection = Selection(TypeMeta.FieldMeta('pair', '', [], TypeRef.non_null_list('Pair')), None, [], [
          Selection(TypeMeta.FieldMeta('token0', '', [], TypeRef.Named('Token')), None, [], [
            Selection(TypeMeta.FieldMeta('id', '', [], TypeRef.Named('String')), None, [], []),
            Selection(TypeMeta.FieldMeta('name', '', [], TypeRef.Named('String')), None, [], []),
            Selection(TypeMeta.FieldMeta('symbol', '', [], TypeRef.Named('String')), None, [], []),
          ])
        ])
        selection_to_remove = Selection(TypeMeta.FieldMeta('token0', '', [], TypeRef.Named('Token')), None, [], [])
        new_selection = Selection.remove_selection(og_selection, selection_to_remove)
        self.assertEqual(new_selection, expected)
        ```

        Args:
          query (Query): The query to which a selection has to be removed
          selection_to_remove (Selection): The selection to remove from the query

        Returns:
          Query: A new `Query` object containing the original query selections without the
          selection `selection_to_remove`
        """
        return Query.remove_selections(query, [selection_to_remove])
    @staticmethod
    def remove(query: Query, other: Query) -> Query:
        """ Same as `remove_selections` but takes another `Query` object as argument
        instead of a list of selections

        Note: `other` does not need to include "full" selections (i.e.: selections all the way to
        leaves of the GraphQL schema).

        Args:
          query (Query): The query for which selections are to be removed
          other (Query): A query containing selections that will be removed from `query`

        Returns:
          Query: A new `Query` object containing the original query selections without the
          selections in `other`
        """
        return reduce(Query.remove_selection, other.selection, query)
    @staticmethod
    def combine(query: Query, other: Query) -> Query:
        """ Returns a new `Query` object containing the selections of both `query` and `other`

        NOTE(review): like ``add_selections``, this drops both queries' variable
        definitions -- confirm whether intentional.

        Args:
          query (Query): A `Query` object
          other (Query): Another `Query` object

        Returns:
          Query: A new `Query` object containing the selections of both `query` and `other`
        """
        return Query(
            name=query.name,
            selection=union(
                query.selection,
                other.selection,
                key=lambda select: select.key,
                combine=Selection.combine
            )
        )
    @staticmethod
    def transform(
        query: Query,
        variable_f: Callable[[VariableDefinition], VariableDefinition] = identity,
        selection_f: Callable[[Selection], Selection] = identity
    ) -> Query:
        """ Returns a new `Query` whose variable definitions are mapped through
        `variable_f` and whose selections are mapped through `selection_f`.

        NOTE(review): the fold uses `Query.add_selection`, which rebuilds the query
        without variables -- the mapped variables may be lost; verify.
        """
        return reduce(Query.add_selection, query.selection | map(selection_f) | traverse, Query(
            name=query.name,
            variables=list(query.variables | map(variable_f) | traverse)
        ))
    @staticmethod
    def contains_selection(query: Query, selection: Selection) -> bool:
        """ Returns True i.f.f. the `selection` is present in `query`

        Args:
          query (Query): A query object
          selection (Selection): The selection to be found (or not) in `query`

        Returns:
          bool: True if the `selection` is present in `query`, otherwise False
        """
        return any(
            query.selection
            | map(lambda select: Selection.contains(select, selection))
        )
    @staticmethod
    def contains_argument(query: Query, arg_name: str) -> bool:
        """ Returns True i.f.f. an argument named `arg_name` appears anywhere in the query. """
        return any(query.selection | map(partial(Selection.contains_argument, arg_name=arg_name)))
    @staticmethod
    def get_argument(query: Query, target: str) -> Optional[Argument]:
        """ Returns the first argument named `target` found in the query, or None. """
        try:
            return next(
                query.selection
                | map(partial(Selection.get_argument, target=target))
                | where(lambda x: x is not None)
            )
        except StopIteration:
            return None
    @staticmethod
    def substitute_arg(query: Query, arg_name: str, replacement: Argument | list[Argument]) -> Query:
        """ Returns a new `Query` with every argument named `arg_name` replaced by `replacement`. """
        return Query(
            name=query.name,
            selection=list(
                query.selection
                | map(partial(Selection.substitute_arg, arg_name=arg_name, replacement=replacement))
            ),
            variables=query.variables
        )
    @staticmethod
    def contains(query: Query, other: Query) -> bool:
        """ Returns True i.f.f. all selections in `other` are contained in `query`. In other words,
        returns true i.f.f. `other` is a subset of `query`.

        Note: `other` does not need to include "full" selections (i.e.: selections all the way to
        leaves of the GraphQL schema).

        Args:
          query (Query): The query that is to be checked
          other (Query): The query that has to be in `query`

        Returns:
          bool: True i.f.f. all selections in `other` are contained in `query`, otherwise False
        """
        return all(other.selection | map(partial(Query.contains_selection, query)))
    @staticmethod
    def select(query: Query, other: Query) -> Query:
        """ Returns a new Query that projects `query`'s selections onto the
        shape described by `other` (keeping `query`'s variables).

        Args:
          query (Query): The query to project
          other (Query): The query describing the shape to keep

        Returns:
          Query: The projected query
        """
        return Query(
            name=query.name,
            selection=list(
                other.selection
                | map(lambda s: next(
                    query.selection
                    | where(lambda s_: s_.fmeta.name == s.fmeta.name)
                    | map(lambda s_: Selection.select(s_, s))
                    | take(1)
                ))
            ),
            variables=query.variables
        )
@dataclass(frozen=True)
class Fragment:
    """A GraphQL fragment definition: a name, the type it applies to, its
    selections and its variable definitions."""
    name: str
    type_: TypeRef.T
    selection: list[Selection] = field(default_factory=list)
    # Variables as fragment arguments, not the values of those variables
    variables: list[VariableDefinition] = field(default_factory=list)

    @property
    def graphql(self):
        """Render this fragment as a GraphQL ``fragment NAME on TYPE { ... }`` block."""
        selection_str = "\n".join(
            [select.graphql(level=1) for select in self.selection]
        )
        return f"""fragment {self.name} on {TypeRef.root_type_name(self.type_)} {{\n{selection_str}\n}}"""

    @staticmethod
    def combine(frag: Fragment, other: Fragment) -> Fragment:
        """Unimplemented stub: currently returns None.

        NOTE(review): ``Document.combine`` passes this as the ``combine``
        callback to ``union``, which would yield None entries if two
        fragments share a name -- needs a real implementation.
        """
        pass

    @staticmethod
    def transform(frag: Fragment, f: Callable[[Selection], Selection]) -> Fragment:
        """Return a copy of ``frag`` with each selection mapped through ``f``."""
        return Fragment(
            name=frag.name,
            type_=frag.type_,
            selection=list(frag.selection | map(f)),
            # Fix: preserve the fragment's variable definitions; they were
            # previously dropped (reset to the empty default) by transform.
            variables=frag.variables
        )
@dataclass(frozen=True)
class Document:
    """A GraphQL request document for a single endpoint: the endpoint ``url``,
    one query, its fragments and the variable value assignments."""
    url: str
    query: Optional[Query]
    fragments: list[Fragment] = field(default_factory=list)
    # A list of variable assignments. For non-repeating queries
    # the list would be of length 1 (i.e.: only one set of query variable assignments)
    variables: dict[str, Any] = field(default_factory=dict)

    @property
    def graphql(self):
        """GraphQL text of the query followed by its fragment definitions.

        NOTE(review): raises AttributeError when ``query`` is None -- confirm
        callers always set a query before rendering.
        """
        return '\n'.join([self.query.graphql, *list(self.fragments | map(lambda frag: frag.graphql))])

    @staticmethod
    def mk_single_query(url: str, query: Query) -> Document:
        """Build a document containing a single query and no fragments."""
        # Fix: pass the query itself -- the original passed `[query]`, which
        # stored a one-element list in the `query` field (fragments/variables
        # then silently took their defaults).
        return Document(url, query)

    @staticmethod
    def combine(doc: Document, other: Document) -> Document:
        """Merge two documents for the same endpoint into one."""
        return Document(
            url=doc.url,
            # Fix: Query.combine is a staticmethod taking (query, other); the
            # original instance-style call `doc.query.combine(other.query)`
            # supplied only one argument and always raised TypeError.
            query=Query.combine(doc.query, other.query),
            fragments=union(
                doc.fragments,
                other.fragments,
                key=lambda frag: frag.name,
                combine=Fragment.combine
            )
        )

    @staticmethod
    def transform(
        doc: Document,
        query_f: Callable[[Query], Query] = identity,
        fragment_f: Callable[[Fragment], Fragment] = identity
    ) -> Document:
        """Return a copy of ``doc`` with the query mapped through ``query_f``
        and each fragment mapped through ``fragment_f``."""
        return Document(
            url=doc.url,
            query=query_f(doc.query),
            fragments=list(doc.fragments | map(fragment_f)),
            variables=doc.variables
        )
@dataclass(frozen=True)
class DataRequest:
    """A batch of ``Document`` requests, at most one per endpoint URL."""
    documents: list[Document] = field(default_factory=list)

    @property
    def graphql(self):
        """Concatenated GraphQL text of all documents in the request."""
        return '\n'.join(list(self.documents | map(lambda doc: doc.graphql)))

    @staticmethod
    def combine(req: DataRequest, other: DataRequest) -> DataRequest:
        """Merge two requests, combining documents that target the same URL."""
        # Fix: return annotation was `-> None` although a DataRequest is returned.
        return DataRequest(
            documents=union(
                req.documents,
                other.documents,
                key=lambda doc: doc.url,
                combine=Document.combine
            )
        )

    @staticmethod
    def transform(req: DataRequest, f: Callable[[Document], Document]) -> DataRequest:
        """Return a copy of ``req`` with each document mapped through ``f``."""
        return DataRequest(
            documents=list(req.documents | map(f))
        )

    @staticmethod
    def single_query(url: str, query: Query) -> DataRequest:
        """Build a request containing one document wrapping ``query``."""
        return DataRequest([
            Document(url, query)
        ])

    @staticmethod
    def single_document(doc: Document) -> DataRequest:
        """Build a request containing exactly the given document."""
        return DataRequest([doc])

    @staticmethod
    def add_documents(self: DataRequest, docs: Document | list[Document]) -> DataRequest:
        """Return a copy of the request with ``docs`` (one or many) appended."""
        return DataRequest(list([self.documents, docs] | traverse))
# ================================================================
# Utility functions
# ================================================================
def input_value_of_string(type_: TypeRef.T, value: str) -> InputValue:
match type_:
case TypeRef.Named("ID"):
return InputValue.String(value)
case TypeRef.Named("Int"):
return InputValue.Int(int(value))
case TypeRef.Named("BigInt"):
return InputValue.String(value)
case (TypeRef.Named("Float")):
return InputValue.Float(float(value))
case (TypeRef.Named("BigDecimal")):
return InputValue.String(value)
case (TypeRef.Named("Boolean")):
return InputValue.Boolean(bool(value))
case (TypeRef.Named("String" | "Bytes")):
return InputValue.String(value)
case (TypeRef.Named()):
return InputValue.Enum(value)
case type_:
raise TypeError(f"input_value_of_string: invalid type {type_}")
def input_value_of_value(type_: TypeRef.T, value: Any) -> InputValue:
match type_:
case (TypeRef.Named("ID"), _, str()):
return InputValue.String(str(value))
case TypeRef.Named("Int"):
return InputValue.Int(int(value))
case TypeRef.Named("BigInt"):
return InputValue.String(str(value))
case (TypeRef.Named("Float")):
return InputValue.Float(float(value))
case (TypeRef.Named("BigDecimal")):
return InputValue.String(str(value))
case (TypeRef.Named("Boolean")):
return InputValue.Boolean(bool(value))
case (TypeRef.Named("String" | "Bytes")):
return InputValue.String(str(value))
case (TypeRef.Named()):
return InputValue.Enum(str(value))
case type_:
raise TypeError(f"input_value_of_value: invalid type {type_}")
def input_value_of_argument(
    schema: SchemaMeta,
    meta: TypeMeta,
    value: Any
) -> InputValue:
    """Convert the Python ``value`` of the argument described by ``meta`` into an
    ``InputValue`` node, resolving named types through ``schema``.

    Raises:
      TypeError: If ``meta`` is not an ArgumentMeta, if a non-null argument is
        None, or if the (type, value) combination is unsupported.
    """
    def fmt_value(type_ref: TypeRef.T, value: Any, non_null=False):
        # Dispatch on (type reference, schema type metadata, runtime value).
        match (type_ref, schema.type_map[TypeRef.root_type_name(type_ref)], value):
            # None maps to Null only while the type is still nullable
            # (i.e. before a NonNull wrapper has been seen).
            case (_, _, None):
                if not non_null:
                    return InputValue.Null()
                else:
                    raise TypeError(f"Argument {meta.name} cannot be None!")
            # If type is non_null, unwrap and recurse with non_null=True
            case (TypeRef.NonNull(t), _, _):
                return fmt_value(t, value, non_null=True)
            case (TypeRef.Named("ID"), _, str()):
                return InputValue.String(value)
            # NOTE(review): bool is a subclass of int, so a bool value would
            # also satisfy the int() patterns below for Int/BigInt-typed
            # arguments -- confirm that is acceptable.
            case (TypeRef.Named("Int"), _, int()):
                return InputValue.Int(value)
            case (TypeRef.Named("BigInt"), _, int()):
                return InputValue.String(str(value))
            case (TypeRef.Named("Float"), _, int() | float()):
                return InputValue.Float(float(value))
            case (TypeRef.Named("BigDecimal"), _, int() | float()):
                return InputValue.String(str(float(value)))
            case (TypeRef.Named("String" | "Bytes"), _, str()):
                return InputValue.String(value)
            case (TypeRef.Named(), TypeMeta.EnumMeta(_), str()):
                return InputValue.Enum(value)
            case (TypeRef.Named("Boolean"), _, bool()):
                return InputValue.Boolean(value)
            # Lists recurse element-wise with the inner type.
            case (TypeRef.List(t), _, list()):
                return InputValue.List([fmt_value(t, val, non_null) for val in value])
            # Input objects recurse field-wise, looking up each field's type.
            case (TypeRef.Named(), TypeMeta.InputObjectMeta() as input_object, dict()):
                return InputValue.Object({key: fmt_value(typeref_of_input_field(input_object, key), val, non_null) for key, val in value.items()})
            # Catch-all error case (capture patterns rebind the local names).
            case (value, typ, non_null):
                raise TypeError(f"mk_input_value({value}, {typ}, {non_null})")
    match meta:
        case TypeMeta.ArgumentMeta(type_=type_):
            return fmt_value(type_, value)
        case _:
            raise TypeError(f"input_value_of_argument: TypeMeta {meta.name} is not of type TypeMeta.ArgumentMeta")
def add_object_field(
    object_: TypeMeta.ObjectMeta | TypeMeta.InterfaceMeta,
    field: TypeMeta.FieldMeta
) -> None:
    """Append ``field`` to ``object_``'s field list, mutating ``object_`` in place."""
    object_.fields.append(field)
def arguments_of_field_args(
    schema: SchemaMeta,
    field: TypeMeta.FieldMeta,
    args: Optional[dict[str, Any]]
) -> list[Argument]:
    """Build the ``Argument`` list for ``field`` from the user-provided ``args``.

    Arguments absent from ``args`` are skipped when they have a default or a
    nullable type; a missing required argument raises TypeError.

    Raises:
      TypeError: If ``field`` is not a FieldMeta, or a required argument is missing.
    """
    provided = {} if args is None else args

    def mk_argument(arg_meta: TypeMeta.ArgumentMeta) -> Optional[Argument]:
        # Provided argument: convert its value against the schema.
        if arg_meta.name in provided:
            value = input_value_of_argument(schema, arg_meta, provided[arg_meta.name])
            return Argument(arg_meta.name, value)
        # Not provided: acceptable only if defaulted or nullable.
        if arg_meta.default_value or not TypeRef.is_non_null(arg_meta.type_):
            return None
        raise TypeError(f"arguments_of_field_args: Argument {arg_meta.name} of field {field.name} is required but not provided!")

    # TODO: Add warnings if arguments are not used
    if not isinstance(field, TypeMeta.FieldMeta):
        raise TypeError(f"arguments_of_field_args: TypeMeta {field.name} is not of type FieldMeta")
    return [arg for arg in map(mk_argument, field.arguments) if arg is not None]
def selection_of_path(
schema: SchemaMeta,
fpath: list[Tuple[Optional[dict[str, Any]], TypeMeta.FieldMeta]]
) -> list[Selection]:
match fpath:
case [(args, TypeMeta.FieldMeta() as fmeta), *rest]:
return [Selection(
fmeta,
arguments=arguments_of_field_args(schema, fmeta, args),
selection=selection_of_path(schema, rest)
)]
case []:
return []
def pagination_args(page_size: int, num_entities: int) -> list[dict[str, int]]:
    """Return ``{'first', 'skip'}`` argument dicts covering ``num_entities``
    entities in pages of ``page_size``; the last page holds the remainder."""
    num_pages = math.ceil(num_entities / page_size)
    remainder = num_entities % page_size
    pages = []
    for page in range(num_pages):
        short_last_page = (page == num_pages - 1) and remainder != 0
        pages.append({
            'first': remainder if short_last_page else page_size,
            'skip': page * page_size,
        })
    return pages
| 30.21688 | 140 | 0.651416 |
acee8cfc0bf33610b3a9af17be9515381813b9fc | 1,064 | py | Python | test/test_app.py | ayushmaskey/centricity_io | 7579ee06e8fe720dd83fb0c095d5455e5d4a0af5 | [
"MIT"
] | 1 | 2020-02-20T00:43:38.000Z | 2020-02-20T00:43:38.000Z | test/test_app.py | ayushmaskey/centricity_io | 7579ee06e8fe720dd83fb0c095d5455e5d4a0af5 | [
"MIT"
] | null | null | null | test/test_app.py | ayushmaskey/centricity_io | 7579ee06e8fe720dd83fb0c095d5455e5d4a0af5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import unittest
import context
import app.sql.sql_connection as sql
import app.sql.sql_result_to_string as sql_str
# import sys
# print(sys.path)
class unittest_app(unittest.TestCase):
    """Tests for the app.sql helpers.

    NOTE(review): the sql_connection tests below are integration tests -- they
    require a reachable database and the 'amaskey' login row to exist.
    """

    def test(self):
        # Intentional no-op placeholder test.
        pass

    # Fix: the section markers below were bare string literals (no-op
    # expression statements, not docstrings); converted to comments.

    # unit tests for sql_connection.py
    def test_sql_result_type_check(self):
        """sqlQuery should return its rows as a list."""
        expectation = 'amaskey'
        q = "select LoginUser from DoctorFacility where LoginUser = '%s'" % (expectation)
        rows = sql.sqlQuery(q)
        self.assertEqual(type(rows), list)

    def test_sql_conn(self):
        """The queried login row should round-trip through the connection."""
        expectation = 'amaskey'
        q = "select LoginUser from DoctorFacility where LoginUser = '%s'" % (expectation)
        rows = sql.sqlQuery(q)
        # Only the first row is inspected.
        # NOTE(review): if the query returns no rows, `result` is unbound and
        # this raises NameError rather than failing the assertion.
        for row in rows:
            result = row[0].lower()
            break
        self.assertEqual(result, expectation)

    # unit test for sql_result_to_string.py
    def test_sql_result_comma_separate_first_column(self):
        """First column of each row is joined with commas (pure function, no DB)."""
        rows = [('amaskey', 'it'), ('jeff', )]
        expectation = "amaskey,jeff"
        str1 = sql_str.sql_result_comma_separate_first_column(rows)
        self.assertEqual(str1, expectation)
# Run the test suite when executed directly (e.g. ``python test_app.py``).
if __name__ == '__main__':
    unittest.main()
| 25.333333 | 83 | 0.728383 |
acee8de4b016c5ab37d41669ea0bace5c64831f1 | 1,128 | py | Python | src/compas_fab/ghpython/components/Cf_AttachTool/code.py | claimHF/compas_fab | 3efe608c07dc5b08653ee4132a780a3be9fb93af | [
"MIT"
] | null | null | null | src/compas_fab/ghpython/components/Cf_AttachTool/code.py | claimHF/compas_fab | 3efe608c07dc5b08653ee4132a780a3be9fb93af | [
"MIT"
] | null | null | null | src/compas_fab/ghpython/components/Cf_AttachTool/code.py | claimHF/compas_fab | 3efe608c07dc5b08653ee4132a780a3be9fb93af | [
"MIT"
] | null | null | null | """
Attach a tool to the robot.
COMPAS FAB v0.22.0
"""
from ghpythonlib.componentbase import executingcomponent as component
from compas_rhino.conversions import RhinoMesh
from compas_rhino.conversions import plane_to_compas_frame
from compas.geometry import Frame
from compas_fab.robots import PlanningScene
from compas_fab.robots import Tool
class AttachToolComponent(component):
    """Grasshopper component that attaches a tool to a ROS-connected robot."""

    def RunScript(self, robot, visual_mesh, collision_mesh, tcf_plane):
        """Attach a tool built from the given Rhino geometry to ``robot``.

        Only acts when a connected robot client and a visual mesh are provided;
        otherwise the robot is passed through unchanged.
        """
        if robot and robot.client and robot.client.is_connected and visual_mesh:
            # Fall back to using the visual mesh for collision checking.
            if not collision_mesh:
                collision_mesh = visual_mesh
            c_visual_mesh = RhinoMesh.from_geometry(visual_mesh).to_compas()
            c_collision_mesh = RhinoMesh.from_geometry(collision_mesh).to_compas()
            # Tool coordinate frame: world XY unless a plane is supplied.
            if not tcf_plane:
                frame = Frame.worldXY()
            else:
                frame = plane_to_compas_frame(tcf_plane)
            tool = Tool(c_visual_mesh, frame, c_collision_mesh)
            scene = PlanningScene(robot)
            # Attach on the robot model, then publish to the planning scene.
            robot.attach_tool(tool)
            scene.add_attached_tool()
        return robot
| 32.228571 | 82 | 0.699468 |
acee8e12c361656e23c6b07584beaa97a94bbcf7 | 7,064 | py | Python | methods/meta_template.py | DingYuan0118/Meta-Fine-Tuning | 531b7418420c072844216ec5217f1f03f6419a79 | [
"MIT"
] | null | null | null | methods/meta_template.py | DingYuan0118/Meta-Fine-Tuning | 531b7418420c072844216ec5217f1f03f6419a79 | [
"MIT"
] | null | null | null | methods/meta_template.py | DingYuan0118/Meta-Fine-Tuning | 531b7418420c072844216ec5217f1f03f6419a79 | [
"MIT"
] | null | null | null | import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import utils
from abc import abstractmethod
class MetaTemplate(nn.Module):
    def __init__(self, model_func, n_way, n_support, change_way = True):
        """Base class for episodic few-shot methods.

        Args:
          model_func: zero-argument callable returning the backbone feature extractor
          n_way: number of classes per episode
          n_support: number of support examples per class
          change_way: allow n_way to be re-derived from each batch
        """
        super(MetaTemplate, self).__init__()
        self.n_way = n_way
        self.n_support = n_support
        self.n_query = -1 #(change depends on input)
        self.freeze_backbone = False  # when True, parse_feature disables backbone gradients
        self.feature = model_func()
        self.feat_dim = self.feature.final_feat_dim
        self.change_way = change_way #some methods allow different_way classification during training and test
    @abstractmethod
    def set_forward(self,x,is_feature):
        """Compute per-query classification scores for one episode (subclass hook)."""
        pass
    @abstractmethod
    def set_forward_loss(self, x):
        """Compute the episodic training loss for one episode (subclass hook)."""
        pass
    def forward(self,x):
        """Extract backbone features for ``x``.

        Calls ``self.feature.forward`` directly rather than ``self.feature(x)``,
        which bypasses nn.Module hooks -- kept as-is to match original behavior.
        """
        out = self.feature.forward(x)
        return out
    def parse_feature(self,x,is_feature):
        """Split an episode into support and query embeddings.

        Args:
          x: precomputed features when is_feature is True, otherwise raw images
             laid out as (n_way, n_support + n_query, ...)
          is_feature: skip the backbone when x already holds features

        Returns:
          (z_support, z_query) of shapes (n_way, n_support, feat) and (n_way, n_query, feat).
        """
        x = Variable(x.cuda())
        if is_feature:
            z_all = x
        else:
            # NOTE(review): freezing is a lasting side effect -- requires_grad is
            # never re-enabled on the backbone parameters afterwards.
            if self.freeze_backbone:
                for params in self.feature.parameters():
                    params.requires_grad = False
            # Flatten (way, shot) dims into a batch for the backbone.
            x = x.contiguous().view( self.n_way * (self.n_support + self.n_query), *x.size()[2:])
            z_all = self.feature.forward(x)
            z_all = z_all.view( self.n_way, self.n_support + self.n_query, -1)
        z_support = z_all[:, :self.n_support]
        z_query = z_all[:, self.n_support:]
        return z_support, z_query
def correct(self, x):
scores = self.set_forward(x)
y_query = np.repeat(range( self.n_way ), self.n_query )
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:,0] == y_query)
return float(top1_correct), len(y_query)
def train_loop(self, epoch, train_loader, optimizer ):
print_freq = 10
avg_loss=0
for i, (x,_ ) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
optimizer.zero_grad()
loss = self.set_forward_loss( x )
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
def train_loop2(self, epoch, train_loader, optimizer ):
print_freq = 10
avg_loss=0
for i, (x,_ ) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
optimizer.zero_grad()
loss = self.set_forward_loss( x )
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
def train_loop_finetune(self, epoch, train_loader, optimizer ):
print_freq = 10
avg_loss=0
for i, (x,_ ) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
optimizer.zero_grad()
loss = self.set_forward_loss_finetune( x )
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
def train_loop3(self, epoch, train_loader, optimizer, unsup_loader):
print_freq = 10
avg_loss=0
for i, (x,_ ) in enumerate(train_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
optimizer.zero_grad()
loss = self.set_forward_loss( x )
loss.backward()
optimizer.step()
avg_loss = avg_loss+loss.item()
if i % print_freq==0:
#print(optimizer.state_dict()['param_groups'][0]['lr'])
print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1)))
def test_loop(self, test_loader, record = None):
correct =0
count = 0
acc_all = []
iter_num = len(test_loader)
for i, (x,_) in enumerate(test_loader):
self.n_query = x.size(1) - self.n_support
if self.change_way:
self.n_way = x.size(0)
correct_this, count_this = self.correct(x)
acc_all.append(correct_this/ count_this*100 )
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
return acc_mean
def set_forward_adaptation(self, x, is_feature = True): #further adaptation, default is fixing feature and train a new softmax clasifier
assert is_feature == True, 'Feature is fixed in further adaptation'
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous().view(self.n_way* self.n_support, -1 )
z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
y_support = torch.from_numpy(np.repeat(range( self.n_way ), self.n_support ))
y_support = Variable(y_support.cuda())
linear_clf = nn.Linear(self.feat_dim, self.n_way)
linear_clf = linear_clf.cuda()
set_optimizer = torch.optim.SGD(linear_clf.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
loss_function = nn.CrossEntropyLoss()
loss_function = loss_function.cuda()
batch_size = 4
support_size = self.n_way* self.n_support
for epoch in range(100):
rand_id = np.random.permutation(support_size)
for i in range(0, support_size , batch_size):
set_optimizer.zero_grad()
selected_id = torch.from_numpy( rand_id[i: min(i+batch_size, support_size) ]).cuda()
z_batch = z_support[selected_id]
y_batch = y_support[selected_id]
scores = linear_clf(z_batch)
loss = loss_function(scores,y_batch)
loss.backward()
set_optimizer.step()
scores = linear_clf(z_query)
return scores
| 37.775401 | 140 | 0.57517 |
acee8e209610ce111e4cb19f8a99e60fbf32b49d | 3,440 | py | Python | homeassistant/components/vesync/common.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,635 | 2015-01-01T14:59:18.000Z | 2016-04-13T02:36:16.000Z | homeassistant/components/vesync/common.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 1,463 | 2015-01-06T06:18:07.000Z | 2016-04-12T22:30:37.000Z | homeassistant/components/vesync/common.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 659 | 2015-01-05T14:02:23.000Z | 2016-04-12T23:39:31.000Z | """Common utilities for VeSync Component."""
import logging
from homeassistant.helpers.entity import Entity, ToggleEntity
from .const import DOMAIN, VS_FANS, VS_LIGHTS, VS_SENSORS, VS_SWITCHES
_LOGGER = logging.getLogger(__name__)
async def async_process_devices(hass, manager):
    """Assign devices to proper component."""
    devices = {
        VS_SWITCHES: [],
        VS_FANS: [],
        VS_LIGHTS: [],
        VS_SENSORS: [],
    }

    await hass.async_add_executor_job(manager.update)

    fans = manager.fans
    if fans:
        devices[VS_FANS].extend(fans)
        # Fans also carry sensor data, so expose them on the sensor platform.
        devices[VS_SENSORS].extend(fans)
        _LOGGER.info("%d VeSync fans found", len(fans))

    bulbs = manager.bulbs
    if bulbs:
        devices[VS_LIGHTS].extend(bulbs)
        _LOGGER.info("%d VeSync lights found", len(bulbs))

    outlets = manager.outlets
    if outlets:
        devices[VS_SWITCHES].extend(outlets)
        # Outlets report power & energy usage, exposed as separate sensors.
        devices[VS_SENSORS].extend(outlets)
        _LOGGER.info("%d VeSync outlets found", len(outlets))

    switches = manager.switches
    if switches:
        # Dimmable wall switches behave like lights; the rest are switches.
        for switch in switches:
            bucket = VS_LIGHTS if switch.is_dimmable() else VS_SWITCHES
            devices[bucket].append(switch)
        _LOGGER.info("%d VeSync switches found", len(switches))

    return devices
class VeSyncBaseEntity(Entity):
    """Base class for VeSync Entity Representations."""

    def __init__(self, device):
        """Store the wrapped pyvesync device and seed entity attributes."""
        self.device = device
        self._attr_unique_id = self.base_unique_id
        self._attr_name = self.base_name

    @property
    def base_unique_id(self):
        """Return the ID of this device."""
        # unique_id may be overridden in subclasses (e.g. sensors);
        # base_unique_id stays stable so related entities can be grouped
        # under a single device registry entry.
        sub_no = self.device.sub_device_no
        if isinstance(sub_no, int):
            return f"{self.device.cid}{sub_no}"
        return self.device.cid

    @property
    def base_name(self):
        """Return the name of the device."""
        # Same story here as `base_unique_id` above.
        return self.device.device_name

    @property
    def available(self) -> bool:
        """Return True if device is available."""
        status = self.device.connection_status
        return status == "online"

    @property
    def device_info(self):
        """Return device information."""
        info = {
            "identifiers": {(DOMAIN, self.base_unique_id)},
            "name": self.base_name,
            "model": self.device.device_type,
            "default_manufacturer": "VeSync",
            "sw_version": self.device.current_firm_version,
        }
        return info

    def update(self):
        """Update vesync device."""
        self.device.update()
class VeSyncDevice(VeSyncBaseEntity, ToggleEntity):
    """Base class for VeSync Device Representations."""

    @property
    def details(self):
        """Expose the wrapped device's details dictionary."""
        return self.device.details

    @property
    def is_on(self):
        """Return True if device is on."""
        status = self.device.device_status
        return status == "on"

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.device.turn_off()
acee8e33e3b771605ab03d0ee63590482a0ce8ac | 58,685 | py | Python | src/pycropml/transpiler/generators/siriusGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 5 | 2020-06-21T18:58:04.000Z | 2022-01-29T21:32:28.000Z | src/pycropml/transpiler/generators/siriusGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 27 | 2018-12-04T15:35:44.000Z | 2022-03-11T08:25:03.000Z | src/pycropml/transpiler/generators/siriusGenerator.py | brichet/PyCrop2ML | 7177996f72a8d95fdbabb772a16f1fd87b1d033e | [
"MIT"
] | 7 | 2019-04-20T02:25:22.000Z | 2021-11-04T07:52:35.000Z | # coding: utf8
from pycropml.transpiler.codeGenerator import CodeGenerator
from pycropml.transpiler.rules.csharpRules import CsharpRules
from pycropml.transpiler.generators.docGenerator import DocGenerator
from pycropml.transpiler.pseudo_tree import Node
import os
from path import Path
from pycropml.transpiler.Parser import parser
from pycropml.transpiler.ast_transform import AstTransformer, transform_to_syntax_tree
from pycropml import code2nbk
from pycropml.transpiler.generators.csharpGenerator import CsharpGenerator, CsharpTrans,CsharpCompo
# Map Crop2ML variable categories to the single-letter names of the BioMa
# domain-class instances used in the generated C# (s = state, r = rate,
# a = auxiliary, ex = exogenous) — see e.g. the "%s.%s" writes in
# TestPreConditions/TestPostConditions below.
category = {"state":"s", "rate":"r", "auxiliary":"a", "exogenous":"ex"}

# Map Crop2ML datatype names to the BioMa VarInfo value-type names passed to
# VarInfoValueTypes.GetInstanceForName(...) in the generated code.
param_datatype ={"STRING":"String",
                 "INT":"Integer",
                 "DOUBLE":"Double",
                 "BOOLEAN":"Boolean",
                 "DATE":"Date",
                 "DATELIST":"ListDate",
                 "STRINGLIST": "ListString",
                 "DOUBLELIST": "ListDouble",
                 "INTLIST": "ListInteger",
                 "BOOLEANLIST": "ListBoolean"}
def getdefault(x):
    """Return the C# literal used as the VarInfo default value for ``x``.

    Strings are quoted, booleans lower-cased; dates and anything without a
    usable default fall back to the BioMa sentinel literal "-1D".
    """
    if "default" not in dir(x):
        return "-1D"
    datatype = x.datatype
    # Date-typed variables never carry a literal default in the generated C#.
    if datatype in ("DATE", "DATELIST"):
        return "-1D"
    if x.default is None:
        return "-1D"
    if datatype == "STRING":
        return '"%s"' % x.default
    if datatype == "BOOLEAN":
        return x.default.lower()
    # Numeric types and non-date list types pass the default through as-is.
    return x.default
class SiriusGenerator(CsharpGenerator):
    """Generate a SiriusQuality/BioMa C# strategy class from a Crop2ML model.

    Specializes CsharpGenerator: walks the syntax tree and writes a
    ``SiriusQuality<name>.Strategies.<model>`` class, including VarInfo
    metadata, pre/post-condition tests, parameter accessors and the
    ``CalculateModel`` body.  Output correctness depends on the exact
    ordering of the ``self.write`` calls below.
    """

    def __init__(self, tree=None, model=None, name=None):
        self.tree = tree
        self.model=model
        self.name = name
        self.indent_with=' '*4
        CsharpGenerator.__init__(self, tree, model, name)
        # Emit the BioMa using-directives immediately, before any visiting.
        self.usingBioma()

    def usingBioma(self):
        """Write the C# using directives required by BioMa-based strategies."""
        self.write("""
using System;
using System.Collections.Generic;
using System.Linq;
using System.Xml;
using CRA.ModelLayer.MetadataTypes;
using CRA.ModelLayer.Core;
using CRA.ModelLayer.Strategy;
using System.Reflection;
using VarInfo=CRA.ModelLayer.Core.VarInfo;
using Preconditions=CRA.ModelLayer.Core.Preconditions;
using CRA.AgroManagement;
""")

    def desc(self,node, n, inp, vartype) :
        """Emit PropertyDescription pd<n> for variable ``inp`` and add it to
        the C# list named by ``vartype`` (_inputs0_0 or _outputs0_0).
        Names ending in "_t1" (previous time step) are emitted without the
        suffix, matching the domain-class property names."""
        self.write("PropertyDescription pd%s = new PropertyDescription();"%n)
        self.newline(node)
        self.write("pd%s.DomainClassType = typeof(SiriusQuality%s.DomainClass.%s%s);"%(n, self.name,self.name,inp.variablecategory.capitalize()))
        self.newline(node)
        self.write('pd%s.PropertyName = "%s";'%(n,inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('pd%s.PropertyType = (SiriusQuality%s.DomainClass.%s%sVarInfo.%s).ValueType.TypeForCurrentValue;'%(n, self.name,self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('pd%s.PropertyVarInfo =(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);'%(n,self.name,self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('%s.Add(pd%s);'%(vartype, n))

    def inOutputDesc(self, node):
        """Emit the strategy constructor body: VarInfo metadata for every
        parameter, input and output, collected into a ModellingOptions."""
        self.newline(node)
        self.write("ModellingOptions mo0_0 = new ModellingOptions();")
        self.newline(node)
        self.write("//Parameters")
        self.newline(node)
        self.write("List<VarInfo> _parameters0_0 = new List<VarInfo>();")
        self.newline(node)
        n = 1
        for p in self.model.parameters:
            self.write("VarInfo v%s = new VarInfo();"%n)
            self.newline(node)
            self.write("v%s.DefaultValue = %s;"%(n, getdefault(p)))
            self.newline(node)
            self.write('v%s.Description = "%s";'%(n,p.description))
            self.newline(node)
            self.write("v%s.Id = 0;"%n)
            self.newline(node)
            self.write("v%s.MaxValue = %s;"%(n, p.max if p.max is not None else getdefault(p)))
            self.newline(node)
            self.write("v%s.MinValue = %s;"%(n, p.min if p.min is not None else getdefault(p)))
            self.newline(node)
            self.write('v%s.Name = "%s";'%(n, p.name))
            self.newline(node)
            self.write("v%s.Size = 1;"%n)
            self.newline(node)
            self.write('v%s.Units = "%s";'%(n, p.unit if ("unit" in dir(p) and p.unit is not None) else "dimensionless"))
            self.newline(node)
            self.write('v%s.URL = "%s";'%(n, p.url if ("url" in dir(p) and p.url is not None) else ""))
            self.newline(node)
            self.write("v%s.VarType = CRA.ModelLayer.Core.VarInfo.Type.PARAMETER;"%n)
            self.newline(node)
            self.write('v%s.ValueType = VarInfoValueTypes.GetInstanceForName("%s");'%(n, param_datatype[p.datatype]))
            self.newline(node)
            self.write("_parameters0_0.Add(v%s);"%n)
            self.newline(node)
            n = n+1
        self.write("mo0_0.Parameters=_parameters0_0;")
        # Numbering restarts for PropertyDescription variables (pd1, pd2, ...).
        n = 1
        self.newline(extra=1)
        self.write("//Inputs")
        self.newline(node)
        self.write("List<PropertyDescription> _inputs0_0 = new List<PropertyDescription>();"%())
        self.newline(node)
        for inp in self.model.inputs:
            # Parameters are handled above, so skip them here.
            if inp.name not in self.modparam :
                self.desc(node,n,inp, "_inputs0_0")
                self.write("")
                self.newline(node)
                n = n+1
        self.write("mo0_0.Inputs=_inputs0_0;")
        self.newline(extra=1)
        self.write("//Outputs")
        self.newline(node)
        self.write("List<PropertyDescription> _outputs0_0 = new List<PropertyDescription>();")
        self.newline(node)
        for out in self.model.outputs:
            if out.name not in self.modparam :
                self.desc(node,n,out,"_outputs0_0")
                self.newline(node)
                n = n+1
        self.write("mo0_0.Outputs=_outputs0_0;")
        self.newline(node)
        self.otherDesc(node)
        self.newline(extra=1)

    def otherDesc(self,node):
        """Emit the tail of the constructor: associated strategies and the
        ModellingOptionsManager initialization."""
        self.write("//Associated strategies")
        self.newline(node)
        self.write('List<string> lAssStrat0_0 = new List<string>();')
        self.newline(node)
        self.write("mo0_0.AssociatedStrategies = lAssStrat0_0;")
        self.newline(node)
        self.write("//Adding the modeling options to the modeling options manager")
        self.newline(node)
        self.write("_modellingOptionsManager = new ModellingOptionsManager(mo0_0);")
        self.newline(node)
        self.write("SetStaticParametersVarInfoDefinitions();")
        self.newline(node)
        self.write("SetPublisherData();")
        self.newline(extra=1)

    def description(self, node):
        """Emit the C# Description property from the model abstract."""
        self.write("public string Description")
        self.open(node)
        self.write('get { return "%s" ;}'%self.model.description.Abstract.replace("\n", ""))
        self.close(node)
        self.newline(extra=1)

    def url(self, node):
        """Emit the C# URL property."""
        self.write("public string URL")
        self.open(node)
        self.write('get { return "%s" ;}'%(self.model.description.url if "url" in dir(self.model.description) else ""))
        self.close(node)
        self.newline(extra=1)

    def domain(self, node):
        """Emit the (empty) C# Domain property."""
        self.write("public string Domain")
        self.open(node)
        self.write('get { return "";}')
        self.close(node)
        self.newline(extra=1)

    def modelType(self, node):
        """Emit the (empty) C# ModelType property."""
        self.write("public string ModelType")
        self.open(node)
        self.write('get { return "";}')
        self.close(node)
        self.newline(extra=1)

    def isContext(self, node):
        """Emit the C# IsContext property (always false)."""
        self.write("public bool IsContext")
        self.open(node)
        self.write('get { return false;}')
        self.close(node)
        self.newline(extra=1)

    def isTimeStep(self, node):
        """Emit the C# TimeStep property (empty list)."""
        self.write("public IList<int> TimeStep")
        self.open(node)
        self.write("get")
        self.open(node)
        self.write("IList<int> ts = new List<int>();")
        self.newline(node)
        self.write("return ts;")
        self.close(node)
        self.close(node)
        self.newline(extra=1)

    def publisherdata(self, node):
        """Emit the PublisherData field and its read-only property."""
        self.write("private PublisherData _pd;")
        self.newline(node)
        self.write("public PublisherData PublisherData")
        self.open(node)
        self.write("get { return _pd;} ")
        self.close(node)
        self.newline(extra=1)

    def SetPublisherData(self, node):
        """Emit SetPublisherData(): fills creator/date/publisher metadata."""
        self.write("private void SetPublisherData()")
        self.open(node)
        self.write("_pd = new CRA.ModelLayer.MetadataTypes.PublisherData();")
        self.newline(node)
        self.write('_pd.Add("Creator", "%s");'%self.model.description.Authors)
        self.newline(node)
        self.write('_pd.Add("Date", "");')
        self.newline(node)
        self.write('_pd.Add("Publisher", "%s");'%self.model.description.Institution)
        self.close(node)
        self.newline(extra=1)

    def getStrategyDomainClassesTypes(self, node):
        """Emit GetStrategyDomainClassesTypes(): lists the domain classes
        (State twice — current and previous step — Rate, Auxiliary, Exogenous)."""
        self.write("public IEnumerable<Type> GetStrategyDomainClassesTypes()")
        self.open(node)
        self.write("return new List<Type>() { typeof(SiriusQuality%s.DomainClass.%sState), typeof(SiriusQuality%s.DomainClass.%sState), typeof(SiriusQuality%s.DomainClass.%sRate), typeof(SiriusQuality%s.DomainClass.%sAuxiliary), typeof(SiriusQuality%s.DomainClass.%sExogenous)};"%(self.name, self.name,self.name, self.name,self.name, self.name,self.name, self.name,self.name, self.name))
        self.close(node)
        self.newline(extra=1)

    def setParametersDefaultValue(self, node):
        """Emit SetParametersDefaultValue(): delegates to the options manager."""
        self.write("public void SetParametersDefaultValue()")
        self.open(node)
        self.write("_modellingOptionsManager.SetParametersDefaultValue();")
        self.close(node)
        self.newline(extra=1)

    def varinfodef(self, node, pa):
        """Emit the VarInfo field assignments for one parameter ``pa``."""
        self.write('%sVarInfo.Name = "%s";'%(pa.name, pa.name)); self.newline(node)
        self.write('%sVarInfo.Description = "%s";'%(pa.name, pa.description)); self.newline(node)
        self.write('%sVarInfo.MaxValue = %s;'%(pa.name,pa.max if pa.max is not None else getdefault(pa))); self.newline(node)
        self.write('%sVarInfo.MinValue = %s;'%(pa.name, pa.min if pa.min is not None else getdefault(pa))); self.newline(node)
        self.write('%sVarInfo.DefaultValue = %s;'%(pa.name, getdefault(pa))); self.newline(node)
        self.write('%sVarInfo.Units = "%s";'%(pa.name, pa.unit if ("unit" in dir(pa) and pa.unit is not None) else "dimensionless")); self.newline(node)
        self.write('%sVarInfo.ValueType = VarInfoValueTypes.GetInstanceForName("%s");'%(pa.name, param_datatype[pa.datatype])); self.newline(node)

    def SetStaticParametersVarInfoDefinitions(self, node):
        """Emit the static method filling the per-parameter VarInfo objects."""
        self.write("private static void SetStaticParametersVarInfoDefinitions()")
        self.open(node)
        for pa in self.model.parameters:
            self.newline(extra=1)
            self.varinfodef(node, pa)
        self.close(node)
        self.newline(extra=1)

    def staticVarInfo(self, node):
        """Emit, for each parameter, a static VarInfo backing field plus its
        public accessor property."""
        for pa in self.model.parameters:
            self.write("private static VarInfo _%sVarInfo = new VarInfo();"%pa.name)
            self.newline(node)
            self.write("public static VarInfo %sVarInfo"%pa.name)
            self.open(node)
            self.write("get { return _%sVarInfo;} "%pa.name)
            self.close(node)
            self.newline(extra=1)

    def TestPostConditions(self, node):
        """Emit TestPostConditions(): range-checks every model output against
        its VarInfo after the model run."""
        self.write("public string TestPostConditions(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex,string callID)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("//Set current values of the outputs to the static VarInfo representing the output properties of the domain classes")
        for out in self.model.outputs:
            self.newline(node)
            self.write('SiriusQuality%s.DomainClass.%s%sVarInfo.%s.CurrentValue=%s.%s;'%(self.name, self.name, out.variablecategory.capitalize(),out.name,category[out.variablecategory], out.name))
        self.newline(node)
        self.write('ConditionsCollection prc = new ConditionsCollection();'); self.newline(node)
        self.write('Preconditions pre = new Preconditions(); ' ); self.newline(node)
        # Condition variable numbering continues after the input conditions
        # emitted by TestPreConditions (r1..rN for inputs, then outputs).
        n = len(self.model.inputs) + 1
        for out in self.model.outputs:
            self.newline(node)
            self.write("RangeBasedCondition r%s = new RangeBasedCondition(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);"%(n,self.name, self.name, out.variablecategory.capitalize(),out.name)); self.newline(node)
            self.write("if(r%s.ApplicableVarInfoValueTypes.Contains( SiriusQuality%s.DomainClass.%s%sVarInfo.%s.ValueType)){prc.AddCondition(r%s);}"%(n, self.name, self.name, out.variablecategory.capitalize(), out.name, n)); self.newline(node)
            n = n+1
        self.write('string postConditionsResult = pre.VerifyPostconditions(prc, callID); if (!string.IsNullOrEmpty(postConditionsResult)) { pre.TestsOut(postConditionsResult, true, "PostConditions errors in strategy " + this.GetType().Name); } return postConditionsResult;')
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)");
        self.open(node)
        self.write('string msg = "SiriusQuality.%s, " + this.GetType().Name + ": Unhandled exception running post-condition test. ";'%self.name); self.newline(node)
        self.write('throw new Exception(msg, exception);'); self.newline(node)
        self.close(node)
        self.close(node)
        self.newline(extra=1)

    def TestPreConditions(self, node):
        """Emit TestPreConditions(): range-checks every input and parameter
        against its VarInfo before the model run."""
        self.write("public string TestPreConditions(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex,string callID)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("//Set current values of the inputs to the static VarInfo representing the inputs properties of the domain classes")
        for inp in self.model.inputs:
            # Inputs without a variablecategory (parameters) are skipped.
            if "variablecategory" in dir(inp):
                self.newline(node)
                self.write('SiriusQuality%s.DomainClass.%s%sVarInfo.%s.CurrentValue=%s.%s;'%(self.name, self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3],category[inp.variablecategory], inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('ConditionsCollection prc = new ConditionsCollection();'); self.newline(node)
        self.write('Preconditions pre = new Preconditions(); ' ); self.newline(node)
        n = 1
        for inp in self.model.inputs:
            if "variablecategory" in dir(inp):
                self.newline(node)
                self.write("RangeBasedCondition r%s = new RangeBasedCondition(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);"%(n,self.name, self.name,inp.variablecategory.capitalize(), inp.name if not inp.name.endswith("_t1") else inp.name[:-3])); self.newline(node)
                self.write("if(r%s.ApplicableVarInfoValueTypes.Contains( SiriusQuality%s.DomainClass.%s%sVarInfo.%s.ValueType)){prc.AddCondition(r%s);}"%(n, self.name, self.name, inp.variablecategory.capitalize(), inp.name if not inp.name.endswith("_t1") else inp.name[:-3], n)); self.newline(node)
                n = n+1
        self.newline(node)
        for p in self.model.parameters:
            self.write('prc.AddCondition(new RangeBasedCondition(_modellingOptionsManager.GetParameterByName("%s")));'%p.name)
            self.newline(node)
        self.write('string preConditionsResult = pre.VerifyPreconditions(prc, callID); if (!string.IsNullOrEmpty(preConditionsResult)) { pre.TestsOut(preConditionsResult, true, "PreConditions errors in strategy " + this.GetType().Name); } return preConditionsResult;')
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)");
        self.open(node)
        self.write('string msg = "SiriusQuality.%s, " + this.GetType().Name + ": Unhandled exception running pre-condition test. ";'%self.name); self.newline(node)
        self.write('throw new Exception(msg, exception);'); self.newline(node)
        self.close(node)
        self.close(node)
        self.newline(extra=1)

    def estimate(self, node):
        """Emit Estimate(): public entry point wrapping CalculateModel in a
        try/catch that rethrows with the strategy name in the message."""
        self.write("public void Estimate(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("CalculateModel(s, s1, r, a, ex);")
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)")
        self.open(node)
        self.write('string msg = "Error in component SiriusQuality%s, strategy: " + this.GetType().Name + ": Unhandled exception running model. "+exception.GetType().FullName+" - "+exception.Message;'%self.name)
        self.newline(node)
        self.write('throw new Exception(msg, exception);')
        self.close(node)
        self.close(node)
        self.newline(extra=1)

    def get_set_param(self, node):
        """Emit a C# property (getter/setter) for each model parameter; the
        values live inside the strategy's ModellingOptionsManager."""
        self.write("// Getter and setters for the value of the parameters of the strategy. The actual parameters are stored into the ModelingOptionsManager of the strategy.\n")
        for arg in self.node_param :
            self.newline(node)
            self.write("public ")
            self.visit_decl(arg.pseudo_type)
            self.write(' ' +arg.name)
            self.open(node)
            self.write("get { ")
            self.newline(node)
            self.indentation += 1
            self.write('VarInfo vi= _modellingOptionsManager.GetParameterByName("%s");'%arg.name)
            self.newline(node)
            self.write("if (vi != null && vi.CurrentValue!=null) return (")
            self.visit_decl(arg.pseudo_type)
            self.write(")vi.CurrentValue ;")
            self.newline(node)
            self.write('else throw new Exception("Parameter')
            self.write(" '%s' not found (or found null) in strategy '%s'"%(arg.name,self.model.name))
            self.write('");')
            self.newline(node)
            self.indentation -= 1
            self.write('} set {')
            self.newline(node)
            self.indentation += 1
            self.write('VarInfo vi = _modellingOptionsManager.GetParameterByName("%s");'%arg.name)
            self.newline(node)
            self.write('if (vi != null) vi.CurrentValue=value;')
            self.newline(node)
            self.write('else throw new Exception("Parameter')
            self.write(" '%s' not found in strategy '%s'"%(arg.name,self.model.name))
            self.write('");')
            self.close(node)
            self.close(node)
            self.newline(extra=1)

    def visit_function_definition(self, node):
        """Visit a function definition node.

        "model_" functions become the private CalculateModel method and
        "init_" functions become Init, both taking the five domain-class
        instances; every other function is emitted as a public static C#
        method with its own parameter list.  For model/init bodies, local
        variables are declared for all IN/INOUT features, initialized from
        the matching domain-class property (model) or with an empty
        collection/default (init)."""
        self.newline(node)
        self.funcname = node.name
        if (not node.name.startswith("model_") and not node.name.startswith("init_")) :
            self.write("public static ")
            self.visit_decl(node.return_type) if node.return_type else self.write("void")
            self.write(" Main(") if node.name=="main" else self.write(" %s("%node.name)
            for i, pa in enumerate(node.params):
                self.visit_decl(pa.pseudo_type)
                self.write(" %s"%pa.name)
                if i!= (len(node.params)-1):
                    self.write(', ')
            self.write(')')
            self.open(node)
        else:
            self.write("private void CalculateModel(") if not node.name.startswith("init_") else self.write("public void Init(")
            self.write('SiriusQuality%s.DomainClass.%sState s, SiriusQuality%s.DomainClass.%sState s1, SiriusQuality%s.DomainClass.%sRate r, SiriusQuality%s.DomainClass.%sAuxiliary a, SiriusQuality%s.DomainClass.%sExogenous ex)'%(self.name, self.name,self.name,self.name,self.name, self.name,self.name,self.name, self.name,self.name))
            self.open(node)
            for arg in self.add_features(node) :
                if "feat" in dir(arg):
                    if arg.feat in ["IN","INOUT"] :
                        self.newline(node)
                        if self.model and arg.name not in self.modparam:
                            self.visit_decl(arg.pseudo_type)
                            self.write(" ")
                            self.write(arg.name)
                            if not node.name.startswith("init_"):
                                # "_t1" names read the previous-step state (s1).
                                if arg.name in self.states and not arg.name.endswith("_t1") :
                                    self.write(" = s.%s"%arg.name)
                                if arg.name in self.states and arg.name.endswith("_t1") :
                                    self.write(" = s1.%s"%arg.name[:-3])
                                if arg.name in self.rates:
                                    self.write(" = r.%s"%arg.name)
                                if arg.name in self.auxiliary:
                                    self.write(" = a.%s"%arg.name)
                                if arg.name in self.exogenous:
                                    self.write(" = ex.%s"%arg.name)
                            else:
                                if arg.pseudo_type[0] =="list":
                                    self.write(" = new List<%s>()"%(self.types[arg.pseudo_type[1]]))
                                elif arg.pseudo_type[0] =="array":
                                    self.write(" = new %s[%s]"%(self.types[arg.pseudo_type[1]], arg.elts[0].value if "value" in dir(arg.elts[0]) else arg.elts[0].name))
                            self.write(";")
        self.indentation -= 1
        self.body(node.block)
        self.newline(node)
        self.visit_return(node)
        self.close(node)
        self.newline(extra=1)

    def open(self, node):
        """Write an opening brace and increase the indentation level."""
        self.newline(node)
        self.write("{")
        self.newline(node)
        self.indentation += 1

    def close(self, node):
        """Decrease the indentation level and write the closing brace."""
        self.newline(node)
        self.indentation -= 1
        self.write("}")

    def visit_module(self, node):
        """Emit the whole strategy file: namespace, class declaration,
        constructor, metadata properties, parameter accessors, condition
        tests, Estimate, and finally the visited model body."""
        self.write("using SiriusQuality%s.DomainClass;"%self.name)
        self.newline(node)
        self.write("namespace SiriusQuality%s.Strategies"%self.name)
        self.open(node)
        self.write("public class %s : IStrategySiriusQuality%s"%(self.model.name, self.name))
        self.open(node)
        self.write("public %s()"%self.model.name)
        self.open(node)
        self.inOutputDesc(node)
        self.close(node)
        self.newline(extra=1)
        self.description(node)
        self.url(node)
        self.domain(node)
        self.modelType(node)
        self.isContext(node)
        self.isTimeStep(node)
        self.publisherdata(node)
        self.SetPublisherData(node)
        self.write("private ModellingOptionsManager _modellingOptionsManager;")
        self.newline(node)
        self.write("public ModellingOptionsManager ModellingOptionsManager")
        self.open(node)
        self.write("get { return _modellingOptionsManager; } ")
        self.close(node)
        self.newline(extra=1)
        self.getStrategyDomainClassesTypes(node)
        self.get_set_param(node)
        self.setParametersDefaultValue(node)
        self.SetStaticParametersVarInfoDefinitions(node)
        self.staticVarInfo(node)
        self.TestPostConditions(node)
        self.TestPreConditions(node)
        self.estimate(node)
        self.visit(node.body)
        self.close(node)  # closes the strategy class
        self.close(node)  # closes the namespace
class SiriusTrans(CsharpTrans):
""" This class used to generates states, rates, auxiliary and exogenous classes
for Sirius.
"""
def __init__(self, models):
self.models = models
CsharpTrans.__init__(self, self.models)
def using(self):
self.write("""
using System;
using System.Collections.Generic;
using CRA.ModelLayer.Core;
using System.Reflection;
using CRA.ModelLayer.ParametersManagement;
""")
def open(self, node):
self.newline(node)
self.write("{")
self.newline(node)
self.indentation += 1
def close(self, node):
self.newline(node)
self.indentation -= 1
self.write("}")
def constr(self, node, typ):
self.write('private ParametersIO _parametersIO;')
self.newline(extra=1)
self.write('public %s()'%typ)
self.open(node)
self.write('_parametersIO = new ParametersIO(this);')
self.close(node)
def copyConstr(self, nodes, typ):
self.write('public %s(%s toCopy, bool copyAll) // copy constructor '%(typ, typ))
self.open(nodes)
self.write('if (copyAll)')
self.open(nodes)
self.copyconstructor(nodes)
self.close(nodes)
self.close(nodes)
self.newline(extra = 1)
def description(self, node, typ):
self.write("public string Description")
self.open(node)
self.write('get { return "%s of the component";}'%typ)
self.close(node)
self.newline(extra=1)
def url(self, node):
self.write("public string URL")
self.open(node)
self.write('get { return "http://" ;}')
self.close(node)
self.newline(extra=1)
def propertiesDescription(self, node):
self.write('public virtual IDictionary<string, PropertyInfo> PropertiesDescription')
self.open(node)
self.write('get { return _parametersIO.GetCachedProperties(typeof(IDomainClass));}')
self.close(node)
self.newline(extra=1)
def clone(self, node):
self.write("public virtual Object Clone()")
self.open(node)
self.write('IDomainClass myclass = (IDomainClass) this.MemberwiseClone();')
self.newline(node)
self.write('_parametersIO.PopulateClonedCopy(myclass);')
self.newline(node)
self.write('return myclass;')
self.close(node)
def clearValue(self, node):
self.write("public virtual Boolean ClearValues()")
self.open(node)
for arg in node:
self.newline(node)
self.write(" _")
self.write(arg.name)
if arg.pseudo_type[0] =="list":
self.write(" = new List<%s>()"%(self.types[arg.pseudo_type[1]]))
elif arg.pseudo_type=="DateTime":
self.write(" = new DateTime()")
elif arg.pseudo_type[0] =="array":
self.write(" = new %s[%s]"%(self.types[arg.pseudo_type[1]], arg.elts[0].value if "value" in dir(arg.elts[0]) else arg.elts[0].name))
elif arg.pseudo_type == "str":
self.write(" = null")
else: self.write(" = default(%s)"%(self.types[arg.pseudo_type]))
self.write(";")
self.newline(node);
self.write("return true;")
self.close(node)
self.newline(extra=1)
def generate(self, nodes, typ, name):
self.using()
self.write("namespace SiriusQuality%s.DomainClass"%name)
self.open(nodes)
self.write("public class %s : ICloneable, IDomainClass"%typ)
self.newline()
self.write("{")
self.indentation += 1
self.newline()
self.private(nodes)
self.newline()
self.constr(nodes, typ) ########### constructor
self.newline(extra = 1)
self.copyConstr(nodes, typ)###### copy constructor
self.getset(nodes)
self.newline(extra=1)
self.description(nodes, typ)
self.url(nodes)
self.propertiesDescription(nodes)
self.clearValue(nodes)
self.clone(nodes)
self.indentation -= 1
self.newline()
self.write('}')
self.close(nodes)
    def staticVarInfoDef(self, node):
        # One private static VarInfo field per model variable.
        for n in node:
            self.write('static VarInfo _%s = new VarInfo();'%n.name)
            self.newline(1)
        self.newline(extra = 1)
    def varInfoConstrctor(self, node, typ):
        # Static constructor that triggers the one-time DescribeVariables().
        self.write("static %sVarInfo()"%typ)
        self.open(node)
        self.write("%sVarInfo.DescribeVariables();"%typ)
        self.close(node)
        self.newline(extra = 1)
    def infoDescription(self, node, typ):
        # Description property of the generated VarInfo class.
        self.write('public virtual string Description')
        self.open(node)
        self.write('get { return "%s Domain class of the component";}'%typ)
        self.close(node)
        self.newline(extra = 1)
    def domainClassOfReference(self, node, typ):
        # Name of the domain class described by this VarInfo class.
        self.write('public string DomainClassOfReference')
        self.open(node)
        self.write('get { return "%s";}'%typ)
        self.close(node)
        self.newline(extra = 1)
    def getVarInfo(self, node):
        # Public static read-only accessor for each private VarInfo field.
        for n in node:
            self.write('public static VarInfo %s'%n.name)
            self.open(node)
            self.write('get { return _%s;}'%n.name)
            self.close(node)
            self.newline(extra = 1)
        self.newline(extra = 1)
def describeVariables(self, node):
self.write('static void DescribeVariables()')
self.open(node)
for pa in node :
self.write('_%s.Name = "%s";'%(pa.name, pa.name)); self.newline(node)
self.write('_%s.Description = "%s";'%(pa.name, pa.description)); self.newline(node)
self.write('_%s.MaxValue = %s;'%(pa.name,pa.max if ("max" in dir(pa) and pa.max is not None) or pa.max=="" else getdefault(pa))); self.newline(node)
self.write('_%s.MinValue = %s;'%(pa.name, pa.min if ("min" in dir(pa) and pa.min is not None) or pa.min=="" else getdefault(pa))); self.newline(node)
self.write('_%s.DefaultValue = %s;'%(pa.name, getdefault(pa))); self.newline(node)
self.write('_%s.Units = "%s";'%(pa.name, pa.unit if ("unit" in dir(pa) and pa.unit is not None) else "dimensionless")); self.newline(node)
self.write('_%s.ValueType = VarInfoValueTypes.GetInstanceForName("%s");'%(pa.name, param_datatype[pa.datatype])); self.newline(node)
self.newline(extra=1)
self.close(node)
self.newline(extra = 1)
    def generateVarInfo(self, nodes, typ, name):
        # Assemble the companion <typ>VarInfo C# class carrying the metadata
        # (name, range, unit, value type) of every variable in the group.
        self.using()
        self.write("namespace SiriusQuality%s.DomainClass"%name)
        self.open(nodes)
        self.write('public class %sVarInfo : IVarInfoClass'%typ)
        self.open(nodes)
        self.staticVarInfoDef(nodes)
        self.varInfoConstrctor(nodes, typ)
        self.infoDescription(nodes, typ)
        self.url(nodes)
        self.domainClassOfReference(nodes, typ)
        self.getVarInfo(nodes)
        self.describeVariables(nodes)
        self.close(nodes)
        self.close(nodes)
def to_struct_sirius(models, rep, name):
    # Generate the four C# domain-class files (State/Rate/Auxiliary/Exogenous)
    # and their matching VarInfo files into directory *rep*.
    generator = SiriusTrans(models)
    generator.model2Node()
    def createdc(states, catvar):
        # Write <name><catvar>.cs for one variable category.
        generator.result = []
        generator.generate(states, "%s%s"%(name,catvar), name)
        z= ''.join(generator.result)
        filename = Path(os.path.join(rep, "%s%s.cs"%(name, catvar)))
        with open(filename, "wb") as tg_file:
            tg_file.write(z.encode('utf-8'))
    states = generator.node_states
    createdc(states,"State")
    rates = generator.node_rates
    createdc(rates,"Rate")
    auxiliary = generator.node_auxiliary
    createdc(auxiliary,"Auxiliary")
    exogenous = generator.node_exogenous
    createdc(exogenous,"Exogenous")
    def varinfo(states, catvar):
        # Write <name><catvar>VarInfo.cs for one variable category.
        generator.result = []
        generator.generateVarInfo(states, "%s%s"%(name,catvar), name)
        z= ''.join(generator.result)
        filename = Path(os.path.join(rep, "%s%sVarInfo.cs"%(name, catvar)))
        with open(filename, "wb") as tg_file:
            tg_file.write(z.encode('utf-8'))
    # NOTE(review): domain classes use node_* attributes but VarInfo files use
    # the bare states/rates/... attributes -- confirm both exist on SiriusTrans.
    states = generator.states
    varinfo(states,"State")
    rates = generator.rates
    varinfo(rates,"Rate")
    auxiliary = generator.auxiliary
    varinfo(auxiliary,"Auxiliary")
    exogenous = generator.exogenous
    varinfo(exogenous,"Exogenous")
''' Csharp composite'''
class SiriusCompo(CsharpCompo):
    """Generates the SiriusQuality/BioMA composite component C# sources
    (component class, IStrategy interface and wrapper) for a Crop2ML model.
    """
    def __init__(self, tree=None, model=None, name=None):
        # The attributes are also set by the base class; assignment here keeps
        # them available even before the base __init__ completes.
        self.model=model
        self.tree = tree
        self.name = name
        CsharpCompo.__init__(self, tree, model, name)
    def usingBioma(self):
        # Emit the standard BioMA using-directives header verbatim.
        self.write("""
using System;
using System.Collections.Generic;
using System.Linq;
using System.Xml;
using CRA.ModelLayer.MetadataTypes;
using CRA.ModelLayer.Core;
using CRA.ModelLayer.Strategy;
using System.Reflection;
using VarInfo=CRA.ModelLayer.Core.VarInfo;
using Preconditions=CRA.ModelLayer.Core.Preconditions;
using CRA.AgroManagement;
""")
    def desc(self,node, n, inp, vartype) :
        # Emit one C# PropertyDescription (named pd<n>) for variable *inp* and
        # add it to the list named by *vartype* ("_inputs0_0"/"_outputs0_0").
        # Names ending in "_t1" map onto the base "<name>" domain-class property.
        self.write("PropertyDescription pd%s = new PropertyDescription();"%n)
        self.newline(node)
        self.write("pd%s.DomainClassType = typeof(SiriusQuality%s.DomainClass.%s%s);"%(n, self.name,self.name,inp.variablecategory.capitalize()))
        self.newline(node)
        self.write('pd%s.PropertyName = "%s";'%(n,inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('pd%s.PropertyType = (SiriusQuality%s.DomainClass.%s%sVarInfo.%s).ValueType.TypeForCurrentValue;'%(n, self.name,self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('pd%s.PropertyVarInfo =(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);'%(n,self.name,self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('%s.Add(pd%s);'%(vartype, n))
    def constructor(self, node):
        # Emit the C# component constructor: builds the ModellingOptions
        # (parameters, input/output property descriptions, associated
        # strategies) and initialises the ModellingOptionsManager.
        self.write("public %sComponent()"%self.model.name)
        self.open(node)
        self.write("ModellingOptions mo0_0 = new ModellingOptions();")
        self.newline(node)
        self.write("//Parameters")
        self.newline(node)
        self.write("List<VarInfo> _parameters0_0 = new List<VarInfo>();")
        n = 1
        # One CompositeStrategyVarInfo per (strategy, parameter) pair.
        for p in self.node_param:
            for j in self.get_mo(p.name):
                self.newline(node)
                self.write('VarInfo v%s = new CompositeStrategyVarInfo(_%s, "%s");'%(n,j,p.name))
                self.newline(node)
                self.write("_parameters0_0.Add(v%s);"%n)
                n = n+1
        self.newline(node)
        self.write("List<PropertyDescription> _inputs0_0 = new List<PropertyDescription>();")
        self.newline(node)
        n=1
        # Inputs: only variables that carry a variablecategory get a
        # PropertyDescription.
        for inp in self.model.inputs:
            if "variablecategory" in dir(inp) :
                self.desc(node,n,inp, "_inputs0_0")
                self.write("")
                self.newline(node)
                n = n+1
        self.newline(node)
        self.write('mo0_0.Inputs=_inputs0_0;')
        self.newline(node)
        self.write("List<PropertyDescription> _outputs0_0 = new List<PropertyDescription>();")
        self.newline(node)
        # Outputs continue the pd<n> numbering started by the inputs.
        for out in self.model.outputs:
            if "variablecategory" in dir(out):
                self.desc(node,n,out, "_outputs0_0")
                self.write("")
                self.newline(node)
                n = n+1
        self.newline(node)
        self.write('mo0_0.Outputs=_outputs0_0;')
        self.newline(node)
        self.write("List<string> lAssStrat0_0 = new List<string>();")
        self.newline(node)
        for m in self.model.model:
            name = m.name
            self.write("lAssStrat0_0.Add(typeof(SiriusQuality%s.Strategies.%s).FullName);"%(self.model.name, name))
            self.newline(1)
        self.write("mo0_0.AssociatedStrategies = lAssStrat0_0;")
        self.newline(1)
        self.write("_modellingOptionsManager = new ModellingOptionsManager(mo0_0);")
        self.newline(1)
        self.write("SetStaticParametersVarInfoDefinitions();")
        self.newline(1)
        self.write("SetPublisherData();")
        self.close(node)
        self.newline(extra=1)
    def description(self, node):
        # IStrategy.Description property, taken from the model abstract
        # (newlines stripped so the C# string literal stays on one line).
        self.write("public string Description")
        self.open(node)
        self.write('get { return "%s" ;}'%self.model.description.Abstract.replace("\n", ""))
        self.close(node)
        self.newline(extra=1)
    def url(self, node):
        # IStrategy.URL property; empty string when the model has no url.
        self.write("public string URL")
        self.open(node)
        self.write('get { return "%s" ;}'%(self.model.description.url if "url" in dir(self.model.description) else ""))
        self.close(node)
        self.newline(extra=1)
    def domain(self, node):
        # IStrategy.Domain property (always empty for generated components).
        self.write("public string Domain")
        self.open(node)
        self.write('get { return "";}')
        self.close(node)
        self.newline(extra=1)
    def modelType(self, node):
        # IStrategy.ModelType property (always empty for generated components).
        self.write("public string ModelType")
        self.open(node)
        self.write('get { return "";}')
        self.close(node)
        self.newline(extra=1)
    def isContext(self, node):
        # Generated components are never context strategies.
        self.write("public bool IsContext")
        self.open(node)
        self.write('get { return false;}')
        self.close(node)
        self.newline(extra=1)
    def isTimeStep(self, node):
        # TimeStep property returning an empty list (no fixed time steps).
        self.write("public IList<int> TimeStep")
        self.open(node)
        self.write("get")
        self.open(node)
        self.write("IList<int> ts = new List<int>();")
        self.newline(node)
        self.write("return ts;")
        self.close(node)
        self.close(node)
        self.newline(extra=1)
    def publisherdata(self, node):
        # Backing field and property for the component's PublisherData.
        self.write("private PublisherData _pd;")
        self.newline(node)
        self.write("public PublisherData PublisherData")
        self.open(node)
        self.write("get { return _pd;} ")
        self.close(node)
        self.newline(extra=1)
    def modelingOptions(self, node):
        # Backing field and property for the ModellingOptionsManager built in
        # the constructor.
        self.write("private ModellingOptionsManager _modellingOptionsManager;")
        self.newline(node)
        self.write("public ModellingOptionsManager ModellingOptionsManager")
        self.open(node)
        self.write("get { return _modellingOptionsManager; } ")
        self.close(node)
        self.newline(extra=1)
    def SetPublisherData(self, node):
        # Fill PublisherData from the model metadata (authors/institution).
        self.write("private void SetPublisherData()")
        self.open(node)
        self.write("_pd = new CRA.ModelLayer.MetadataTypes.PublisherData();")
        self.newline(node)
        self.write('_pd.Add("Creator", "%s");'%self.model.description.Authors)
        self.newline(node)
        self.write('_pd.Add("Date", "");')
        self.newline(node)
        self.write('_pd.Add("Publisher", "%s");'%self.model.description.Institution)
        self.close(node)
        self.newline(extra=1)
    def getStrategyDomainClassesTypes(self, node):
        # List the domain-class types used by the strategy.
        # NOTE(review): the emitted list contains typeof(...State) twice --
        # confirm whether the duplicate is intentional (BioMA convention for
        # current/previous state) or a copy-paste slip.
        self.write("public IEnumerable<Type> GetStrategyDomainClassesTypes()")
        self.open(node)
        self.write("return new List<Type>() { typeof(SiriusQuality%s.DomainClass.%sState), typeof(SiriusQuality%s.DomainClass.%sState), typeof(SiriusQuality%s.DomainClass.%sRate), typeof(SiriusQuality%s.DomainClass.%sAuxiliary), typeof(SiriusQuality%s.DomainClass.%sExogenous)};"%(self.name, self.name,self.name, self.name,self.name, self.name,self.name, self.name, self.name, self.name))
        self.close(node)
        self.newline(extra=1)
    def SetParametersDefaultValue(self, node):
        # Reset component and per-strategy parameters to their defaults.
        self.write("public void SetParametersDefaultValue()")
        self.open(node)
        self.write("_modellingOptionsManager.SetParametersDefaultValue();")
        for m in self.model.model:
            self.newline(node)
            self.write("_%s.SetParametersDefaultValue();"%m.name)
        self.close(node)
        self.newline(extra=1)
def varinfodef(self, node, pa):
self.write('%sVarInfo.Name = "%s";'%(pa.name, pa.name)); self.newline(node)
self.write('%sVarInfo.Description = "%s";'%(pa.name, pa.description)); self.newline(node)
self.write('%sVarInfo.MaxValue = %s;'%(pa.name,pa.max if pa.max is not None else getdefault(pa))); self.newline(node)
self.write('%sVarInfo.MinValue = %s;'%(pa.name, pa.min if pa.min is not None else getdefault(pa))); self.newline(node)
self.write('%sVarInfo.DefaultValue = %s;'%(pa.name, getdefault(pa))); self.newline(node)
self.write('%sVarInfo.Units = "%s";'%(pa.name, pa.unit if ("unit" in dir(pa) and pa.unit is not None) else "dimensionless")); self.newline(node)
self.write('%sVarInfo.ValueType = VarInfoValueTypes.GetInstanceForName("%s");'%(pa.name, param_datatype[pa.datatype])); self.newline(node)
    def SetStaticParametersVarInfoDefinitions(self, node):
        # Emit the static method that fills every parameter's VarInfo.
        self.write("private static void SetStaticParametersVarInfoDefinitions()")
        self.open(node)
        for pa in self.params:
            self.newline(extra=1)
            self.varinfodef(node, pa)
        self.close(node)
        self.newline(extra=1)
    def staticVarInfo(self, node):
        # Static accessor per parameter, delegating to the VarInfo exposed by
        # the first strategy that owns the parameter.
        for pa in self.node_param:
            self.write("public static VarInfo %sVarInfo"%pa.name)
            self.open(node)
            self.write("get { return SiriusQuality%s.Strategies.%s.%sVarInfo;} "%(self.model.name,self.get_mo(pa.name)[0],pa.name))
            self.close(node)
            self.newline(extra=1)
    def TestPostConditions(self, node):
        # Emit the C# TestPostConditions method: loads output current values
        # into the static VarInfos, builds range-based conditions for each
        # output, delegates to associated strategies and verifies the lot.
        self.write("public string TestPostConditions(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex,string callID)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("//Set current values of the outputs to the static VarInfo representing the output properties of the domain classes")
        for out in self.model.outputs:
            self.newline(node)
            self.write('SiriusQuality%s.DomainClass.%s%sVarInfo.%s.CurrentValue=%s.%s;'%(self.name, self.name, out.variablecategory.capitalize(),out.name,category[out.variablecategory], out.name))
        self.newline(extra=1)
        self.write('ConditionsCollection prc = new ConditionsCollection();'); self.newline(node)
        self.write('Preconditions pre = new Preconditions(); ' ); self.newline(node)
        self.newline(extra=1)
        # Condition numbering continues after the inputs so r<n> names do not
        # clash with TestPreConditions conventions.
        n = len(self.model.inputs) + 1
        for out in self.model.outputs:
            self.newline(node)
            self.write("RangeBasedCondition r%s = new RangeBasedCondition(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);"%(n,self.name, self.name, out.variablecategory.capitalize(), out.name)); self.newline(node)
            self.write("if(r%s.ApplicableVarInfoValueTypes.Contains( SiriusQuality%s.DomainClass.%s%sVarInfo.%s.ValueType)){prc.AddCondition(r%s);}"%(n, self.name, self.name, out.variablecategory.capitalize(), out.name, n)); self.newline(node)
            n = n+1
        self.newline(extra=1)
        self.write('string ret = "";')
        self.newline(node)
        for m in self.model.model:
            self.write('ret += _%s.TestPostConditions(s, s1, r, a, ex, " strategy SiriusQuality%s.Strategies.%s");'%(m.name, self.model.name, self.name ))
            self.newline(node)
        self.write('if (ret != "") { pre.TestsOut(ret, true, " postconditions tests of associated classes"); }')
        self.newline(extra=1)
        self.write('string postConditionsResult = pre.VerifyPostconditions(prc, callID); if (!string.IsNullOrEmpty(postConditionsResult)) { pre.TestsOut(postConditionsResult, true, "PostConditions errors in strategy " + this.GetType().Name); } return postConditionsResult;')
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)");
        self.open(node)
        self.write('string msg = "Component SiriusQuality.%s, " + this.GetType().Name + ": Unhandled exception running post-condition test. ";'%self.name); self.newline(node)
        self.write('throw new Exception(msg, exception);'); self.newline(node)
        self.close(node)
        self.close(node)
        self.newline(extra=1)
    def TestPreConditions(self, node):
        # Emit the C# TestPreConditions method: loads input current values
        # into the static VarInfos, builds range-based conditions for inputs
        # and parameters, delegates to associated strategies and verifies.
        self.write("public string TestPreConditions(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex,string callID)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("//Set current values of the inputs to the static VarInfo representing the inputs properties of the domain classes")
        for inp in self.model.inputs:
            if "variablecategory" in dir(inp):
                self.newline(node)
                # "_t1" variables map onto the base "<name>" property.
                self.write('SiriusQuality%s.DomainClass.%s%sVarInfo.%s.CurrentValue=%s.%s;'%(self.name, self.name, inp.variablecategory.capitalize(),inp.name if not inp.name.endswith("_t1") else inp.name[:-3],category[inp.variablecategory], inp.name if not inp.name.endswith("_t1") else inp.name[:-3]))
        self.newline(node)
        self.write('ConditionsCollection prc = new ConditionsCollection();'); self.newline(node)
        self.write('Preconditions pre = new Preconditions(); ' ); self.newline(node)
        n = 1
        for inp in self.model.inputs:
            if "variablecategory" in dir(inp):
                self.newline(node)
                self.write("RangeBasedCondition r%s = new RangeBasedCondition(SiriusQuality%s.DomainClass.%s%sVarInfo.%s);"%(n,self.name, self.name, inp.variablecategory.capitalize(), inp.name if not inp.name.endswith("_t1") else inp.name[:-3])); self.newline(node)
                self.write("if(r%s.ApplicableVarInfoValueTypes.Contains( SiriusQuality%s.DomainClass.%s%sVarInfo.%s.ValueType)){prc.AddCondition(r%s);}"%(n, self.name, self.name, inp.variablecategory.capitalize(), inp.name if not inp.name.endswith("_t1") else inp.name[:-3], n)); self.newline(node)
                n = n+1
        self.newline(extra=1)
        # Parameters are range-checked through the ModellingOptionsManager.
        for p in self.params:
            self.write('prc.AddCondition(new RangeBasedCondition(_modellingOptionsManager.GetParameterByName("%s")));'%p.name)
            self.newline(node)
        self.write('string ret = "";')
        self.newline(node)
        for m in self.model.model:
            self.write('ret += _%s.TestPreConditions(s, s1, r, a, ex, " strategy SiriusQuality%s.Strategies.%s");'%(m.name, self.model.name, self.name ))
            self.newline(node)
        self.write('if (ret != "") { pre.TestsOut(ret, true, " preconditions tests of associated classes"); }')
        self.newline(extra=1)
        self.write('string preConditionsResult = pre.VerifyPreconditions(prc, callID); if (!string.IsNullOrEmpty(preConditionsResult)) { pre.TestsOut(preConditionsResult, true, "PreConditions errors in component " + this.GetType().Name); } return preConditionsResult;')
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)");
        self.open(node)
        self.write('string msg = "Component SiriusQuality.%s, " + this.GetType().Name + ": Unhandled exception running pre-condition test. ";'%self.name); self.newline(node)
        self.write('throw new Exception(msg, exception);'); self.newline(node)
        self.close(node)
        self.close(node)
        self.newline(extra=1)
    def estimate(self, node):
        # Emit the public Estimate entry point: wraps CalculateModel in a
        # try/catch that rethrows with component context.
        self.write("public void Estimate(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("try")
        self.open(node)
        self.write("CalculateModel(s, s1, r, a, ex);")
        self.close(node)
        self.newline(node)
        self.write("catch (Exception exception)")
        self.open(node)
        self.write('string msg = "Error in component SiriusQuality%s, strategy: " + this.GetType().Name + ": Unhandled exception running model. "+exception.GetType().FullName+" - "+exception.Message;'%self.name)
        self.newline(node)
        self.write('throw new Exception(msg, exception);')
        self.close(node)
        self.close(node)
        self.newline(extra=1)
    def calculateModel(self, node):
        # Emit the private CalculateModel that delegates to the associated
        # strategy classes.
        self.write("private void CalculateModel(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        self.open(node)
        self.write("EstimateOfAssociatedClasses(s, s1, r, a, ex);")
        self.close(node)
        self.newline(extra=1)
    def copy_Constructor(self, node):
        # Emit the C# copy constructor of the component, delegating the field
        # copies to the inherited copyconstructor() emitter.
        self.write('public %sComponent(%sComponent toCopy): this() // copy constructor '%(self.model.name, self.model.name))
        self.open(node)
        self.copyconstructor(self.node_param)
        self.close(node)
    def visit_module(self, node):
        # Top-level visitor: emits the whole <Model>Component C# class in
        # order (usings, namespace, members, visited function bodies, copy
        # constructor).
        self.usingBioma()
        self.newline(node)
        self.write("using SiriusQuality%s.DomainClass;"%self.model.name)
        self.newline(node)
        self.write("namespace SiriusQuality%s.Strategies"%self.model.name)
        self.open(node)
        self.write("public class %sComponent : IStrategySiriusQuality%s"%(self.model.name, self.model.name))
        self.open(node)
        self.constructor(node)
        self.description(node)
        self.url(node)
        self.domain(node)
        self.modelType(node)
        self.isContext(node)
        self.isTimeStep(node)
        self.publisherdata(node)
        self.SetPublisherData(node)
        self.modelingOptions(node)
        self.getStrategyDomainClassesTypes(node)
        self.getsetParam(node,self.node_param)
        self.SetParametersDefaultValue(node)
        self.SetStaticParametersVarInfoDefinitions(node)
        self.staticVarInfo(node)
        self.TestPostConditions(node)
        self.TestPreConditions(node)
        self.estimate(node)
        self.calculateModel(node)
        self.createModelInstances()
        self.newline(extra=1)
        self.visit(node.body)
        self.newline(extra=1)
        self.copy_Constructor(self.node_param)
        self.newline(node)
        self.close(node)
        self.close(node)
    def visit_function_definition(self, node):
        # "model*" functions become the private EstimateOfAssociatedClasses
        # body; anything else is emitted as the public Init method.
        if node.name.startswith("model"):
            self.write("private void EstimateOfAssociatedClasses(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
        else:
            self.write("public void Init(SiriusQuality%s.DomainClass.%sState s,SiriusQuality%s.DomainClass.%sState s1,SiriusQuality%s.DomainClass.%sRate r,SiriusQuality%s.DomainClass.%sAuxiliary a,SiriusQuality%s.DomainClass.%sExogenous ex)"%(self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name,self.name))
            self.init=True
        self.open(node)
        self.visit(node.block)
        self.close(node)
        self.newline(extra=1)
    def visit_implicit_return(self, node):
        # Generated C# methods are void; an implicit return is just a newline.
        self.newline(node)
    def visit_assignment(self, node):
        # Calls to "model_<x>(...)" translate into "_<x>.Estimate(...)";
        # ordinary assignments are emitted as "target = value;".
        if "function" in dir(node.value) and node.value.function.split('_')[0]=="model":
            name = node.value.function.split('model_')[1]
            self.write("_%s.Estimate(s,s1, r, a, ex);"%(name))
            self.newline(node)
        else:
            self.newline(node)
            self.visit(node.target)
            self.write(' = ')
            self.visit(node.value)
            self.write(";")
            self.newline(node)
    def interfaceStrategy(self, node):
        # Emit the IStrategySiriusQuality<Model> C# interface that the
        # component and its strategies implement.
        self.write('using System;'); self.newline(1)
        self.write('using CRA.AgroManagement;'); self.newline(1)
        self.write('using CRA.ModelLayer.Strategy;'); self.newline(1)
        self.write('namespace SiriusQuality%s.DomainClass'%(self.model.name))
        self.open(node)
        self.write('public interface IStrategySiriusQuality%s : IStrategy'%self.model.name)
        self.open(node)
        self.write('void Estimate( %sState s, %sState s1, %sRate r, %sAuxiliary a, %sExogenous ex);'%(self.model.name, self.model.name, self.model.name, self.model.name, self.model.name))
        self.newline(extra=1)
        self.write('string TestPreConditions( %sState s, %sState s1, %sRate r, %sAuxiliary a, %sExogenous ex, string callID);'%(self.model.name, self.model.name, self.model.name, self.model.name, self.model.name))
        self.newline(extra=1)
        self.write('string TestPostConditions( %sState s, %sState s1, %sRate r, %sAuxiliary a, %sExogenous ex, string callID);'%(self.model.name, self.model.name, self.model.name, self.model.name, self.model.name))
        self.newline(extra=1)
        self.write('void SetParametersDefaultValue();')
        self.close(node)
        self.close(node)
    def wrapper(self):
        # Emit the SiriusModel <Model>Wrapper class skeleton, delegating each
        # section (fields, constructors, outputs, init, params, estimate) to
        # the dedicated *Wrap emitters.
        self.write("using SQCrop2ML_%s.DomainClass;"%self.model.name)
        self.newline(1)
        self.write("using SQCrop2ML_%s.Strategies;"%self.model.name)
        self.newline(extra=1)
        self.write("namespace SiriusModel.Model.%s"%self.model.name)
        self.newline(1)
        self.write("{")
        self.newline(1)
        self.indentation += 1
        self.write("class %sWrapper :  UniverseLink"%self.model.name)
        self.newline(1)
        self.write("{")
        self.newline(1)
        self.indentation += 1
        self.privateWrap()
        self.constrWrap()
        self.newline(extra=1)
        self.outputWrap()
        self.newline(extra=1)
        self.copyconstrWrap()
        self.newline(extra=1)
        self.initWrap()
        self.newline(extra=1)
        self.loadParamWrap()
        self.newline(extra=1)
        self.estimateWrap()
        self.newline(extra=1)
        self.indentation -= 1
        self.write("}")
        self.newline(extra=1)
        self.indentation -= 1
        self.write("}")
    def constrWrap(self):
        # Emit the wrapper constructor: allocates the four domain-class
        # instances plus the component and loads the parameters.
        name = self.model.name
        self.write("public %sWrapper(Universe universe) : base(universe)"%(name))
        self.newline(1)
        self.write("{")
        self.newline(1)
        self.indentation += 1
        self.write("s = new %sState();"%(name))
        self.newline(1)
        self.write("r = new %sRate();"%(name))
        self.newline(1)
        self.write("a = new %sAuxiliary();"%(name))
        self.newline(1)
        self.write("ex = new %sExogenous();"%(name))
        self.newline(1)
        self.write("%sComponent = new %s();"%(name.lower(), name))
        self.newline(1)
        self.write("loadParameters();")
        self.newline(1)
        self.indentation -= 1
        self.write("}")
    def copyconstrWrap(self):
        # Emit the wrapper copy constructor: clones the domain-class
        # instances, and the component itself only when copyAll is set.
        self.write("public %sWrapper(Universe universe, %sWrapper toCopy, bool copyAll) : base(universe)"%(self.model.name,self.model.name))
        self.newline(1)
        self.write("{")
        self.newline(1)
        self.indentation += 1
        self.write("s = (toCopy.s != null) ? new %sState(toCopy.s, copyAll) : null;"%(self.model.name))
        self.newline(1)
        self.newline(1)
        self.write("r = (toCopy.r != null) ? new %sRate(toCopy.r, copyAll) : null;"%(self.model.name))
        self.newline(1)
        self.write("a = (toCopy.a != null) ? new %sAuxiliary(toCopy.a, copyAll) : null;"%(self.model.name))
        self.newline(1)
        self.write("ex = (toCopy.ex != null) ? new %sExogenous(toCopy.ex, copyAll) : null;"%(self.model.name))
        self.newline(1)
        self.write("if (copyAll)")
        self.newline(1)
        self.write("{")
        self.newline(1)
        self.indentation += 1
        self.write("%sComponent = (toCopy.%sComponent != null) ? new %s(toCopy.%sComponent) : null;"%(self.model.name.lower(),self.model.name.lower(),self.model.name,self.model.name.lower()))
        self.newline(1)
        self.indentation -= 1
        self.write("}")
        self.newline(1)
        self.indentation -= 1
        self.write("}")
def to_wrapper_sirius(models, rep, name):
    # Generate the SiriusModel wrapper file (<name>Wrapper.cs) and the
    # strategy interface file (IStrategySiriusQuality<name>.cs) into *rep*.
    # Returns 0 on completion.
    generator = SiriusCompo(model = models)
    # Seed the output with the using-header the wrapper emitter does not write.
    generator.result=[u"using System;\nusing System.Collections.Generic;\nusing System.Linq;\n"]
    generator.model2Node()
    generator.wrapper()
    z= ''.join(generator.result)
    filename = Path(os.path.join(rep, "%sWrapper.cs"%name))
    with open(filename, "wb") as tg2_file:
        tg2_file.write(z.encode('utf-8'))
    filename = Path(os.path.join(rep, "IStrategySiriusQuality%s.cs"%name))
    generator2 = SiriusCompo(model = models)
    generator2.interfaceStrategy(1)
    z= ''.join(generator2.result)
    with open(filename, "wb") as tg2_file:
        tg2_file.write(z.encode('utf-8'))
    return 0
acee8e63dd220ed2a94d335ceb725c6c80ae50a8 | 2,579 | py | Python | scripts/delete instances.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 14 | 2019-11-18T12:01:03.000Z | 2021-09-15T15:29:50.000Z | scripts/delete instances.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 52 | 2019-09-09T14:22:41.000Z | 2021-09-29T08:29:24.000Z | scripts/delete instances.py | MuhammadIsmailShahzad/ckan-cloud-operator | 35a4ca88c4908d81d1040a21fca8904e77c4cded | [
"MIT"
] | 8 | 2019-10-05T12:46:25.000Z | 2021-09-15T15:13:05.000Z | import os
import tempfile
from ruamel import yaml
from ckan_cloud_operator import logs
from ckan_cloud_operator.providers.ckan.instance import manager as ckan_instance_manager
from dataflows import Flow, dump_to_path, printer
from ckan_cloud_operator.helpers import scripts as scripts_helpers
# id or name of instances to delete
INSTANCE_IDS_OR_NAMES = os.environ.get('INSTANCE_IDS_OR_NAMES', '')
# keep empty to do a dry run and get the code from the output
APPROVE_CODE = os.environ.get('APPROVE_CODE', '')
def delete_instances(instance_ids_or_names, approve_code):
    """Two-phase CKAN instance deletion, yielding one row dict per step.

    Phase 1 always performs a dry run and collects the affected ids/names.
    Phase 2 (actual deletion) only runs when *approve_code* matches the
    file-based approval code for exactly that id set; otherwise a new
    approval code is logged for the operator to re-run with.
    """
    # scripts_helpers.check_file_based_approval_code(approve_code)
    dry_run_generator = ckan_instance_manager.delete_instances(instance_ids_or_names=instance_ids_or_names, dry_run=True)
    instance_ids = set()
    # NOTE(review): instance_names is never populated -- names are added to
    # instance_ids below, and instance_ids alone feeds the approval code.
    # Confirm whether mixing names into the id set is intentional.
    instance_names = set()
    while True:
        try:
            instance = next(dry_run_generator)
            if instance.get('id'):
                instance_ids.add(instance['id'])
            if instance.get('name'):
                instance_ids.add(instance['name'])
            logs.info("dry run", **instance)
        except StopIteration:
            break
    # NOTE(review): the loop above appears to drain dry_run_generator, so this
    # next() would raise StopIteration (a RuntimeError inside a generator
    # under PEP 479) -- verify against the manager generator's contract.
    res = next(dry_run_generator)
    logs.info("dry run", **res)
    # 'instance' here is the last instance seen in the dry-run loop.
    yield {"dry-run": True, **instance, **res}
    if approve_code:
        assert scripts_helpers.check_file_based_approval_code(approve_code, {'instance-ids': instance_ids}), \
            'invalid approval code'
        generator = ckan_instance_manager.delete_instances(instance_ids_or_names=instance_ids)
        while True:
            try:
                instance = next(generator)
                logs.info('Deleting instance', **instance)
            except StopIteration:
                break
        # NOTE(review): same post-drain next() concern as above.
        res = next(generator)
        logs.info(**res)
        yield {"dry-run": False, **instance, **res}
    else:
        # Dry run only: log the approval code required for the real deletion.
        approve_code = scripts_helpers.create_file_based_approval_code({'instance-ids': instance_ids})
        logs.important_log(logs.INFO, f'APPROVE_CODE={approve_code}', instance_ids=instance_ids)
def main(instance_ids_or_names, approve_code):
    """Parse the comma-separated instance list and run the deletion dataflow.

    Rows produced by delete_instances() are dumped to
    data/delete_instances and printed to the console.
    """
    ids = []
    for raw_token in instance_ids_or_names.split(','):
        token = raw_token.strip()
        if token:
            ids.append(token)
    code = approve_code.strip()
    logs.info(instance_ids_or_names=ids, approve_code=code)
    flow = Flow(
        delete_instances(ids, code),
        dump_to_path('data/delete_instances'),
        printer(num_rows=9999),
    )
    flow.process()
if __name__ == '__main__':
    # Script entry point; both arguments come from environment variables.
    main(INSTANCE_IDS_OR_NAMES, APPROVE_CODE)
| 39.075758 | 121 | 0.690965 |
acee8fc3a63cffec6e1563889d744460aa5fd179 | 11,108 | py | Python | procgame/desktop/desktop_pysdl2.py | horseyhorsey/SkeletonGameVisualPinball10 | 8ae19ce99d143c61f0bcb9e00259137e96a39b4b | [
"MIT"
] | 1 | 2019-02-12T15:43:49.000Z | 2019-02-12T15:43:49.000Z | procgame/desktop/desktop_pysdl2.py | horseyhorsey/SkeletonGameVisualPinball10 | 8ae19ce99d143c61f0bcb9e00259137e96a39b4b | [
"MIT"
] | null | null | null | procgame/desktop/desktop_pysdl2.py | horseyhorsey/SkeletonGameVisualPinball10 | 8ae19ce99d143c61f0bcb9e00259137e96a39b4b | [
"MIT"
] | null | null | null | import sys
import procgame
import pinproc
from threading import Thread
import random
import string
import time
import locale
import math
import copy
import ctypes
from .. import config
import os
from time import sleep
try:
import serial
except Exception, e:
print "pySerial not found; RGBDMD support will be unavailable"
from procgame.events import EventManager
try:
from ..dmd import sdl2_displaymanager
from ..dmd.sdl2_displaymanager import *
import sdl2.ext
import pygame
except ImportError:
print "PySDL2 is required, but not found."
raise
class Desktop():
    """The :class:`Desktop` class helps manage interaction with the desktop, providing both a windowed
    representation of the DMD, as well as translating keyboard input into pyprocgame events."""
    exit_event_type = 99
    """Event type sent when Ctrl-C is received."""
    # NOTE(review): key_map is a class-level dict and add_key_map() mutates it
    # in place, so key mappings are shared across Desktop instances -- confirm
    # that is intended (clear_key_map() rebinds a per-instance dict instead).
    key_map = {}
    # Default DMD dot-matrix dimensions; overridden from config in __init__.
    dots_w = 128
    dots_h = 32
    screen_scale = 2 # this is the factor to pygame scale the display. 192x96 (x6) = 1152x576
    def __init__(self):
        # Reads all display-related settings from the global config, creates
        # the SDL window (setup_window), and either opens the serial link to a
        # physical RGB DMD or builds the dot-grid overlay texture.
        print 'Desktop init begun.'
        self.ctrl = 0
        self.i = 0
        self.key_events = []
        self.dots_w = config.value_for_key_path(keypath='dmd_dots_w', default=128)
        self.dots_h = config.value_for_key_path(keypath='dmd_dots_h', default=32)
        self.screen_position_x = config.value_for_key_path(keypath = 'screen_position_x', default=0)
        self.screen_position_y = config.value_for_key_path(keypath='screen_position_y', default=0)
        self.screen_scale = config.value_for_key_path(keypath='desktop_dmd_scale', default=2)
        self.dot_filter = config.value_for_key_path(keypath='dmd_dot_filter', default=True)
        self.fullscreen = config.value_for_key_path(keypath='dmd_fullscreen', default=False)
        self.window_border = config.value_for_key_path(keypath='dmd_window_border', default=True)
        self.dmd_screen_size = ((self.dots_w)*self.screen_scale, (self.dots_h)*self.screen_scale)
        self.dmd_soften = config.value_for_key_path(keypath='dmd_soften', default="0")
        self.use_rgb_dmd_device = config.value_for_key_path(keypath='rgb_dmd.enabled', default=False)
        self.dmd_flip = config.value_for_key_path(keypath='dmd_flip', default=0)
        if(self.use_rgb_dmd_device):
            # turn off dots and scaling, since they are incompatible (at this time) --SDL2 bug.
            self.screen_scale = 1
            self.dot_filter = False
            self.serial_port_number = config.value_for_key_path(keypath='rgb_dmd.com_port', default=None)
            if(self.serial_port_number is None):
                raise ValueError, "RGBDMD: config.yaml specified rgb_dmd enabled, but no com_port value (e.g., com3) given!"
        self.setup_window()
        if(self.use_rgb_dmd_device):
            if(serial is None):
                raise ValueError, "RGBDMD: config.yaml specified rgb_dmd enabled, but requird pySerial library not installed/found."
            self.serialPort = serial.Serial(port=self.serial_port_number, baudrate=2500000)
            # Handshake bytes expected by the RGB DMD firmware -- TODO confirm
            # against the device protocol documentation.
            self.magic_cookie = bytearray([0xBA,0x11,0x00,0x03, 0x04, 0x00, 0x00,0x00])
            self.serialPort.write(self.magic_cookie);
            self.draw = self.draw_to_rgb_dmd
            # Hardware path complete: skip the on-screen dot-filter setup.
            return
        if(self.dot_filter==True):
            dmd_grid_path = config.value_for_key_path(keypath='dmd_grid_path', default='./')
            # self.dot_filter = False
            # self.draw = self.draw_no_dot_effect
            ############## Make the Dot filter ############################
            dot_sprite = sdl2_DisplayManager.inst().load_texture(os.path.join(dmd_grid_path,'dmdgrid32x32.png'))
            # 1. Make the destination texture (huge)
            self.dot_tex = sdl2.render.SDL_CreateTexture(sdl2_DisplayManager.inst().texture_renderer.renderer, sdl2.pixels.SDL_PIXELFORMAT_RGBA8888,
                                        sdl2.render.SDL_TEXTUREACCESS_TARGET,
                                        self.dots_w*10,self.dots_h*10)
            sdl2.SDL_SetTextureBlendMode(self.dot_tex,sdl2.SDL_BLENDMODE_BLEND)
            # 2. backup the old renderer destination
            bk = sdl2.SDL_GetRenderTarget(sdl2_DisplayManager.inst().texture_renderer.renderer)
            sdl2.SDL_SetRenderTarget(sdl2_DisplayManager.inst().texture_renderer.renderer, self.dot_tex)
            # the following is needed on OSX, but breaks nothing being present for both
            sdl2_DisplayManager.inst().texture_renderer.clear((0,0,0))
            # 3. start the stamping process
            acr = int(math.ceil(self.dots_w/float(32)))
            down = int(math.ceil(self.dots_h/float(32)))
            for step_w in range(0,acr):
                for step_h in range(0,down):
                    sdl2_DisplayManager.inst().texture_renderer.copy(dot_sprite, dstrect= (step_w*320,step_h*320,320,320))
            del dot_sprite
            # 4. restore the target for the renderer
            sdl2.SDL_SetRenderTarget(sdl2_DisplayManager.inst().texture_renderer.renderer, bk) # revert back
        else:
            self.draw = self.draw_no_dot_effect
def add_key_map(self, key, switch_number):
"""Maps the given *key* to *switch_number*, where *key* is one of the key constants in :mod:`pygame.locals`."""
self.key_map[key] = switch_number
def clear_key_map(self):
"""Empties the key map."""
self.key_map = {}
    def get_keyboard_events(self):
        """Asks :mod:`pySDL2` for recent keyboard events and translates them into an array
        of events similar to what would be returned by :meth:`pinproc.PinPROC.get_events`."""
        # print event.key.keysym.sym <-- number
        # print sdl2.SDL_GetKeyName(event.key.keysym.sym) <-- name
        # Drain the SDL event queue; every event is also re-broadcast on the
        # global EventManager bus so other listeners can observe it.
        for event in sdl2.ext.get_events():
            EventManager.default().post(name=self.event_name_for_pygame_event_type(event.type), object=self, info=event)
            key_event = {}
            #print("Key: %s" % event.key.keysym.sym)
            if event.type == sdl2.SDL_KEYDOWN:
                # Track CTRL state so CTRL-C can be recognised as an exit request.
                if event.key.keysym.sym == sdl2.SDLK_LCTRL or event.key.keysym.sym == sdl2.SDLK_RCTRL:
                    self.ctrl = 1
                if event.key.keysym.sym == sdl2.SDLK_c:
                    if self.ctrl == 1:
                        # CTRL-C: emit an exit event instead of a switch event.
                        key_event['type'] = self.exit_event_type
                        key_event['value'] = 'quit'
                if event.key.keysym.sym == sdl2.SDLK_ESCAPE:
                    # ESC always requests exit.
                    key_event['type'] = self.exit_event_type
                    key_event['value'] = 'quit'
                elif event.key.keysym.sym in self.key_map:
                    # Mapped key pressed: report the bound switch as closed.
                    key_event['type'] = pinproc.EventTypeSwitchClosedDebounced
                    key_event['value'] = self.key_map[event.key.keysym.sym]
            elif event.type == sdl2.SDL_KEYUP:
                if event.key.keysym.sym == sdl2.SDLK_LCTRL or event.key.keysym.sym == sdl2.SDLK_RCTRL:
                    self.ctrl = 0
                elif event.key.keysym.sym in self.key_map:
                    # Mapped key released: report the bound switch as open.
                    key_event['type'] = pinproc.EventTypeSwitchOpenDebounced
                    key_event['value'] = self.key_map[event.key.keysym.sym]
            if len(key_event):
                self.key_events.append(key_event)
        # Hand back everything accumulated so far and reset the buffer.
        e = self.key_events
        self.key_events = []
        return e
event_listeners = {}
def event_name_for_pygame_event_type(self, event_type):
return 'pygame(%s)' % (event_type)
screen = None
""":class:`pygame.Surface` object representing the screen's surface."""
# you'll need to change your displayController to width=192, height=96 and the same for all layers created
    def setup_window(self):
        """Create the SDL window (dots * screen_scale pixels) and init fonts."""
        #os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.screen_position_x,self.screen_position_y)
        # One DMD dot becomes screen_scale on-screen pixels.
        self.window_w = self.dots_w * self.screen_scale
        self.window_h = self.dots_h * self.screen_scale
        flags = 0
        if(self.fullscreen):
            flags = flags | sdl2.SDL_WINDOW_FULLSCREEN
        if(self.window_border is False):
            flags = flags | sdl2.SDL_WINDOW_BORDERLESS
        # sdl2_DisplayManager owns the actual window/renderer creation.
        sdl2_DisplayManager.Init(self.dots_w, self.dots_h, self.screen_scale, "PyProcGameHD. [CTRL-C to exit]", self.screen_position_x,self.screen_position_y, flags, self.dmd_soften)
        sdl2_DisplayManager.inst().fonts_init(None,"Courier")
    def draw(self, frame):
        """Draw the given :class:`~procgame.dmd.Frame` in the window."""
        # This variant is installed when the dot-grid filter is enabled; the
        # pre-rendered grid texture (self.dot_tex) is composited on top.
        sdl2_DisplayManager.inst().clear((0,0,0,255))
        if(not self.fullscreen==True):
            sdl2_DisplayManager.inst().screen_blit(source_tx=frame.pySurface, expand_to_fill=True, flip = self.dmd_flip)
        else:
            # Fullscreen: copy the frame texture to fill the whole window.
            sdl2.SDL_RenderCopy(sdl2_DisplayManager.inst().texture_renderer.renderer, frame.pySurface.texture, None, sdl2.rect.SDL_Rect(0,0,self.window_w,self.window_h))
        # sdl2_DisplayManager.inst().screen_blit(source_tx=self.dot_tex, expand_to_fill=True)
        # Overlay the dot grid across the whole window.
        sdl2.SDL_RenderCopy(sdl2_DisplayManager.inst().texture_renderer.renderer, self.dot_tex, None, sdl2.rect.SDL_Rect(0,0,self.window_w,self.window_h))
        # sdl2.SDL_RenderCopy(texture_renderer.renderer, dot_tex, None, sdl2.rect.SDL_Rect(0,0,window_w,window_h))
        sdl2_DisplayManager.inst().flip()
def draw_no_dot_effect(self, frame):
"""Draw the given :class:`~procgame.dmd.Frame` in the window."""
sdl2_DisplayManager.inst().clear((0,0,0,255))
if(not self.fullscreen==True):
sdl2_DisplayManager.inst().screen_blit(source_tx=frame.pySurface, expand_to_fill=True)
else:
sdl2.SDL_RenderCopy(sdl2_DisplayManager.inst().texture_renderer.renderer, frame.pySurface.texture, None, sdl2.rect.SDL_Rect(self.screen_position_x,self.screen_position_y,self.screen_position_x+self.window_w,self.screen_position_y+self.window_h))
sdl2_DisplayManager.inst().flip()
    def draw_to_rgb_dmd(self, frame):
        """Render *frame* on-screen and mirror it to the RGB DMD over serial.

        NOTE(review): assumes a 128x32 panel and 4 bytes per pixel (RGB plus a
        trailing byte that is dropped) from make_bits_from_texture -- confirm
        against the display manager implementation.
        """
        sdl2_DisplayManager.inst().clear((0,0,0,255))
        sdl2_DisplayManager.inst().screen_blit(source_tx=frame.pySurface, expand_to_fill=True)
        sdl2_DisplayManager.inst().flip()
        # Grab the raw pixel bytes of the frame at panel resolution.
        bucket = sdl2_DisplayManager.inst().make_bits_from_texture(frame.pySurface, 128, 32)
        # Each refresh starts with the device's magic header bytes.
        self.serialPort.write(self.magic_cookie);
        s = bytearray([])
        i = 0
        while(i < 128*32*4):
            # print("pixel %i: %s, %s, %s, %s" % (i/4, bucket[i], bucket[i+1], bucket[i+2], bucket[i+3]))
            # Keep the first three channel bytes; drop the fourth.
            s.append(bucket[i])
            s.append(bucket[i+1])
            s.append(bucket[i+2])
            # s.append("%c%c%c" % (bucket[i], bucket[i+1], bucket[i+2]))
            # self.serialPort.write("%c%c%c" % (bucket[i], bucket[i+1], bucket[i+2]))
            i+=4
        # print (s)
        # One bulk write is much faster than per-pixel writes.
        self.serialPort.write(s)
        del bucket
        # print(type(b))
        # del tx
        # do python send
def __str__(self):
return '<Desktop pySDL2>'
| 44.610442 | 257 | 0.640259 |
acee8ff1f658f78b774fb52f407618a225370e1d | 6,529 | py | Python | src/daft_exprt/features_stats.py | ishine/ubisoft-laforge-daft-exprt | a576691c8c42988f813183efcea43c1677abe17a | [
"Apache-2.0"
] | 33 | 2021-09-17T18:32:23.000Z | 2022-03-01T21:05:08.000Z | src/daft_exprt/features_stats.py | arav-agarwal2/daft-exprt-colab | c6114a0066e564b82c267d45d8894b5f289fe037 | [
"Apache-2.0"
] | 5 | 2021-12-07T04:23:04.000Z | 2022-03-15T07:37:13.000Z | src/daft_exprt/features_stats.py | arav-agarwal2/daft-exprt-colab | c6114a0066e564b82c267d45d8894b5f289fe037 | [
"Apache-2.0"
] | 7 | 2021-09-16T02:24:02.000Z | 2022-01-11T07:48:19.000Z | import collections
import logging
import logging.handlers
import os
import uuid
import numpy as np
from daft_exprt.utils import launch_multi_process
_logger = logging.getLogger(__name__)
def get_symbols_durations(markers_file, hparams, log_queue):
    ''' extract symbols durations in markers file
    '''
    # forward this worker's log records to the parent process via the queue
    queue_handler = logging.handlers.QueueHandler(log_queue)
    root = logging.getLogger()
    if not root.hasHandlers():
        root.setLevel(logging.INFO)
        root.addHandler(queue_handler)
    logger = logging.getLogger(f"worker{str(uuid.uuid4())}")
    # check file exists
    assert(os.path.isfile(markers_file)), logger.error(f'There is no such file "{markers_file}"')
    # parse the tab-separated marker lines:
    # [begin, end, nb_frames, symbol, word, word_idx]
    with open(markers_file, 'r', encoding='utf-8') as f:
        markers = [line.strip().split(sep='\t') for line in f.readlines()]
    # one [symbol, duration] pair per marker
    durations = []
    for begin, end, _, symbol, _, _ in markers:
        assert(symbol in hparams.symbols), logger.error(f'{markers_file} -- Symbol "{symbol}" does not exist')
        durations.append([symbol, float(end) - float(begin)])
    return durations
def get_non_zero_energy_values(energy_file, log_queue):
    ''' Extract non-zero energy values in energy file
    '''
    # forward this worker's log records to the parent process via the queue
    queue_handler = logging.handlers.QueueHandler(log_queue)
    root = logging.getLogger()
    if not root.hasHandlers():
        root.setLevel(logging.INFO)
        root.addHandler(queue_handler)
    logger = logging.getLogger(f"worker{str(uuid.uuid4())}")
    # check file exists
    assert(os.path.isfile(energy_file)), logger.error(f'There is no such file "{energy_file}"')
    # one float per line; keep only the non-zero (valid) frames
    with open(energy_file, 'r', encoding='utf-8') as f:
        values = [float(line.strip()) for line in f.readlines()]
    return [v for v in values if v != 0.]
def get_voiced_pitch_values(pitch_file, log_queue):
    ''' Extract voiced pitch values in pitch file
    '''
    # forward this worker's log records to the parent process via the queue
    queue_handler = logging.handlers.QueueHandler(log_queue)
    root = logging.getLogger()
    if not root.hasHandlers():
        root.setLevel(logging.INFO)
        root.addHandler(queue_handler)
    logger = logging.getLogger(f"worker{str(uuid.uuid4())}")
    # check file exists
    assert(os.path.isfile(pitch_file)), logger.error(f'There is no such file "{pitch_file}"')
    # one F0 value per line; 0.0 marks unvoiced frames, which are discarded
    with open(pitch_file, 'r', encoding='utf-8') as f:
        values = [float(line.strip()) for line in f.readlines()]
    return [v for v in values if v != 0.]
def extract_features_stats(hparams, n_jobs):
    ''' Extract features stats for training and inference

    Returns a dict with per-speaker energy/pitch stats under keys 'spk <id>'
    and per-symbol duration stats under key 'symbols'.
    '''
    # only use the training set to extract features stats
    with open(hparams.training_files, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    training_files = [line.strip().split(sep='|') for line in lines]  # [[features_dir, features_file, speaker_id], ...]
    # iterate over speakers
    _logger.info('--' * 30)
    _logger.info('Extracting Features Stats'.upper())
    _logger.info('--' * 30)
    symbols_durations = []
    speaker_stats = {f'spk {id}': {'energy': [], 'pitch': []}
                     for id in set(hparams.speakers_id)}
    for speaker_id in set(hparams.speakers_id):
        _logger.info(f'Speaker ID: {speaker_id}')
        # extract all files associated to speaker ID
        spk_training_files = [[x[0], x[1]] for x in training_files if int(x[2]) == speaker_id]
        # extract symbol durations (each worker result is a list; flatten)
        markers_files = [os.path.join(x[0], f'{x[1]}.markers') for x in spk_training_files]
        symbols_durs = launch_multi_process(iterable=markers_files, func=get_symbols_durations,
                                            n_jobs=n_jobs, hparams=hparams, timer_verbose=False)
        symbols_durs = [y for x in symbols_durs for y in x]
        symbols_durations.extend(symbols_durs)
        # extract non-zero energy values
        energy_files = [os.path.join(x[0], f'{x[1]}.symbols_nrg') for x in spk_training_files]
        energy_vals = launch_multi_process(iterable=energy_files, func=get_non_zero_energy_values,
                                           n_jobs=n_jobs, timer_verbose=False)
        energy_vals = [y for x in energy_vals for y in x]
        speaker_stats[f'spk {speaker_id}']['energy'].extend(energy_vals)
        # extract voiced symbols pitch values
        pitch_files = [os.path.join(x[0], f'{x[1]}.symbols_f0') for x in spk_training_files]
        pitch_vals = launch_multi_process(iterable=pitch_files, func=get_voiced_pitch_values,
                                          n_jobs=n_jobs, timer_verbose=False)
        pitch_vals = [y for x in pitch_vals for y in x]
        speaker_stats[f'spk {speaker_id}']['pitch'].extend(pitch_vals)
        _logger.info('')
    # compute symbols durations stats: group all durations by symbol
    symbols_stats = collections.defaultdict(list)
    for item in symbols_durations:
        symbol, duration = item
        symbols_stats[symbol].append(duration)
    for symbol in symbols_stats:
        # NOTE(review): `min`/`max` shadow the builtins inside this loop
        min, max = np.min(symbols_stats[symbol]), np.max(symbols_stats[symbol])
        mean, std = np.mean(symbols_stats[symbol]), np.std(symbols_stats[symbol])
        symbols_stats[symbol] = {
            'dur_min': min, 'dur_max': max,
            'dur_mean': mean, 'dur_std': std
        }
    # compute energy and pitch stats for each speaker
    for speaker, vals in speaker_stats.items():
        energy_vals, pitch_vals = vals['energy'], vals['pitch']
        speaker_stats[speaker] = {
            'energy': {
                'mean': np.mean(energy_vals),
                'std': np.std(energy_vals),
                'min': np.min(energy_vals),
                'max': np.max(energy_vals)
            },
            'pitch': {
                'mean': np.mean(pitch_vals),
                'std': np.std(pitch_vals),
                'min': np.min(pitch_vals),
                'max': np.max(pitch_vals)
            }
        }
    # merge stats
    stats = {**speaker_stats}
    stats['symbols'] = symbols_stats
    return stats
| 39.331325 | 120 | 0.633022 |
acee90068b6cdba613c671c4e3e772ca3782c22b | 9,023 | py | Python | components/dataset.py | Akkyen/CG-RL | 55bdc11b77f6ae100c6ed6b089c6237dbe37edfb | [
"MIT"
] | null | null | null | components/dataset.py | Akkyen/CG-RL | 55bdc11b77f6ae100c6ed6b089c6237dbe37edfb | [
"MIT"
] | null | null | null | components/dataset.py | Akkyen/CG-RL | 55bdc11b77f6ae100c6ed6b089c6237dbe37edfb | [
"MIT"
] | null | null | null | # coding=utf-8
from collections import OrderedDict
import torch
import numpy as np
try:
import cPickle as pickle
except:
import pickle
from torch.autograd import Variable
from asdl.transition_system import ApplyRuleAction, ReduceAction
from common.utils import cached_property
from model import nn_utils
class Dataset(object):
    """A collection of `Example`s with convenience accessors and batching."""
    def __init__(self, examples):
        self.examples = examples
    @property
    def all_source(self):
        """Source sentences of all examples."""
        return [e.src_sent for e in self.examples]
    @property
    def all_targets(self):
        """Target code snippets of all examples."""
        return [e.tgt_code for e in self.examples]
    @staticmethod
    def from_bin_file(file_path):
        """Load a pickled list of examples from `file_path`.

        Fix: the file handle is now closed deterministically via a context
        manager (the original left the `open()` result unclosed).
        """
        with open(file_path, 'rb') as f:
            examples = pickle.load(f, encoding="utf-8")
        return Dataset(examples)
    def batch_iter(self, batch_size, shuffle=False):
        """Yield batches of at most `batch_size` examples, each batch sorted
        by descending source-sentence length (for packed RNN encoding)."""
        index_arr = np.arange(len(self.examples))
        if shuffle:
            np.random.shuffle(index_arr)
        batch_num = int(np.ceil(len(self.examples) / float(batch_size)))
        for batch_id in range(batch_num):
            batch_ids = index_arr[batch_size * batch_id: batch_size * (batch_id + 1)]
            batch_examples = [self.examples[i] for i in batch_ids]
            batch_examples.sort(key=lambda e: -len(e.src_sent))
            yield batch_examples
    def __len__(self):
        return len(self.examples)
    def __iter__(self):
        return iter(self.examples)
class Example(object):
    """One parallel example: a source utterance paired with its target code,
    AST, and the action sequence that derives the AST."""
    def __init__(self, src_sent, tgt_actions, tgt_code, tgt_ast, idx=0, meta=None):
        self.src_sent = src_sent        # tokenized natural-language input
        self.tgt_actions = tgt_actions  # gold transition-system actions
        self.tgt_code = tgt_code        # surface code string
        self.tgt_ast = tgt_ast          # parsed AST of tgt_code
        self.idx = idx                  # position in the original dataset
        self.meta = meta                # optional extra payload
class Batch(object):
    def __init__(self, examples, grammar, vocab, copy=True, cuda=False):
        """Tensorize a list of examples for one training step.

        :param examples: list of `Example`s batched together
        :param grammar: grammar providing prod2id/field2id/type2id mappings
        :param vocab: vocabulary with a `primitive` sub-vocab for tokens
        :param copy: enable the copy-from-source mechanism
        :param cuda: place created tensors on GPU
        """
        self.examples = examples
        # longest action sequence in the batch; shorter ones are padded/masked
        self.max_action_num = max(len(e.tgt_actions) for e in self.examples)
        self.src_sents = [e.src_sent for e in self.examples]
        self.src_sents_len = [len(e.src_sent) for e in self.examples]
        self.grammar = grammar
        self.vocab = vocab
        self.copy = copy
        self.cuda = cuda
        # eagerly build every index matrix and mask the decoder will need
        self.init_index_tensors()
    def __len__(self):
        # batch size = number of examples
        return len(self.examples)
def get_frontier_field_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_actions):
ids.append(self.grammar.field2id[e.tgt_actions[t].frontier_field])
# assert self.grammar.id2field[ids[-1]] == e.tgt_actions[t].frontier_field
else:
ids.append(0)
return Variable(torch.cuda.LongTensor(ids)) if self.cuda else Variable(torch.LongTensor(ids))
def get_field_idx(self, fields):
ids = []
for f in fields:
ids.append(self.grammar.field2id[f])
return Variable(torch.cuda.LongTensor(ids)) if self.cuda else Variable(torch.LongTensor(ids))
def get_frontier_prod_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_actions):
ids.append(self.grammar.prod2id[e.tgt_actions[t].frontier_prod])
# assert self.grammar.id2prod[ids[-1]] == e.tgt_actions[t].frontier_prod
else:
ids.append(0)
return Variable(torch.cuda.LongTensor(ids)) if self.cuda else Variable(torch.LongTensor(ids))
def get_frontier_field_type_idx(self, t):
ids = []
for e in self.examples:
if t < len(e.tgt_actions):
ids.append(self.grammar.type2id[e.tgt_actions[t].frontier_field.type])
# assert self.grammar.id2type[ids[-1]] == e.tgt_actions[t].frontier_field.type
else:
ids.append(0)
return Variable(torch.cuda.LongTensor(ids)) if self.cuda else Variable(torch.LongTensor(ids))
    def init_index_tensors(self):
        """Build, per time step and per example, the gold-action index
        matrices and the masks telling the decoder whether that step is an
        ApplyRule/Reduce, a vocabulary generation, and/or a copy from source."""
        self.apply_rule_idx_matrix = []
        self.apply_rule_mask = []
        self.primitive_idx_matrix = []
        self.gen_token_mask = []
        self.primitive_copy_mask = []
        # (max_action_num, batch, max_src_len): 1 where a source token equals
        # the gold primitive token and may therefore be copied
        self.primitive_copy_token_idx_mask = np.zeros((self.max_action_num, len(self), max(self.src_sents_len)), dtype='float32')
        for t in range(self.max_action_num):
            app_rule_idx_row = []
            app_rule_mask_row = []
            token_row = []
            gen_token_mask_row = []
            copy_mask_row = []
            for e_id, e in enumerate(self.examples):
                # defaults double as padding past the end of short sequences
                app_rule_idx = app_rule_mask = token_idx = gen_token_mask = copy_mask = 0
                if t < len(e.tgt_actions):
                    action = e.tgt_actions[t].action
                    action_info = e.tgt_actions[t]
                    if isinstance(action, ApplyRuleAction):
                        app_rule_idx = self.grammar.prod2id[action.production]
                        # assert self.grammar.id2prod[app_rule_idx] == action.production
                        app_rule_mask = 1
                    elif isinstance(action, ReduceAction):
                        # Reduce is encoded as one extra id after all productions
                        app_rule_idx = len(self.grammar)
                        app_rule_mask = 1
                    else:
                        # primitive (GenToken) action
                        src_sent = self.src_sents[e_id]
                        token = str(action.token)
                        token_idx = self.vocab.primitive[action.token]
                        token_can_copy = False
                        if self.copy and token in src_sent:
                            # mark every source position holding this token
                            token_pos_list = [idx for idx, _token in enumerate(src_sent) if _token == token]
                            self.primitive_copy_token_idx_mask[t, e_id, token_pos_list] = 1.
                            copy_mask = 1
                            token_can_copy = True
                        if token_can_copy is False or token_idx != self.vocab.primitive.unk_id:
                            # if the token is not copied, we can only generate this token from the vocabulary,
                            # even if it is a <unk>.
                            # otherwise, we can still generate it from the vocabulary
                            gen_token_mask = 1
                        if token_can_copy:
                            assert action_info.copy_from_src
                            assert action_info.src_token_position in token_pos_list
                        # # cannot copy, only generation
                        # # could be unk!
                        # if not action_info.copy_from_src:
                        #     gen_token_mask = 1
                        # else:  # copy
                        #     copy_mask = 1
                        #     copy_pos = action_info.src_token_position
                        #     if token_idx != self.vocab.primitive.unk_id:
                        #         # both copy and generate from vocabulary
                        #         gen_token_mask = 1
                app_rule_idx_row.append(app_rule_idx)
                app_rule_mask_row.append(app_rule_mask)
                token_row.append(token_idx)
                gen_token_mask_row.append(gen_token_mask)
                copy_mask_row.append(copy_mask)
            self.apply_rule_idx_matrix.append(app_rule_idx_row)
            self.apply_rule_mask.append(app_rule_mask_row)
            self.primitive_idx_matrix.append(token_row)
            self.gen_token_mask.append(gen_token_mask_row)
            self.primitive_copy_mask.append(copy_mask_row)
        # convert the Python lists into (max_action_num, batch) tensors
        T = torch.cuda if self.cuda else torch
        self.apply_rule_idx_matrix = Variable(T.LongTensor(self.apply_rule_idx_matrix))
        self.apply_rule_mask = Variable(T.FloatTensor(self.apply_rule_mask))
        self.primitive_idx_matrix = Variable(T.LongTensor(self.primitive_idx_matrix))
        self.gen_token_mask = Variable(T.FloatTensor(self.gen_token_mask))
        self.primitive_copy_mask = Variable(T.FloatTensor(self.primitive_copy_mask))
        self.primitive_copy_token_idx_mask = Variable(torch.from_numpy(self.primitive_copy_token_idx_mask))
        if self.cuda: self.primitive_copy_token_idx_mask = self.primitive_copy_token_idx_mask.cuda()
    @property
    def primitive_mask(self):
        # 1.0 wherever the gold action is a primitive token (generated from
        # the vocab and/or copied from source); 0.0 elsewhere
        return 1. - torch.eq(self.gen_token_mask + self.primitive_copy_mask, 0).float()
    @cached_property
    def src_sents_var(self):
        # batched source word-id tensor, computed once and memoized
        return nn_utils.to_input_variable(self.src_sents, self.vocab.source,
                                          cuda=self.cuda)
    @cached_property
    def src_token_mask(self):
        # mask built from the per-example source lengths (marks padding)
        return nn_utils.length_array_to_mask_tensor(self.src_sents_len,
                                                    cuda=self.cuda)
@cached_property
def token_pos_list(self):
# (batch_size, src_token_pos, unique_src_token_num)
batch_src_token_to_pos_map = []
for e_id, e in enumerate(self.examples):
aggregated_primitive_tokens = OrderedDict()
for token_pos, token in enumerate(e.src_sent):
aggregated_primitive_tokens.setdefault(token, []).append(token_pos)
| 38.233051 | 129 | 0.596476 |
acee91570f16f064f98ea02f899dc056ac9a3e6c | 3,704 | py | Python | TwitchWeather.py | tomaarsen/TwitchWeather | e7f666eb261880512d4dbffa7a5fd8118f73ca95 | [
"MIT"
] | 1 | 2022-01-23T17:36:56.000Z | 2022-01-23T17:36:56.000Z | TwitchWeather.py | tomaarsen/TwitchWeather | e7f666eb261880512d4dbffa7a5fd8118f73ca95 | [
"MIT"
] | null | null | null | TwitchWeather.py | tomaarsen/TwitchWeather | e7f666eb261880512d4dbffa7a5fd8118f73ca95 | [
"MIT"
] | null | null | null | from TwitchWebsocket import TwitchWebsocket
import json, requests, random, logging
from enum import Enum, auto
from Log import Log
Log(__file__)
from Settings import Settings
class ResultCode(Enum):
    """Outcome of a weather lookup; values are opaque (identity only)."""
    SUCCESS = auto()
    ERROR = auto()
class TwitchWeather:
    """Twitch chat bot answering "!weather <city>" via the OpenWeatherMap API."""
    def __init__(self):
        # Initialize variables
        self.host = None
        self.port = None
        self.chan = None
        self.nick = None
        self.auth = None
        self.api_key = None
        # Fill uninitialized variables using settings.txt
        self.update_settings()
        # Instantiate TwitchWebsocket instance with correct params
        self.ws = TwitchWebsocket(host=self.host,
                                  port=self.port,
                                  chan=self.chan,
                                  nick=self.nick,
                                  auth=self.auth,
                                  callback=self.message_handler,
                                  capability=None,
                                  live=True)
        # Start the websocket connection
        self.ws.start_bot()
    def update_settings(self):
        # Fill previously initialised variables with data from the settings.txt file
        self.host, self.port, self.chan, self.nick, self.auth, self.api_key = Settings().get_settings()
    def message_handler(self, m):
        """Callback invoked by TwitchWebsocket for every incoming IRC message."""
        try:
            if m.type == "366":
                logging.info(f"Successfully joined channel: #{m.channel}")
            elif m.type == "PRIVMSG":
                # Listen for command
                if m.message.startswith("!weather"):
                    split_message = m.message.split()
                    # If city params are passed
                    if len(split_message) > 1:
                        location = " ".join(split_message[1:])
                        # Get the output as well as the return code
                        out, _code = self.fetch_weather(location)
                        # Send messages to Twitch chat
                        # Because in all cases, error or success,
                        # we want to output `out` to chat, we ignore `_code` for now.
                        self.ws.send_message(out)
                    else:
                        self.ws.send_message("Please request weather for a specific city like: !weather Toronto")
        except Exception as e:
            # Broad catch on purpose: a bad message must not kill the bot loop.
            logging.exception(e)
    def fetch_weather(self, location):
        """Query OpenWeatherMap for *location*; return (message, ResultCode)."""
        # Construct URL and get result
        url = f"https://api.openweathermap.org/data/2.5/weather?q={location}&appid={self.api_key}"
        data = requests.get(url).json()
        # In case the city is not found
        # NOTE(review): the API returns 'cod' as the string "404" for errors
        # but the integer 200 on success -- the mixed-type comparisons below
        # look intentional; confirm against the OpenWeatherMap docs.
        if data['cod'] == '404':
            return data['message'].capitalize(), ResultCode.ERROR
        # If successful
        elif data['cod'] == 200:
            # Temperature arrives in Kelvin; convert to Celsius and Fahrenheit.
            celcius = float(data["main"]["temp"]) - 273.15
            fahrenheit = celcius * 1.8 + 32
            humidity = float(data["main"]["humidity"])
            city = data["name"]
            country = data["sys"]["country"]
            description = data["weather"][0]["description"]
            out = f"{celcius:.1f}°C/{fahrenheit:.0f}°F, {humidity:.1f}% humidity, with {description} in {city}, {country}."
            return out, ResultCode.SUCCESS
        # If some other error (eg. api limit exceeded)
        else:
            if 'cod' in data:
                out = f"Error with code {data['cod']} encountered."
            else:
                out = "Unknown error encountered"
            return out, ResultCode.ERROR
if __name__ == "__main__":
TwitchWeather() | 36.673267 | 123 | 0.523218 |
acee917cff5e08033de532c3154ee35c2390ca16 | 564 | py | Python | tests/domain/test_config.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | tests/domain/test_config.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | tests/domain/test_config.py | staticdev/github-portfolio | 850461eed8160e046ee16664ac3dbc19e3ec0965 | [
"MIT"
] | null | null | null | """Test cases for config model."""
import git_portfolio.domain.config as c
def test_config_model_init() -> None:
    """Verify model initialization."""
    hostname = "localhost"
    token = "my-token"
    repos = ["user/repo", "user/repo2"]
    config = c.Config(hostname, token, repos)
    assert config.github_hostname == hostname
    assert config.github_access_token == token
    assert config.github_selected_repos == repos
| 37.6 | 87 | 0.76773 |
acee92e69eac5dae08a62ea4dea1f799d168aceb | 3,166 | py | Python | cbmc_viewer/markup_link.py | markrtuttle/aws-viewer-for-cbmc | 00ff0942fcc89df47e00034592ce3f15dba5e915 | [
"Apache-2.0"
] | null | null | null | cbmc_viewer/markup_link.py | markrtuttle/aws-viewer-for-cbmc | 00ff0942fcc89df47e00034592ce3f15dba5e915 | [
"Apache-2.0"
] | null | null | null | cbmc_viewer/markup_link.py | markrtuttle/aws-viewer-for-cbmc | 00ff0942fcc89df47e00034592ce3f15dba5e915 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Links to source code.
This module is a set of methods for constructing links into the
annotated source code. All other modules use these methods for
consistent links to source code. All paths in this module are
assumed to be relative to the root of the source code.
"""
import html
import os
import re
from cbmc_viewer import srcloct
################################################################
def path_to_file(dst, src):
    """Return the relative path from *src* to *dst*.

    Both paths are relative to a common root; the result is what a hyperlink
    embedded in *src* must use to reach *dst*.  For example, the path from
    'a/b/foo.html' to 'c/bar.html' is '../../c/bar.html', the path from
    'a/b/foo.html' to '.' is '../..', and the path from '.' to
    'a/b/foo.html' is 'a/b/foo.html'.
    """
    src_dir = os.path.dirname(src)
    rel_path = os.path.relpath(dst, src_dir)
    # Sanity check: walking rel_path from src's directory must land on dst.
    round_trip = os.path.normpath(os.path.join(src_dir, rel_path))
    if dst != round_trip:
        raise UserWarning(
            "{} != {}".format(dst, round_trip)
        )
    return rel_path
################################################################
# Method to link into the source tree.
# By default, links are from the root of the source tree to the source file.
def link_text_to_file(text, to_file, from_file=None):
    """Wrap *text* in a link to *to_file*'s annotated page (escaped text for builtins)."""
    if srcloct.is_builtin(to_file):
        return html.escape(str(text))
    rel_path = path_to_file(to_file, from_file or '.')
    return '<a href="{}.html">{}</a>'.format(rel_path, text)
def link_text_to_line(text, to_file, line, from_file=None):
    """Wrap *text* in a link to line *line* of *to_file* (escaped text for builtins)."""
    if srcloct.is_builtin(to_file):
        return html.escape(str(text))
    line_number = int(line)
    rel_path = path_to_file(to_file, from_file or '.')
    return '<a href="{}.html#{}">{}</a>'.format(rel_path, line_number, text)
def link_text_to_srcloc(text, srcloc, from_file=None):
    """Wrap *text* in a link to a source location; pass through when unknown."""
    if srcloc is None:
        return text
    file_name, line = srcloc['file'], srcloc['line']
    return link_text_to_line(text, file_name, line, from_file)
def link_text_to_symbol(text, symbol, symbols, from_file=None):
    """Wrap *text* in a link to *symbol*'s definition, if the symbol table knows it."""
    return link_text_to_srcloc(text, symbols.lookup(symbol), from_file)
def split_text_into_symbols(text):
    """Split *text* into alternating chunks, capturing identifier-like runs."""
    identifier_pattern = '([_a-zA-Z][_a-zA-Z0-9]*)'
    return re.split(identifier_pattern, text)
def link_symbols_in_text(text, symbols, from_file=None):
    """Replace every symbol occurring in *text* with a link to its definition."""
    if text is None:
        return None
    linked_tokens = (
        link_text_to_symbol(token, token, symbols, from_file)
        for token in split_text_into_symbols(text)
    )
    return ''.join(linked_tokens)
################################################################
| 32.979167 | 89 | 0.625711 |
acee93ab598fc0c438081f8dd3e5f2fcf6bfe4e0 | 1,171 | py | Python | clinica/pipelines/cli_param/argument.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/cli_param/argument.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/pipelines/cli_param/argument.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | """Common CLI arguments used by Clinica pipelines."""
import click
from clinica.utils.pet import LIST_SUVR_REFERENCE_REGIONS
# Positional CLI arguments shared by the Clinica pipeline commands.
acq_label = click.argument("acq_label")
# BIDS input dataset directory; must already exist.
bids_directory = click.argument(
    "bids_directory",
    type=click.Path(exists=True, file_okay=False, resolve_path=True),
)
# CAPS output dataset directory; created/updated by the pipelines, so only
# writability is required.
caps_directory = click.argument(
    "caps_directory",
    type=click.Path(writable=True, file_okay=False, resolve_path=True),
)
contrast = click.argument("contrast")
group_label = click.argument("group_label")
# Source of the input data for surface-based pipelines.
orig_input_data = click.argument(
    "orig_input_data",
    type=click.Choice(["t1-freesurfer", "pet-surface", "custom-pipeline"]),
)
# Source of the features used by the machine-learning pipelines.
orig_input_data_ml = click.argument(
    "orig_input_data_ml",
    type=click.Choice(["t1-volume", "pet-surface"]),
)
# TSV file describing the point-spread function for partial-volume correction.
pvc_psf_tsv = click.argument(
    "pvc_psf_tsv",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)
# TSV listing subject/visit pairs together with their covariates.
subject_visits_with_covariates_tsv = click.argument(
    "subject_visits_with_covariates_tsv",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
)
# Reference region used to compute the SUVR map.
suvr_reference_region = click.argument(
    "suvr_reference_region", type=click.Choice(LIST_SUVR_REFERENCE_REGIONS)
)
| 26.022222 | 75 | 0.752348 |
acee94a8620f3467cdbcb15a1cec3cfcc7c4e968 | 1,802 | py | Python | test/sst/7.1.0/goblin_singlestream1.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 10 | 2018-02-26T02:39:36.000Z | 2020-10-20T14:55:56.000Z | test/sst/7.1.0/goblin_singlestream1.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 5 | 2017-09-07T11:41:35.000Z | 2020-10-12T14:35:39.000Z | test/sst/7.0.0/goblin_singlestream1.py | tactcomplabs/gc64-hmcsim | 79bf4ffae74dc52bb605adb3e0e1eb84649f9624 | [
"BSD-2-Clause"
] | 4 | 2017-09-07T06:03:43.000Z | 2021-09-10T13:44:19.000Z | import sst
# ---- SST core options --------------------------------------------------
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")

# ---- CPU: Miranda single-stream load generator -------------------------
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
    "verbose": 0,
    "generator": "miranda.SingleStreamGenerator",
    "generatorParams.verbose": 0,
    "generatorParams.startat": 3,
    "generatorParams.count": 500000,
    "generatorParams.max_address": 512000,
    "printStats": 1,
})

# Statistics handling: accumulate everything the CPU exposes.
sst.setStatisticLoadLevel(4)
comp_cpu.enableAllStatistics({"type": "sst.AccumulatorStatistic"})

# ---- L1 cache with a stride prefetcher ---------------------------------
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
    "access_latency_cycles": "2",
    "cache_frequency": "2 Ghz",
    "replacement_policy": "lru",
    "coherence_protocol": "MESI",
    "associativity": "4",
    "cache_line_size": "64",
    "prefetcher": "cassini.StridePrefetcher",
    "debug": "1",
    "L1": "1",
    "cache_size": "2KB",
})
comp_l1cache.enableAllStatistics({"type": "sst.AccumulatorStatistic"})

# ---- Memory controller backed by the GoblinHMC simulator ---------------
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
    "coherence_protocol": "MESI",
    "backend.access_time": "1000 ns",
    "backend.mem_size": "512MiB",
    "clock": "1GHz",
    "backend": "memHierarchy.goblinHMCSim",
})

# ---- Wiring: CPU <-> L1 <-> memory -------------------------------------
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect((comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps"))
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect((comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps"))
acee9572aea537eb0b26b231bca5b2a247a71515 | 163 | py | Python | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Minute_MLP.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Minute_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_Seasonal_Minute_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['Seasonal_Minute'] , ['MLP'] ); | 40.75 | 90 | 0.760736 |
acee95a093e36460a6399dc4df3e1e06366b0bdd | 1,589 | py | Python | pydl4j/tests/build_tests/test_build_5.py | mjlorenzo305/deeplearning4j | a1fcc5f19f0f637e83252b00982b3f12b401f679 | [
"Apache-2.0"
] | 13,006 | 2015-02-13T18:35:31.000Z | 2022-03-18T12:11:44.000Z | pydl4j/tests/build_tests/test_build_5.py | pxiuqin/deeplearning4j | e11ddf3c24d355b43d36431687b807c8561aaae4 | [
"Apache-2.0"
] | 5,319 | 2015-02-13T08:21:46.000Z | 2019-06-12T14:56:50.000Z | pydl4j/tests/build_tests/test_build_5.py | pxiuqin/deeplearning4j | e11ddf3c24d355b43d36431687b807c8561aaae4 | [
"Apache-2.0"
] | 4,719 | 2015-02-13T22:48:55.000Z | 2022-03-22T07:25:36.000Z | ################################################################################
# Copyright (c) 2015-2019 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
import pytest
import pydl4j
import os
def _datavec_test():
    """Smoke-test that core Spark classes are loadable through pyjnius.

    Merely resolving the classes proves the datavec/Spark jars ended up on
    the JVM classpath after the build; the class objects themselves are
    not used further.
    """
    from jnius import autoclass
    spark_conf_cls = autoclass('org.apache.spark.SparkConf')
    spark_context_cls = autoclass('org.apache.spark.api.java.JavaSparkContext')
    java_rdd_cls = autoclass('org.apache.spark.api.java.JavaRDD')
def test_build():
    """End-to-end build test: clear any cached jars, run a Maven build with
    a CPU-only configuration, then smoke-test the result via jumpy and the
    datavec/Spark classes.
    """
    import shutil  # local import: only needed for the cache cleanup below

    _CONFIG = {
        'dl4j_version': '1.0.0-SNAPSHOT',
        'dl4j_core': True,
        'datavec': True,
        'spark': False,
        'spark_version': '2',
        'scala_version': '2.11',
        'nd4j_backend': 'cpu'
    }
    my_dir = pydl4j.jarmgr._MY_DIR
    if os.path.isdir(my_dir):
        # BUG FIX: os.remove() cannot delete a directory (it raises
        # IsADirectoryError/OSError); use shutil.rmtree to wipe the cache.
        shutil.rmtree(my_dir)
    pydl4j.set_config(_CONFIG)
    pydl4j.maven_build()
    import jumpy as jp
    # A freshly created zeros array must sum to 0 if the backend works.
    assert jp.zeros((3, 2)).numpy().sum() == 0
    _datavec_test()
if __name__ == '__main__':
    # Run this file's tests directly (equivalent to `pytest this_file.py`).
    pytest.main([__file__])
| 27.396552 | 80 | 0.615481 |
acee95abc73d019b329534682b4850e72b83f7bd | 3,368 | py | Python | research/object_detection/builders/optimizer_builder_tf2_test.py | zhaowt96/models | 03182253673b0e2666ad9a33839759834c0acebd | [
"Apache-2.0"
] | null | null | null | research/object_detection/builders/optimizer_builder_tf2_test.py | zhaowt96/models | 03182253673b0e2666ad9a33839759834c0acebd | [
"Apache-2.0"
] | null | null | null | research/object_detection/builders/optimizer_builder_tf2_test.py | zhaowt96/models | 03182253673b0e2666ad9a33839759834c0acebd | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import unittest
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class OptimizerBuilderV2Test(tf.test.TestCase):
  """Test building optimizers in V2 mode."""

  def _build_from_text(self, optimizer_text_proto):
    """Parse a text-format Optimizer proto and run it through the builder.

    Returns the built optimizer object (the builder's second return value,
    a summary-variable list, is discarded).
    """
    optimizer_proto = optimizer_pb2.Optimizer()
    text_format.Merge(optimizer_text_proto, optimizer_proto)
    optimizer, _ = optimizer_builder.build(optimizer_proto)
    return optimizer

  def testBuildRMSPropOptimizer(self):
    optimizer_text_proto = """
      rms_prop_optimizer: {
        learning_rate: {
          exponential_decay_learning_rate {
            initial_learning_rate: 0.004
            decay_steps: 800720
            decay_factor: 0.95
          }
        }
        momentum_optimizer_value: 0.9
        decay: 0.9
        epsilon: 1.0
      }
      use_moving_average: false
    """
    self.assertIsInstance(self._build_from_text(optimizer_text_proto),
                          tf.keras.optimizers.RMSprop)

  def testBuildMomentumOptimizer(self):
    optimizer_text_proto = """
      momentum_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.001
          }
        }
        momentum_optimizer_value: 0.99
      }
      use_moving_average: false
    """
    self.assertIsInstance(self._build_from_text(optimizer_text_proto),
                          tf.keras.optimizers.SGD)

  def testBuildAdamOptimizer(self):
    optimizer_text_proto = """
      adam_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.002
          }
        }
      }
      use_moving_average: false
    """
    self.assertIsInstance(self._build_from_text(optimizer_text_proto),
                          tf.keras.optimizers.Adam)

  def testBuildMovingAverageOptimizer(self):
    optimizer_text_proto = """
      adam_optimizer: {
        learning_rate: {
          constant_learning_rate {
            learning_rate: 0.002
          }
        }
      }
      use_moving_average: True
    """
    self.assertIsInstance(self._build_from_text(optimizer_text_proto),
                          tf.keras.optimizers.Optimizer)
if __name__ == '__main__':
  # Force TF2 eager semantics before any test constructs graph state.
  tf.enable_v2_behavior()
  tf.test.main()
| 32.07619 | 80 | 0.684679 |
acee95c4e47ace6c8480c9ef303fab10a6d0f9a1 | 880 | py | Python | sentiment_analysis/build_model.py | mFarouki/tensorflow | 18521f71d2fc9f1173e445f198dfb84577fe815b | [
"MIT"
] | null | null | null | sentiment_analysis/build_model.py | mFarouki/tensorflow | 18521f71d2fc9f1173e445f198dfb84577fe815b | [
"MIT"
] | null | null | null | sentiment_analysis/build_model.py | mFarouki/tensorflow | 18521f71d2fc9f1173e445f198dfb84577fe815b | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import layers
# Binary cross-entropy on raw logits: the model's final Dense(1) layer has
# no sigmoid activation, hence from_logits=True.
cross_entropy_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def embedded_neural_net(max_features, embedding_dim=16, dropout_rate=0.2):
    """Build a small text-classification net: embedding -> pooling -> logit.

    Args:
        max_features: vocabulary size; the embedding table gets
            max_features + 1 rows to leave room for index 0 (padding/OOV).
        embedding_dim: dimensionality of each token embedding.
        dropout_rate: dropout applied before and after average pooling.

    Returns:
        An uncompiled tf.keras.Sequential model emitting a single raw logit.
    """
    model = tf.keras.Sequential([
        layers.Embedding(max_features + 1, embedding_dim),
        layers.Dropout(dropout_rate),
        layers.GlobalAveragePooling1D(),
        layers.Dropout(dropout_rate),
        layers.Dense(1)])
    # BUG FIX: Model.summary() already prints the summary and returns None,
    # so the original print(model.summary()) emitted a spurious "None" line.
    model.summary()
    return model
def compile_model(model, loss_fxn=cross_entropy_loss, optimizer='adam', metrics=None):
    """Compile *model* in place and return it.

    Defaults: binary cross-entropy on logits, the Adam optimizer (a
    stochastic-gradient-descent variant well suited to this kind of
    problem), and binary accuracy at a 0.0 logit threshold when no
    metrics are supplied.
    """
    chosen_metrics = metrics if metrics is not None else tf.metrics.BinaryAccuracy(threshold=0.0)
    model.compile(optimizer=optimizer, loss=loss_fxn, metrics=chosen_metrics)
    return model
acee971c2acafabc75341d679d7da1ed81cc4280 | 430 | py | Python | day35/02.py | 1923851861/Oldboy_SH_Python | c9d8224cf41c58235fc67783d73a48f82fee61a1 | [
"MIT"
] | null | null | null | day35/02.py | 1923851861/Oldboy_SH_Python | c9d8224cf41c58235fc67783d73a48f82fee61a1 | [
"MIT"
] | null | null | null | day35/02.py | 1923851861/Oldboy_SH_Python | c9d8224cf41c58235fc67783d73a48f82fee61a1 | [
"MIT"
] | null | null | null | from threading import Thread,Lock
import time
mutex=Lock()
n=100
def task():
    """Decrement the shared counter ``n`` by one.

    The lock is held across the whole read-sleep-write sequence so that
    concurrent workers cannot interleave and lose updates.
    """
    global n
    with mutex:
        current = n
        time.sleep(0.1)
        n = current - 1
if __name__ == '__main__':
    t_l=[]  # handles for every spawned thread so we can join them below
    start_time=time.time()
    # Launch three workers; each decrements n exactly once under the lock.
    for i in range(3):
        t=Thread(target=task)
        t_l.append(t)
        t.start()
    # Wait for all workers. Because task() holds the lock across its sleep,
    # the three decrements run serially (~0.3s total wall time).
    for t in t_l:
        t.join()
    stop_time=time.time()
    print(n)  # 100 - 3 = 97
    print(stop_time-start_time)
acee9807ce007b27048e34d8a30a8e36ecb1dfa2 | 1,326 | py | Python | pyciderevalcap/eval.py | Xiaoming-Zhao/cider | 8be1558d026ae737e9b3a9ecefe5e1578033ecea | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pyciderevalcap/eval.py | Xiaoming-Zhao/cider | 8be1558d026ae737e9b3a9ecefe5e1578033ecea | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pyciderevalcap/eval.py | Xiaoming-Zhao/cider | 8be1558d026ae737e9b3a9ecefe5e1578033ecea | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from .tokenizer.ptbtokenizer import PTBTokenizer
from .cider.cider import Cider
from .ciderD.ciderD import CiderD
__author__ = 'rama'
class CIDErEvalCap:
    """Tokenize ground-truth and candidate captions, then score the
    candidates with CIDEr and CIDEr-D.
    """

    def __init__(self, gts, res, df):
        """Tokenize both caption sets up front.

        gts: ground-truth (reference) captions, keyed by image id.
        res: candidate captions, keyed by image id.
        df:  document-frequency setting passed through to both scorers.
        """
        print('tokenization...')
        tokenizer = PTBTokenizer('gts')
        _gts = tokenizer.tokenize(gts)
        print('tokenized refs')
        tokenizer = PTBTokenizer('res')
        _res = tokenizer.tokenize(res)
        print('tokenized cands')

        self.gts = _gts
        self.res = _res
        self.df = df

    def evaluate(self):
        """Run every scorer and return {metric_name: per-image score list}."""
        # =================================================
        # Set up scorers
        # =================================================
        print('setting up scorers...')
        scorers = [
            (Cider(df=self.df), "CIDEr"), (CiderD(df=self.df), "CIDErD")
        ]

        # =================================================
        # Compute scores
        # =================================================
        metric_scores = {}
        for scorer, method in scorers:
            print('computing {} score...'.format(scorer.method()))
            score, scores = scorer.compute_score(self.gts, self.res)
            # BUG FIX: the original mixed %-style with str.format
            # ("Mean %s score: {}".format((method, score))) and printed the
            # literal "%s" plus the raw tuple instead of name and value.
            print("Mean {} score: {}".format(method, score))
            metric_scores[method] = list(scores)
        return metric_scores
| 30.837209 | 72 | 0.47813 |
acee982f736f2c7405eda51fafde524a088e68be | 3,269 | py | Python | backend/venv/Lib/site-packages/sphinx/__init__.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | backend/venv/Lib/site-packages/sphinx/__init__.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | backend/venv/Lib/site-packages/sphinx/__init__.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # -*- coding: utf-8 -*-
"""
Sphinx
~~~~~~
The Sphinx documentation toolchain.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Keep this file executable as-is in Python 3!
# (Otherwise getting the version out of it from setup.py is impossible.)
import sys
from os import path
# Released version string; may get an hg changeset suffix appended below
# when running from a development checkout.
__version__ = '1.2.3'
__released__ = '1.2.3' # used when Sphinx builds its own docs

# version info for better programmatic use
# possible values for 3rd element: 'alpha', 'beta', 'rc', 'final'
# 'final' has 0 as the last element
version_info = (1, 2, 3, 'final', 0)

# Absolute path of the installed sphinx package directory.
package_dir = path.abspath(path.dirname(__file__))

if '+' in __version__ or 'pre' in __version__:
    # try to find out the changeset hash if checked out from hg, and append
    # it to __version__ (since we use this value from setup.py, it gets
    # automatically propagated to an installed copy as well)
    try:
        import subprocess
        p = subprocess.Popen(['hg', 'id', '-i', '-R',
                              path.join(package_dir, '..')],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if out:
            __version__ += '/' + out.strip()
    # Best effort only: hg may be missing or this may not be a checkout.
    except Exception:
        pass
def main(argv=sys.argv):
    """Top-level entry: dispatch to make-mode (``-M`` first argument) or a
    plain build, and exit the interpreter with the sub-command's status.
    """
    # BUG FIX: honor the *argv* parameter instead of always reading the
    # global sys.argv, so callers can pass a custom argument vector.
    if argv[1:2] == ['-M']:
        sys.exit(make_main(argv))
    else:
        sys.exit(build_main(argv))
def build_main(argv=sys.argv):
    """Sphinx build "main" command-line entry.

    Returns an exit status: 1 when the interpreter or a dependency is
    unsupported/missing, otherwise whatever sphinx.cmdline.main() returns.
    """
    if sys.version_info[:3] < (2, 5, 0):
        sys.stderr.write('Error: Sphinx requires at least Python 2.5 to run.\n')
        return 1
    try:
        from sphinx import cmdline
    except ImportError:
        # NOTE: sys.exc_info() instead of "except ImportError as err" —
        # presumably to stay parseable by very old interpreters (the file
        # header says it must remain executable as-is).
        err = sys.exc_info()[1]
        errstr = str(err)
        if errstr.lower().startswith('no module named'):
            # Strip the leading "no module named " (16 chars) to isolate
            # the missing module's name for the user-facing hint below.
            whichmod = errstr[16:]
            hint = ''
            if whichmod.startswith('docutils'):
                whichmod = 'Docutils library'
            elif whichmod.startswith('jinja'):
                whichmod = 'Jinja2 library'
            elif whichmod == 'roman':
                whichmod = 'roman module (which is distributed with Docutils)'
                hint = ('This can happen if you upgraded docutils using\n'
                        'easy_install without uninstalling the old version'
                        'first.\n')
            else:
                whichmod += ' module'
            sys.stderr.write('Error: The %s cannot be found. '
                             'Did you install Sphinx and its dependencies '
                             'correctly?\n' % whichmod)
            if hint:
                sys.stderr.write(hint)
            return 1
        # Any other ImportError is a genuine bug; let it propagate.
        raise
    if sys.version_info[:3] >= (3, 3, 0):
        # Docutils >= 0.10 is required on Python 3.3 and above.
        from sphinx.util.compat import docutils_version
        if docutils_version < (0, 10):
            sys.stderr.write('Error: Sphinx requires at least '
                             'Docutils 0.10 for Python 3.3 and above.\n')
            return 1
    return cmdline.main(argv)
def make_main(argv=sys.argv):
    """Entry point for Sphinx "make mode" (invoked as ``sphinx-build -M``).

    Drops the program name and the ``-M`` flag before delegating to the
    make-mode runner.
    """
    from sphinx import make_mode
    make_args = argv[2:]
    return make_mode.run_make_mode(make_args)
if __name__ == '__main__':
    # Allow running this module directly as the sphinx-build entry point.
    sys.exit(main(sys.argv))
acee9887d23c50f878a2026f76c33f1abf02e855 | 21,933 | py | Python | django/conf/global_settings.py | Elorex/django | 16454ac35f6a24a04b23a9340b0d62c33edbc1ea | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2019-03-19T07:08:22.000Z | 2019-03-19T07:08:27.000Z | django/conf/global_settings.py | Elorex/django | 16454ac35f6a24a04b23a9340b0d62c33edbc1ea | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-01-31T11:30:21.000Z | 2020-01-31T11:30:21.000Z | django/conf/global_settings.py | Elorex/django | 16454ac35f6a24a04b23a9340b0d62c33edbc1ea | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-21T05:45:42.000Z | 2020-11-21T05:45:42.000Z | """
Default Django settings. Override these with settings in the module pointed to
by the DJANGO_SETTINGS_MODULE environment variable.
"""
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Return *s* unchanged.

    Stand-in for django.utils.translation's gettext_noop: it marks strings
    for later translation without importing the translation machinery,
    which itself depends on these settings (see the comment above).
    """
    return s
####################
# CORE #
####################
DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('dsb', gettext_noop('Lower Sorbian')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-co', gettext_noop('Colombian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gd', gettext_noop('Scottish Gaelic')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hsb', gettext_noop('Upper Sorbian')),
('hu', gettext_noop('Hungarian')),
('hy', gettext_noop('Armenian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kab', gettext_noop('Kabyle')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmål')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default charset to use for all HttpResponse objects, if a MIME type isn't
# manually specified. It's used to construct the Content-Type header.
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = False
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
TEMPLATES = []
# Default form rendering class.
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates'
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search'),
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$'),
# re.compile(r'^/robots.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum size in bytes of request data (excluding file uploads) that will be
# read before a SuspiciousOperation (RequestDataTooBig) is raised.
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Maximum number of GET/POST parameters that will be read before a
# SuspiciousOperation (TooManyFieldsSent) is raised.
DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see https://docs.python.org/library/os.html#files-and-directories.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# https://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware to use. Order is important; in the request phase, these
# middleware will be applied in the order given, and in the response
# phase the middleware will be applied in reverse order.
MIDDLEWARE = []
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like "example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to set the flag restricting cookie leaks on cross-site requests.
# This can be 'Lax', 'Strict', or None to disable the flag.
SESSION_COOKIE_SAMESITE = 'Lax'
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
LOGOUT_REDIRECT_URL = None
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SAMESITE = 'Lax'
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
CSRF_TRUSTED_ORIGINS = []
CSRF_USE_SESSIONS = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_PRELOAD = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
| 34.540157 | 101 | 0.701135 |
acee988f5adc91de2bda18d273512161937d9c53 | 515 | py | Python | Mocks/BNO555.py | Solar-Gators/emulators | d2e68d06a867d65601b7cbadd7dffdf62cb7844d | [
"MIT"
] | null | null | null | Mocks/BNO555.py | Solar-Gators/emulators | d2e68d06a867d65601b7cbadd7dffdf62cb7844d | [
"MIT"
] | 5 | 2020-07-04T01:29:44.000Z | 2021-03-04T18:30:42.000Z | Mocks/BNO555.py | Solar-Gators/emulators | d2e68d06a867d65601b7cbadd7dffdf62cb7844d | [
"MIT"
] | null | null | null | from .Message import Message
class Axis():
    """Simple container for one 3-axis (x, y, z) sensor reading."""

    def __init__(self, x, y, z):
        """Store the three axis components."""
        self.x = x
        self.y = y
        self.z = z

    def print(self):
        """Write the reading to stdout as ``x: .., y: .., z: ..``."""
        parts = ["x: " + str(self.x), "y: " + str(self.y), "z: " + str(self.z)]
        print(", ".join(parts))
class BNO555(Message):
    """Mock of the BNO055 IMU message, exposing canned gyro/accel readings."""

    def __init__(self, addr_CAN, addr_telem):
        # Bug fix: the original called ``super.__init__(...)`` on the bare
        # ``super`` type (no parentheses), which raises at runtime; the
        # zero-argument ``super()`` call is required here.
        super().__init__(addr_CAN, addr_telem)
        self.gyro = Axis(5, 4, 3)   # placeholder gyroscope reading
        self.accel = Axis(1, 2, 3)  # placeholder accelerometer reading

    def toCharArray(self):
        """Serialize the reading for transmission (not implemented yet)."""
        # TODO figure out how the IMU is sending from the car
        pass
| 27.105263 | 72 | 0.561165 |
acee98925146d08650a7031ce2f9ac6c5a3f7d44 | 323 | py | Python | python/tcp_server.py | 12Tall/network | 602dfbba3a6bfb7400ecc80f618bb6e8be95e4ad | [
"MIT"
] | null | null | null | python/tcp_server.py | 12Tall/network | 602dfbba3a6bfb7400ecc80f618bb6e8be95e4ad | [
"MIT"
] | 1 | 2020-11-25T06:54:54.000Z | 2020-11-25T06:54:54.000Z | python/tcp_server.py | 12Tall/network | 602dfbba3a6bfb7400ecc80f618bb6e8be95e4ad | [
"MIT"
] | null | null | null | import socket
import threading  # NOTE(review): imported but never used in the visible script
# Quick socket experiment. The single socket below is first bound like a
# server, then used as an HTTP client toward baidu -- presumably leftovers
# from following a tutorial; the two roles cannot share one socket. TODO confirm intent.
bind_ip = "0.0.0.0"  # listen on all interfaces
bind_port = 9991  # arbitrary high port
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP socket
server.bind((bind_ip,bind_port))
# From here on the socket acts as an HTTP client.
server.connect(("www.baidu.com",80))
server.send(b"GET / HTTP/1.1\r\nHost:baidu.com\r\n\r\n")  # minimal HTTP GET
response = server.recv(4096)  # read up to 4 KiB of the reply
print(response)
server.listen(5)  # NOTE(review): listen() on an already-connected socket should fail -- confirm
| 17 | 58 | 0.733746 |
acee98d66eec02564e81481c651f6c3f582d575e | 765 | py | Python | src/playbook/actions/basic/shell.py | ownport/playbook | 6d3196ddf68f2c3c3efc4a52e26719c3e5596dca | [
"MIT"
] | 1 | 2018-09-10T10:01:25.000Z | 2018-09-10T10:01:25.000Z | src/playbook/actions/basic/shell.py | ownport/playbook | 6d3196ddf68f2c3c3efc4a52e26719c3e5596dca | [
"MIT"
] | null | null | null | src/playbook/actions/basic/shell.py | ownport/playbook | 6d3196ddf68f2c3c3efc4a52e26719c3e5596dca | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division, print_function)
from playbook.actions import BaseAction
from plumbum import local
from plumbum.commands.processes import ProcessExecutionError
class Action(BaseAction):
    """Playbook action that runs a shell command via plumbum."""

    name = u'shell'

    def handler(self):
        """Execute ``self._args[0]`` with the remaining args.

        A non-zero exit does not raise: the failure is folded into the
        returned result dict (``status``/``exitcode``/``msg``/streams).
        """
        msg = ''
        try:
            command = local.get(self._args[0])
            exitcode, stdout, stderr = command.run(self._args[1:])
        except ProcessExecutionError as err:
            exitcode, stdout, stderr = err.retcode, err.stdout, err.stderr
            msg = err.stderr
        result = {
            'status': 'SUCCESS' if exitcode == 0 else 'FAILED',
            'exitcode': exitcode,
            'msg': msg,
            'stdout': stdout,
            'stderr': stderr
        }
        return result
| 25.5 | 83 | 0.586928 |
acee992f8f77f5bcbb5978f39ca718aa9233ce60 | 141 | py | Python | _12_EXERCISE_LISTS_BASICS/_2_Multiples_List.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | _12_EXERCISE_LISTS_BASICS/_2_Multiples_List.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | _12_EXERCISE_LISTS_BASICS/_2_Multiples_List.py | YordanPetrovDS/Python_Fundamentals | 81163054cd3ac780697eaa43f099cc455f253a0c | [
"MIT"
] | null | null | null | factor = int(input())
count = int(input())
# Build [factor*1, factor*2, ..., factor*count] in one pass instead of the
# original append loop (`factor` is read from stdin just above this block).
num_list = [factor * num for num in range(1, count + 1)]
print(num_list)
| 15.666667 | 33 | 0.64539 |
acee996677aec519edb43bfe8c343ed3c9dffe84 | 1,186 | py | Python | user_app/migrations/0003_my_user.py | shahinvx/Django_Permission_and_Group | 8f88d7af370dba4bfed9bd2cece6f106ee7e7d04 | [
"MIT"
] | null | null | null | user_app/migrations/0003_my_user.py | shahinvx/Django_Permission_and_Group | 8f88d7af370dba4bfed9bd2cece6f106ee7e7d04 | [
"MIT"
] | null | null | null | user_app/migrations/0003_my_user.py | shahinvx/Django_Permission_and_Group | 8f88d7af370dba4bfed9bd2cece6f106ee7e7d04 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-10-29 14:53
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding ``My_User``, a multi-table child of
    ``auth.User`` with extra profile fields (full_name, age, address).
    Generated by Django; edit only comments."""
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('user_app', '0002_auto_20211029_1931'),
    ]
    operations = [
        migrations.CreateModel(
            name='My_User',
            fields=[
                # parent_link OneToOneField = multi-table inheritance from auth.User
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.user')),
                ('full_name', models.CharField(blank=True, max_length=300, null=True)),
                ('age', models.PositiveIntegerField(blank=True, null=True)),
                ('address', models.CharField(blank=True, max_length=300, null=True)),
            ],
            options={
                'db_table': 'my_user',
                'managed': True,
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 34.882353 | 185 | 0.565767 |
acee99a5f22b1acc8241101b0fe7d051e63e94a7 | 2,145 | py | Python | lib/datasets/factory.py | AlphaJia/tf-faster-rcnn | 4ba2d3eeca78387f99561ee0a562e208ccda1bd9 | [
"MIT"
] | 4 | 2020-10-30T05:31:15.000Z | 2020-11-10T06:37:38.000Z | lib/datasets/factory.py | AlphaJia/tf-faster-rcnn | 4ba2d3eeca78387f99561ee0a562e208ccda1bd9 | [
"MIT"
] | null | null | null | lib/datasets/factory.py | AlphaJia/tf-faster-rcnn | 4ba2d3eeca78387f99561ee0a562e208ccda1bd9 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Factory method for easily getting imdbs by name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
import numpy as np
# Set up voc_<year>_<split>
# for year in ['2007', '2012']:
# for split in ['train', 'val', 'trainval', 'test']:
# name = 'voc_{}_{}'.format(year, split)
# __sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
#
# for year in ['2007', '2012']:
# for split in ['train', 'val', 'trainval', 'test']:
# name = 'voc_{}_{}_diff'.format(year, split)
# __sets[name] = (lambda split=split, year=year: pascal_voc(split, year, use_diff=True))
# Set up coco_2014_<split>
# for year in ['2014']:
# for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
# name = 'coco_{}_{}'.format(year, split)
# __sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2017_<split>
# Register a factory callable per COCO 2017 train/val-style split.
# The split=split / year=year defaults freeze the loop variables at lambda
# definition time, avoiding the late-binding closure pitfall.
for year in ['2017']:
  for split in ['train', 'val', 'minival', 'valminusminival', 'trainval']:
    name = 'coco_{}_{}'.format(year, split)
    __sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
# for year in ['2015']:
# for split in ['test', 'test-dev']:
# name = 'coco_{}_{}'.format(year, split)
# __sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2017_<split>
# Same registration for the test/test-dev splits; defaults again bind the
# loop variables eagerly so each lambda keeps its own split/year.
for year in ['2017']:
  for split in ['test', 'test-dev']:
    name = 'coco_{}_{}'.format(year, split)
    __sets[name] = (lambda split=split, year=year: coco(split, year))
def get_imdb(name):
  """Return the imdb (image database) registered under *name*.

  Raises:
    KeyError: if no dataset factory with that name is registered.
  """
  factory = __sets.get(name)
  if factory is None:
    raise KeyError('Unknown dataset: {}'.format(name))
  return factory()
def list_imdbs():
  """Return the names of all registered imdbs (registration order)."""
  return list(__sets)
| 32.5 | 92 | 0.620047 |
acee9a622e46ca52f9435e354d733421acec11ea | 719 | py | Python | setup.py | dan-coates/datacompy | 0a3e3701b2d284ca440c569f7866f2fc08eb3f9b | [
"Apache-2.0"
] | 1 | 2021-01-29T04:24:47.000Z | 2021-01-29T04:24:47.000Z | setup.py | PauloASilva/datacompy | 0a3e3701b2d284ca440c569f7866f2fc08eb3f9b | [
"Apache-2.0"
] | null | null | null | setup.py | PauloASilva/datacompy | 0a3e3701b2d284ca440c569f7866f2fc08eb3f9b | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
import os
from io import open as iopen  # io.open provides the encoding= kwarg on Python 2 too
# Directory containing this setup.py; used to resolve README.rst reliably.
CURR_DIR = os.path.abspath(os.path.dirname(__file__))
INSTALL_REQUIRES = ["pandas>=0.19.0,!=0.23.*", "numpy>=1.11.3"]
# The PyPI long description is the project README.
with iopen(os.path.join(CURR_DIR, "README.rst"), encoding="utf-8") as file_open:
    LONG_DESCRIPTION = file_open.read()
# Executes datacompy/_version.py to bind __version__ here without importing
# the package (so install does not require its dependencies to be present).
exec(open("datacompy/_version.py").read())
setup(
    name="datacompy",
    version=__version__,
    description="Dataframe comparison in Python",
    long_description=LONG_DESCRIPTION,
    url="https://github.com/capitalone/datacompy",
    license="Apache-2.0",
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    package_data={"": ["templates/*"]},
    zip_safe=False,
)
| 29.958333 | 80 | 0.7121 |
acee9a6a556b22e86971fb97517d62e52a8d5a4a | 759 | py | Python | projeto_negocio_moderno/negocio_moderno_env/Scripts/django-admin.py | LeandroMelloo/curso_completo_api_rest_django_framework | c56771a2103ac755e68984b2b1b78f591c1d4b5a | [
"Apache-2.0"
] | null | null | null | projeto_negocio_moderno/negocio_moderno_env/Scripts/django-admin.py | LeandroMelloo/curso_completo_api_rest_django_framework | c56771a2103ac755e68984b2b1b78f591c1d4b5a | [
"Apache-2.0"
] | null | null | null | projeto_negocio_moderno/negocio_moderno_env/Scripts/django-admin.py | LeandroMelloo/curso_completo_api_rest_django_framework | c56771a2103ac755e68984b2b1b78f591c1d4b5a | [
"Apache-2.0"
] | null | null | null | #!c:\users\leandropaulino\desktop\curso_alura_api_rest_django\projeto_negocio_moderno\negocio_moderno_env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
# RemovedInDjango40Warning only exists while the deprecation period runs;
# on Django >= 4.0 this import fails and we abort with migration guidance.
try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )
if __name__ == "__main__":
    # Still functional, but warn users toward the `django-admin` entry point.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| 34.5 | 124 | 0.749671 |
acee9b422c78dcd3d9cd494d7e52ff6450417918 | 1,701 | py | Python | tests/basics/Referencing27.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | 1 | 2021-05-25T12:48:28.000Z | 2021-05-25T12:48:28.000Z | tests/basics/Referencing27.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | null | null | null | tests/basics/Referencing27.py | RESP3CT88/Nuitka | 0fcc25d9f00c4fc78c79a863c4b7987f573962e1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
from nuitka.tools.testing.Common import executeReferenceChecked
# Global read by every test function below; also serves as iteration count.
x = 17
# Python2.7 or higher syntax things are here.
def simpleFunction1():
    # Plain dict comprehension over a global.
    return {i: x for i in range(x)}
def simpleFunction2():
    # `y` is intentionally undefined: exercises the NameError exit path
    # from inside a dict comprehension.
    try:
        return {y: i for i in range(x)}
    except NameError:
        pass
def simpleFunction3():
    # Set comprehension variant of simpleFunction1.
    return {i for i in range(x)}
def simpleFunction4():
    # Set comprehension variant of the NameError case (`y` undefined).
    try:
        return {y for i in range(x)}
    except NameError:
        pass
# These need stderr to be wrapped.
tests_stderr = ()
# Disabled tests
tests_skipped = {}
# Runs every global named simpleFunction* -- presumably checking that each
# call leaves CPython reference counts unchanged (see executeReferenceChecked).
result = executeReferenceChecked(
    prefix="simpleFunction",
    names=globals(),
    tests_skipped=tests_skipped,
    tests_stderr=tests_stderr,
)
sys.exit(0 if result else 1)
| 23.625 | 79 | 0.684303 |
acee9b7245930c704bfcc3fe09fdcfa205368ca2 | 2,785 | py | Python | native_client_sdk/src/build_tools/install_third_party.py | gavinp/chromium | 681563ea0f892a051f4ef3d5e53438e0bb7d2261 | [
"BSD-3-Clause"
] | 1 | 2016-03-10T09:13:57.000Z | 2016-03-10T09:13:57.000Z | native_client_sdk/src/build_tools/install_third_party.py | gavinp/chromium | 681563ea0f892a051f4ef3d5e53438e0bb7d2261 | [
"BSD-3-Clause"
] | 1 | 2022-03-13T08:39:05.000Z | 2022-03-13T08:39:05.000Z | native_client_sdk/src/build_tools/install_third_party.py | gavinp/chromium | 681563ea0f892a051f4ef3d5e53438e0bb7d2261 | [
"BSD-3-Clause"
] | null | null | null | #! -*- python -*-
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Build and install all the third-party tools and libraries required to build
the SDK code. To add a script, add it to the array |THIRD_PARTY_SCRIPTS|.
Before running the scripts, a couple of environment variables get set:
PYTHONPATH - append this script's dir to the search path for module import.
NACL_SDK_ROOT - forced to point to the root of this repo.
"""
import os
import subprocess
import sys
from optparse import OptionParser
# Append to PYTHONPATH in this very non-compliant way so that this script can be
# run from a DEPS hook, where the normal path rules don't apply.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SDK_SRC_DIR = os.path.dirname(SCRIPT_DIR)
SRC_DIR = os.path.dirname(os.path.dirname(SDK_SRC_DIR))
THIRD_PARTY_DIR = os.path.join(SRC_DIR, 'third_party')
SCONS_DIR = os.path.join(THIRD_PARTY_DIR, 'scons-2.0.1', 'engine')
sys.path.append(SCRIPT_DIR)
sys.path.append(SCONS_DIR)
import build_utils
THIRD_PARTY_SCRIPTS = [
os.path.join('install_boost', 'install_boost.py'),
]
def main(argv):
  """Run each third-party install script with a prepared environment.

  argv: command-line arguments (program name excluded). Only
  -a/--all-toolchains is handled here; all other flags are forwarded
  unchanged to the sub-scripts. (Python 2 script.)
  """
  parser = OptionParser()
  parser.add_option(
      '-a', '--all-toolchains', dest='all_toolchains',
      action='store_true',
      help='Install into all available toolchains.')
  (options, args) = parser.parse_args(argv)
  if args:
    # Positional arguments are not accepted; show usage and bail out.
    print 'ERROR: invalid argument: %s' % str(args)
    parser.print_help()
    sys.exit(1)
  # Child scripts must see this repo's helper modules on PYTHONPATH.
  python_paths = [SCRIPT_DIR, SCONS_DIR]
  shell_env = os.environ.copy()
  python_paths += [shell_env.get('PYTHONPATH', '')]
  shell_env['PYTHONPATH'] = os.pathsep.join(python_paths)
  # Force NACL_SDK_ROOT to point to the toolchain in this repo.
  nacl_sdk_root = os.path.dirname(SCRIPT_DIR)
  shell_env['NACL_SDK_ROOT'] = nacl_sdk_root
  # Forward everything except the flag consumed above.
  script_argv = [arg for arg in argv if not arg in ['-a', '--all-toolchains']]
  if options.all_toolchains:
    script_argv += [
        '--toolchain=%s' % (
            build_utils.NormalizeToolchain(base_dir=nacl_sdk_root,
                                           arch='x86',
                                           variant='glibc')),
        '--toolchain=%s' % (
            build_utils.NormalizeToolchain(base_dir=nacl_sdk_root,
                                           arch='x86',
                                           variant='newlib')),
        '--third-party=%s' % THIRD_PARTY_DIR,
        ]
  for script in THIRD_PARTY_SCRIPTS:
    print "Running install script: %s" % os.path.join(SCRIPT_DIR, script)
    py_command = [sys.executable, os.path.join(SCRIPT_DIR, script)]
    subprocess.check_call(py_command + script_argv, env=shell_env)
if __name__ == '__main__':
main(sys.argv[1:])
| 34.382716 | 80 | 0.674686 |
acee9be55ef8d689d377777a07932113a9b8b22b | 689 | py | Python | stubs/micropython-v1_9_3-esp8266/upip_utarfile.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_3-esp8266/upip_utarfile.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_3-esp8266/upip_utarfile.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'upip_utarfile' on esp8266 v1.9.3
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.0.0(5a875ba)', version='v1.9.3-8-g63826ac5c on 2017-11-01', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
DIRTYPE = "dir"
class FileSection:
    """Auto-generated stub of upip_utarfile.FileSection (bodies omitted).

    Presumably a file-like view over one archive member's data on the
    device -- confirm against the MicroPython firmware source.
    """
    def read(self, *argv) -> Any:
        pass
    def readinto(self, *argv) -> Any:
        pass
    def skip(self, *argv) -> Any:
        pass
REGTYPE = "file"
TAR_HEADER = None
class TarFile:
    """Auto-generated stub of upip_utarfile.TarFile (implementations omitted)."""
    def extractfile(self, *argv) -> Any:
        pass
    def next(self, *argv) -> Any:
        pass
class TarInfo:
    """Auto-generated stub of upip_utarfile.TarInfo."""
    # NOTE(review): generated without a `self` parameter -- `roundup` is
    # likely a module-level helper the stub generator mis-attached here.
    def roundup():
        pass
uctypes = None
| 14.659574 | 152 | 0.576197 |
acee9c13934e687b010a415641de50eb8787eeeb | 204 | py | Python | daira/urls.py | medram/daira | cf445f1b308b879bb6c852067b162a2a1c1c72d3 | [
"MIT"
] | 1 | 2020-11-08T01:37:21.000Z | 2020-11-08T01:37:21.000Z | daira/urls.py | medram/daira | cf445f1b308b879bb6c852067b162a2a1c1c72d3 | [
"MIT"
] | 8 | 2020-08-30T18:16:08.000Z | 2022-01-13T03:01:35.000Z | daira/urls.py | medram/daira | cf445f1b308b879bb6c852067b162a2a1c1c72d3 | [
"MIT"
] | null | null | null | from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
# Namespace URL names under the package name (e.g. reverse('daira:home')).
app_name = __package__
urlpatterns = [
    # Site root -> home view.
    path('', views.home, name='home'),
]
acee9c82497f5aff33336b7829fee069cf6776ba | 7,567 | py | Python | faseAlign/utils.py | EricWilbanks/fase-align | 9c10e16f6c729b3281c447f2519299975aa974be | [
"MIT"
] | 9 | 2017-10-27T22:13:27.000Z | 2021-12-28T16:37:35.000Z | faseAlign/utils.py | EricWilbanks/fase-align | 9c10e16f6c729b3281c447f2519299975aa974be | [
"MIT"
] | 6 | 2018-01-12T01:38:56.000Z | 2022-02-24T17:47:59.000Z | faseAlign/utils.py | EricWilbanks/fase-align | 9c10e16f6c729b3281c447f2519299975aa974be | [
"MIT"
] | 6 | 2017-11-10T21:59:14.000Z | 2022-03-14T16:29:10.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
class spanish_word(object):
    """Grapheme-to-phone conversion, syllabification and stress assignment
    for a single Spanish orthographic word.

    Digraph phones are encoded as placeholders during processing ('1' = NY,
    '2' = CH) and converted back to 'NY'/'CH' at the end of __init__.
    """
    # class level attributes
    phones = ['a', 'e', 'i', 'o', 'u', 'p', 't', 'k', 'f', 's', 'x', '2', 'CH', 'l', 'b', 'd', 'g', 'm', 'n', '1', 'NY', 'y', 'R', 'r']
    unstressed_high_vowels = ['i','u']
    vowels = ['a','e','i','o','u','á','é','í','ó','ú']
    consonants = ['p', 't', 'k', 'f', 's', 'x', '2', 'CH', 'l', 'b', 'd', 'g', 'm', 'n', '1', 'NY', 'y', 'R', 'r']
    legal_clusters = ['pr','br','tr','dr','kr','gr','fr','pl','bl','kl','gl','fl'] # note: not including 'tl'
    x_exception = ['mexicano', 'méxico', 'mexico', 'mexicanos', 'mexicana', 'mexicanas'] # note: lots of other place/proper names could be included here
    y_exception = ['ay' , 'buey' , 'caray' , 'disney' , 'doy' , 'eloy' , 'estoy' , 'ey' , 'guay' , 'güey' , 'hay' , 'hoy' , 'ley' , 'muy' , 'paraguay' , 'rey' , 'soy' , 'uruguay' , 'uy' , 'virrey' , 'voy', 'y']
    corr_tups = [('ll','y'), ('qu','k'), ('ce','se'), ('cé','sé'), ('ci','si'), ('cí','sí'), ('ge','xe'), ('gé','xé'), ('gi','xi'), ('gí','xí'), ('j','x'), ('v','b'), ('z','s'), ('w','u'), ('rr','R'), ('\\br','R'), ('ñ','1'), ('ch','2'), ('c','k'), ('h',''), ('ü','u'), ('gui','gi'), ('gue','ge'), ('guí','gí'), ('gué','gé')]
    corr_tups2 = [('ú','u'), ('ó','o'), ('í','i'), ('é','e'), ('á','a')]
    def __init__(self, word, override = False, custom_phones = None):
        """Build phones, syllables and stress for *word*.

        override=True skips grapheme conversion and uses *custom_phones*.
        """
        self.orth = word.lower()
        # overriding phone process for user-defined custom words
        if override == True:
            self.phones = custom_phones
        else:
            self.phones = self.to_phones()
        self.syllables = self.process_syllables()
        # Transform numbers back to digraphs
        self.phones[:] = [re.sub('1','NY',p) for p in self.phones]
        self.phones[:] = [re.sub('2','CH',p) for p in self.phones]
        for key in self.syllables:
            self.syllables[key][:] = [re.sub('1','NY',entry) for entry in self.syllables[key]]
            self.syllables[key][:] = [re.sub('2','CH',entry) for entry in self.syllables[key]]
    def __repr__(self):
        """Multi-line debug summary of all derived attributes."""
        return '\nWord: ' + self.orth + '\nPhones: ' + str(self.phones) + '\nSyllables: ' + str(self.syllables) + '\nStressed Syllable: ' + str(self.stressed) + '\nNumber of Syllables: ' + str(self.num_syllables)
    def to_phones(self):
        """Convert the orthographic form to a list of single-char phones by
        applying the grapheme substitutions in corr_tups (order matters)."""
        current = self.orth
        if current not in spanish_word.x_exception:
            current = re.sub('x','ks',current)
        if current in spanish_word.y_exception:
            current = re.sub('y','i',current)
        for pair in spanish_word.corr_tups:
            match, repl = pair
            current = re.sub(match,repl,current)
        return list(current)
    def syllabify(self):
        """Split self.phones into numbered syllables (1-based dict).

        Pass 1 groups a C/V skeleton with maximal onsets and re-assigns
        consonant clusters; pass 2 breaks illegal vowel sequences (hiatus).
        Side effect: sets self.num_syllables.
        """
        # convert to cv skeleton
        cv = ''.join(['c' if p in spanish_word.consonants else 'v' for p in self.phones])
        # first parse of groups with maximal onsets
        l = re.findall('(?:c*v+c+$)|(?:c*v+)|(?:v+c+)|(?:^c+$)', cv)
        syllables_c = {}
        syll_n = 0
        phones = self.phones
        if len(phones) != len(''.join(l)):
            print('Error! Investigate! - ' + str(self.orth) + ' - ' + str(self.phones) + ' - ' + str(cv))
        ###################
        # assign consonants
        for group in l:
            syll_n += 1
            syllables_c[syll_n] = []
            if group[0] == 'v':
                # add entire group to syllable
                syllables_c[syll_n].extend(phones[:len(group)])
                phones = phones[len(group):] # after adding group to main dict, remove it
            elif group[0] == 'c':
                # NOTE(review): the lookahead '(?!=v+c*)' tests for a literal
                # '=' and so never fails -- c_len is just the leading run of
                # 'c's. Possibly meant '(?=v+c*)'; behavior kept as-is.
                c_len = len(re.match('c+(?!=v+c*)',group).group(0))
                cluster = ''.join(phones[:c_len])
                # key of previous syllable
                if syll_n > 1:
                    prev_syll = syll_n - 1
                else:
                    prev_syll = syll_n
                # split up clusters as needed
                if c_len == 4:
                    syllables_c[prev_syll].extend(phones[:2]) # first two consonants to previous syllable
                    syllables_c[syll_n].extend(phones[2:len(group)]) # remaining phones in group to current syllable
                elif c_len == 3:
                    if cluster[1:c_len] not in spanish_word.legal_clusters: # x-sibilant-x
                        syllables_c[prev_syll].extend(phones[:2]) # first two consonants to previous syllable
                        syllables_c[syll_n].extend(phones[2:len(group)]) # remaining phones in group to current syllable
                    else:
                        syllables_c[prev_syll].extend(phones[:1]) # first consonant to previous syllable
                        syllables_c[syll_n].extend(phones[1:len(group)]) # remaining phones in group to current syllable
                elif c_len == 2:
                    if cluster not in spanish_word.legal_clusters:
                        syllables_c[prev_syll].extend(phones[:1]) # first consonant to previous syllable
                        syllables_c[syll_n].extend(phones[1:len(group)]) # remaining phones in group to current syllable
                    else:
                        syllables_c[syll_n].extend(phones[:len(group)]) # all phones to current syllable
                elif c_len == 1:
                    syllables_c[syll_n].extend(phones[:len(group)]) # all phones to current syllable
                else:
                    print("Incorrect specification of cluster: " + str(cluster) + ' - ' + str(self.orth))
                #remove phones from this group
                phones = phones[len(group):]
        ###############################
        # correctly assign vowel hiatus
        syllables = {}
        syll_n = 0
        for syll in syllables_c.keys():
            syll_n += 1
            syllables[syll_n] = []
            phones = ''.join(syllables_c[syll])
            cv = ''.join(['c' if p in spanish_word.consonants else 'v' for p in syllables_c[syll]]) #cv framework
            v_match = re.search('vv+',cv) # find adjacent vowels
            if v_match is not None:
                syllables[syll_n].extend(phones[:v_match.start()]) # append any onset consonants to syllable
                cluster = phones[v_match.start():v_match.end()] # extract vowel cluster
                while len(cluster) > 1:
                    block = cluster[:2] # only consider two vowels at a time
                    if ((block[0] not in spanish_word.unstressed_high_vowels) and (block[1] not in spanish_word.unstressed_high_vowels)) or ((block[0] == block [1]) and (block[0] in spanish_word.unstressed_high_vowels)): # adjacent (non-high or stressed high) or (identical high) = hiatus
                        syllables[syll_n].extend(block[0]) # add first vowel to current syllable
                        syll_n += 1 # create new syllable for hiatus
                        syllables[syll_n] = []
                        cluster = cluster[1:]
                    else:
                        syllables[syll_n].extend(block[0]) # add first vowel and try again
                        cluster = cluster[1:]
                syllables[syll_n].extend(cluster) # catch any trailing singletons
                syllables[syll_n].extend(phones[v_match.end():]) # catch any coda consonants
            else:
                syllables[syll_n].extend(phones) # no vowel cluster so append entire syllable
        # NOTE: not currently dealing with lexically exceptional hiatus class words such as du.eto (cf. duelo)
        # add attribute containing number of syllables
        self.num_syllables = syll_n
        return syllables
    def process_syllables(self):
        """Syllabify, locate the stressed syllable (orthographic accent or
        default penultimate/final rules), then strip accent marks.

        Side effect: sets self.stressed; also de-accents self.phones.
        """
        syllables = self.syllabify()
        stressed = 0
        # determine stressed syllable
        for syll in syllables.keys():
            stress_match = re.search('(á|é|í|ó|ú)',''.join(syllables[syll]))
            if stress_match is not None:
                stressed = int(syll)
        if self.num_syllables == 1:
            stressed = 1
        if stressed == 0: # normal rules, no orthographic accent
            if self.orth[-1] in spanish_word.vowels + ['n','s']:
                stressed = self.num_syllables - 1 # penultimate stress since no orthographic accent
            else:
                stressed = self.num_syllables # final stress since no orthographic accent
        # remove accent marks
        for syll in syllables.keys():
            for pair in spanish_word.corr_tups2:
                match, repl = pair
                syllables[syll][:] = [re.sub(match,repl,x) for x in syllables[syll]] # whole list slice
        for pair in spanish_word.corr_tups2:
            match, repl = pair
            self.phones[:] = [re.sub(match,repl,x) for x in self.phones]
        self.stressed = stressed
        return syllables
| 40.465241 | 322 | 0.625743 |
acee9de977e66af873443edfcece2c3c1f862bc7 | 4,187 | py | Python | src/web/process_tmp_imgs.py | whs2k/vasegen | d049e58941281b34d080254423a6b8b5b5771933 | [
"MIT"
] | null | null | null | src/web/process_tmp_imgs.py | whs2k/vasegen | d049e58941281b34d080254423a6b8b5b5771933 | [
"MIT"
] | null | null | null | src/web/process_tmp_imgs.py | whs2k/vasegen | d049e58941281b34d080254423a6b8b5b5771933 | [
"MIT"
] | null | null | null | import os
import sys
import time
orig = sys.argv
sys.argv = [sys.argv[0]]
sys.argv+= '--dataroot ' \
'../../data/processed/pix2pix_vase_examples_512 ' \
'--name pix2pix_vase_fragments_512 ' \
'--model pix2pix --netG unet_512 --direction BtoA --dataset_mode aligned --norm batch ' \
'--eval --preprocess none --gpu_ids -1'.split()
os.chdir('models/pix2pix/')
sys.path.append('.')
from models import create_model
from options.test_options import TestOptions
from data.base_dataset import get_params, get_transform
import numpy as np
import torch
import torchvision
from PIL import Image
import glob
from tempfile import gettempdir
from multiprocessing import Process
TMPDIR = gettempdir() + '/vasegen/'
MAX_JOBS = 1
jobs = {}
class DummyJob:
    """Stand-in for a finished multiprocessing.Process, used when
    MAX_JOBS == 1: exposes the one attribute the reaping loop in
    process_imgs checks."""
    exitcode = 0  # mimics a process that exited successfully
# opt = None
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
to_pil = torchvision.transforms.ToPILImage()
to_tensor = torchvision.transforms.ToTensor()
def load_img(file):
    """Open *file*.stream (presumably a Flask/werkzeug upload -- TODO
    confirm) and return it as an RGB PIL image."""
    image = Image.open(file.stream)
    return image.convert('RGB')
def create_img_dict_single(A, B=None):
    """Build a pix2pix input dict from image *A* (PIL image or torch tensor).

    When *B* is given it is transformed with the same parameters as *A*;
    otherwise a random-noise tensor of matching spatial size stands in.
    Note the swap in the returned dict ('A': B, 'B': A): the model is run
    with --direction BtoA (see the sys.argv setup at the top of this file).
    """
    if type(A) is torch.Tensor:
        A = to_pil(A)  # the transforms below expect a PIL image
    # apply the same transform to both A and B
    transform_params = get_params(opt, A.size)
    A_transform = get_transform(opt, transform_params, grayscale=False)
    A = A_transform(A).unsqueeze(0)  # add batch dimension
    if B is not None:
        B_transform = get_transform(opt, transform_params, grayscale=False)
        B = B_transform(B).unsqueeze(0)
    else:
        B = torch.randn(1, 3, A.shape[2], A.shape[3])  # noise placeholder
    return {'A': B, 'B': A, 'A_paths': '', 'B_paths': ''}
def create_img_dict(AB):
    """Split a side-by-side AB image down the middle and build the pix2pix
    input dict (left half = A, right half = B)."""
    width, height = AB.size
    midpoint = int(width / 2)
    left_half = AB.crop((0, 0, midpoint, height))
    right_half = AB.crop((midpoint, 0, width, height))
    return create_img_dict_single(left_half, right_half)
def process_img(model, data):
    """Run one inference step on *data* and return the generated image as an
    HxWx3 uint8 numpy array."""
    model.set_input(data)  # unpack data from data loader
    model.test()  # run inference
    visuals = model.get_current_visuals()  # get image results
    image_numpy = visuals['fake_B'][0].numpy()  # convert it into a numpy array
    # map CHW floats in [-1, 1] to HWC values in [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(np.uint8)
def predict(model, fname):
    """Run the model on the image file *fname* (an ``A_*.png``).

    Writes the generated image next to the input as ``B_*.png`` and creates
    an extension-less marker file to signal completion. Returns 0.
    Note: str.replace substitutes every 'A_' occurrence in the path --
    assumes 'A_' only appears in the filename prefix (TODO confirm).
    """
    outname = fname.replace('A_', 'B_')
    outtouch = outname.replace('.png', '')
    time.sleep(.1)  # brief pause, presumably so the upload finishes -- confirm
    img = Image.open(fname).convert('RGB')
    img_data = create_img_dict_single(img)
    res = process_img(model, img_data)
    res = Image.fromarray(res)
    res.save(outname)
    # Bug fix: the original shelled out with os.system(f'touch {outtouch}'),
    # which breaks (and is shell-injectable) for paths containing spaces or
    # metacharacters. Create/update the marker file directly instead.
    with open(outtouch, 'a'):
        pass
    return 0
def process_imgs(model):
    """Poll TMPDIR forever for uploaded ``A_*.png`` files and run *model*.

    With MAX_JOBS == 1 predictions run inline; otherwise up to MAX_JOBS
    worker processes are spawned. Inputs of finished jobs are deleted.
    Never returns.
    """
    while True:
        time.sleep(.1)  # avoid a hot busy-wait on the directory
        print(time.time(), 'jobs dict', jobs)
        fnames = glob.glob(TMPDIR + f'A_*.png')
        for fname in fnames:
            if MAX_JOBS == 1:
                # Synchronous path: run inline, mark done via DummyJob.
                predict(model, fname)
                jobs[fname] = DummyJob
            else:
                if fname not in jobs and len(jobs) < MAX_JOBS:
                    jobs[fname] = Process(target=predict, args=(model, fname,))
                    jobs[fname].start()
        # Reap finished jobs: a Process's exitcode is None while running.
        for fname in list(jobs.keys()):
            if jobs[fname].exitcode is not None:
                os.remove(fname)
                del jobs[fname]
if __name__ == '__main__':
    # dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    model.eval()
    # Undo the os.chdir('models/pix2pix/') done at import time (top of file).
    os.chdir('../..')
    # for fname in glob.glob(TMPDIR + f'A_*.png'):
    #     predict(model, fname)
    process_imgs(model)  # blocks forever, polling for uploads
| 31.719697 | 119 | 0.636255 |
acee9eda29c326b53c916edaf31b8441529d340d | 2,759 | py | Python | readthedocs/docsitalia/search/documents.py | italia/readthedocs.org | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 19 | 2018-03-28T12:28:35.000Z | 2022-02-14T20:09:42.000Z | readthedocs/docsitalia/search/documents.py | berez23/docs.italia.it | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 274 | 2017-10-10T07:59:04.000Z | 2022-03-12T00:56:03.000Z | readthedocs/docsitalia/search/documents.py | italia/readthedocs.org | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 13 | 2018-04-03T09:49:50.000Z | 2021-04-18T22:04:15.000Z | from django.conf import settings
from django.db.models import F
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch_dsl import analyzer, tokenizer
from readthedocs.docsitalia.models import Publisher, PublisherProject
from readthedocs.projects.constants import PRIVATE
from readthedocs.projects.models import HTMLFile
from readthedocs.search.documents import RTDDocTypeMixin
# Character-trigram analyzer: gives the autocomplete ("quicksearch") index
# substring / typo-tolerant matching on short fields.
trigram_analyzer = analyzer(
    'trigram_analyzer',
    tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
    filter=['lowercase']
)
# Index name and settings come from settings.ES_INDEXES['quicksearch'].
quicksearch_conf = settings.ES_INDEXES['quicksearch']
quicksearch_index = Index(quicksearch_conf['name'])
quicksearch_index.settings(**quicksearch_conf['settings'])
@quicksearch_index.doc_type
class PageQuickSearchDocument(RTDDocTypeMixin, DocType):
    """
    Page document based on HTMLFile.
    The use of HTMLFile as in :class:`~readthedocs.search.documents.PageDocument`
    allows a uniform user experience between the autocomplete search and the SERP.
    """
    model = fields.KeywordField()  # filled by prepare_model below
    link = fields.KeywordField(attr='get_absolute_url')
    text = fields.TextField(attr='processed_json.title', analyzer=trigram_analyzer)
    version = fields.KeywordField(attr='version.slug')
    class Meta:
        model = HTMLFile
        ignore_signals = True  # index is rebuilt explicitly, not on saves
    def get_queryset(self):
        """
        Overwrite default queryset to filter certain files to index.
        Do not index files that belong to non sphinx project.
        Also do not index certain files.
        """
        return super().get_queryset().internal().filter(
            project__documentation_type__contains='sphinx',
            version__slug=F("project__default_version"),
        ).exclude(version__privacy_level=PRIVATE)
    def prepare_model(self, obj):
        """Return the document model name."""
        # Italian label ("document"); presumably shown in the search UI.
        return "documento"
@quicksearch_index.doc_type
class ProjectQuickSearchDocument(RTDDocTypeMixin, DocType):
    """Autocomplete document for publisher projects, matched on name."""
    model = fields.KeywordField()  # filled by prepare_model below
    link = fields.KeywordField(attr='get_absolute_url')
    text = fields.TextField(attr='name', analyzer=trigram_analyzer)
    class Meta:
        model = PublisherProject
        ignore_signals = True  # index is rebuilt explicitly, not on saves
    def prepare_model(self, obj):
        """Return the document model name."""
        # Italian label ("project"); presumably shown in the search UI.
        return "progetto"
@quicksearch_index.doc_type
class PublisherQuickSearchDocument(RTDDocTypeMixin, DocType):
    """Autocomplete document for publishers, matched on name."""
    model = fields.KeywordField()  # filled by prepare_model below
    link = fields.KeywordField(attr='get_absolute_url')
    text = fields.TextField(attr='name', analyzer=trigram_analyzer)
    class Meta:
        model = Publisher
        ignore_signals = True  # index is rebuilt explicitly, not on saves
    def prepare_model(self, obj):
        """Return the document model name."""
        # Italian label ("administration"); presumably shown in the search UI.
        return "amministrazione"
acee9f3022e408519237dda8a4141b0f84a925ca | 3,181 | py | Python | scripts/phobos/pose.py | oliverlee/phobos | eba4adcca8f9f6afb060904b056596f383b52d8d | [
"BSD-2-Clause"
] | 2 | 2016-12-14T01:21:34.000Z | 2018-09-04T10:43:10.000Z | scripts/phobos/pose.py | oliverlee/phobos | eba4adcca8f9f6afb060904b056596f383b52d8d | [
"BSD-2-Clause"
] | 182 | 2016-04-25T13:36:52.000Z | 2020-07-20T10:24:18.000Z | scripts/phobos/pose.py | oliverlee/phobos | eba4adcca8f9f6afb060904b056596f383b52d8d | [
"BSD-2-Clause"
] | 6 | 2016-04-25T13:20:53.000Z | 2021-02-18T18:25:32.000Z | # Old-style pose type for flimnap is defined in scripts/phobos/pose_def.cc
import os
import re
import struct
import numpy as np
# Location of the C source that declares the packed pose struct; it is
# expected to sit in the same directory as this script.
script_dir = os.path.dirname(os.path.realpath(__file__))
pose_def_file = os.path.join(script_dir, 'pose_def.cc')
def get_time_vector(data):
    """Unwrap the 16-bit 'timestamp' counter in *data* into a continuous vector.

    Every time the raw counter decreases, one full counter span (2**16) is
    added to that sample and all later ones. Samples whose 'x' value is NaN
    are treated as invalid: the decrease right after them is not counted as
    a wrap-around, and their own time is pushed to a negative sentinel.
    """
    stamps = np.asarray(data['timestamp'])
    counter_span = 2**16
    # wraps[i] == 1 when the counter decreased between sample i-1 and sample i.
    wraps = np.concatenate(([0], (np.diff(stamps.astype('int')) < 0).astype('int')))
    invalid = np.where(np.isnan(data['x']))[0]
    try:
        # A drop immediately following an invalid sample is not a real wrap.
        wraps[invalid + 1] = 0
    except IndexError:
        # The last sample is invalid: clear the flags at the remaining
        # invalid indices instead (mirrors the original fallback behaviour).
        wraps[invalid[:-1]] = 0
    offsets = np.cumsum(wraps) * counter_span
    offsets[invalid] = -(counter_span - 1)  # sentinel offset for invalid samples
    return stamps + offsets
def parse_format(source_file=pose_def_file):
    """Parse the packed C ``pose_t`` struct definition out of *source_file*.

    Returns a tuple ``(struct_format, numpy_dtype, pose_format_text)``:
    a little-endian :mod:`struct` format string, the equivalent
    :class:`numpy.dtype`, and the raw text of the struct definition.
    Raises ValueError on an unhandled field type, EOFError if the struct is
    never closed, and AssertionError if the declared byte size disagrees
    with the computed struct/dtype sizes.
    """
    with open(source_file, 'r') as f:
        start_pose_definition = False
        struct_format = '<'  # '<' = little-endian, standard sizes, no padding
        np_dtype = []
        pose_format_text = ''

        for line in f:
            # NOTE(review): 'for line in f' never yields an empty string
            # (lines keep their '\n'), so this EOFError looks unreachable
            # as written — confirm intended behaviour.
            if not line:
                raise EOFError('Pose not fully defined in file {0}'.format(
                    source_file))

            if not start_pose_definition:
                # Skip everything until the struct header is found.
                if 'struct __attribute__((__packed__)) pose_t {' in line:
                    start_pose_definition = True
                    pose_format_text += line
            else:
                # The closing line carries the declared size, e.g. "}; /* 48 bytes */".
                match = re.search('}; /\* ([0-9]+) bytes \*/', line)
                if match:  # end of pose definition
                    pose_size = int(match.group(1))
                    pose_format_text += line

                    # Cross-check declared size vs struct and numpy sizes.
                    struct_size = struct.Struct(struct_format).size
                    assert struct_size == pose_size, \
                        '{0}, {1}'.format(pose_size, struct_size)
                    assert np.dtype(np_dtype).itemsize == struct_size
                    return (struct_format, np.dtype(np_dtype), pose_format_text)
                else:
                    words = line.split()
                    if words[1][-1] != ';':
                        # line is commented
                        assert words[0].startswith(('//', '/*', '*/', '*')), \
                            'Unexpected format for pose definition: ' + line
                        continue

                    # Map the C field type to a struct code and numpy dtype;
                    # words[1][:-1] strips the trailing ';' from the name.
                    t = words[0]
                    if t == 'float':
                        struct_format += 'f'
                        np_dtype.append((words[1][:-1], 'f4'))
                    elif t == 'uint8_t':
                        struct_format += 'B'
                        np_dtype.append((words[1][:-1], 'u1'))
                    elif t == 'uint16_t':
                        struct_format += 'H'
                        np_dtype.append((words[1][:-1], 'u2'))
                    elif t == 'uint32_t':
                        struct_format += 'I'
                        np_dtype.append((words[1][:-1], 'u4'))
                    else:
                        msg = 'Conversion for type {0} not handled'.format(t)
                        raise ValueError(msg)
                    pose_format_text += line
if __name__ == '__main__':
    # Manual smoke test: parse the bundled pose_def.cc and print the results.
    a, b, c = parse_format(pose_def_file)
    print(a)
    print(b)
    print(c)
acee9f33008657bf1d4648c4c4de6b0dfd503f4c | 26,376 | py | Python | mqtt.old.py | chrispab/broadlink-mqtt | 688617bf9d3bc2b86ec67bca98484ac299a5eec6 | [
"MIT"
] | null | null | null | mqtt.old.py | chrispab/broadlink-mqtt | 688617bf9d3bc2b86ec67bca98484ac299a5eec6 | [
"MIT"
] | null | null | null | mqtt.old.py | chrispab/broadlink-mqtt | 688617bf9d3bc2b86ec67bca98484ac299a5eec6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import paho.mqtt.client as paho # pip install paho-mqtt
import broadlink # pip install broadlink
import os
import sys
import time
import logging
import logging.config
import socket
import sched
import json
import binascii
import types
from threading import Thread
from test import TestDevice
# TLS support is optional: when the ssl module is unavailable the broker can
# still be used unencrypted (configuring TLS then aborts at startup).
HAVE_TLS = True
try:
    import ssl
except ImportError:
    HAVE_TLS = False
# Read the initial config files. Both paths default to files next to this
# script but can be overridden via environment variables.
# (Removed: a large block of commented-out exploratory prints that merely
# duplicated the live lines below.)
dirname = os.path.dirname(os.path.abspath(__file__)) + '/'
logging.config.fileConfig(dirname + 'logging.conf')
CONFIG = os.getenv('BROADLINKMQTTCONFIG', dirname + 'mqtt.conf')
CONFIG_CUSTOM = os.getenv('BROADLINKMQTTCONFIGCUSTOM', dirname + 'data/custom.conf')
class Config(object):
    """Configuration loaded by exec()-ing mqtt.conf plus an optional custom overlay."""

    def __init__(self, filename=CONFIG, custom_filename=CONFIG_CUSTOM):
        self.config = {}
        # The config files are plain Python; they are executed with
        # self.config as the globals dict, so every assignment becomes a key.
        exec(compile(open(filename, "rb").read(), filename, 'exec'), self.config)
        if os.path.isfile(custom_filename):
            exec(compile(open(custom_filename, "rb").read(), custom_filename, 'exec'), self.config)
            print("Found custom config")
        # print(self.config)

        # Providing a CA certificate implicitly switches TLS on.
        if self.config.get('ca_certs', None) is not None:
            self.config['tls'] = True

        # Translate the textual tls_version setting into an ssl constant.
        tls_version = self.config.get('tls_version', None)
        if tls_version is not None:
            if not HAVE_TLS:
                logging.error("TLS parameters set but no TLS available (SSL)")
                sys.exit(2)

            if tls_version == 'tlsv1':
                self.config['tls_version'] = ssl.PROTOCOL_TLSv1
            if tls_version == 'tlsv1.2':
                # TLS v1.2 is available starting from python 2.7.9 and requires openssl version 1.0.1+.
                if sys.version_info >= (2, 7, 9):
                    self.config['tls_version'] = ssl.PROTOCOL_TLSv1_2
                else:
                    logging.error("TLS version 1.2 not available but 'tlsv1.2' is set.")
                    sys.exit(2)
            if tls_version == 'sslv3':
                self.config['tls_version'] = ssl.PROTOCOL_SSLv3

    def get(self, key, default='special empty value'):
        """Return the configured value for *key*.

        Without an explicit *default*, a missing key is treated as a fatal
        configuration error and terminates the process.
        """
        v = self.config.get(key, default)
        if v == 'special empty value':
            logging.error("Configuration parameter '%s' should be specified" % key)
            sys.exit(2)
        return v
# Load the configuration once at import time; abort early on any failure.
try:
    cf = Config()
except Exception as e:
    print("Cannot load configuration from file %s: %s" % (CONFIG, str(e)))
    sys.exit(2)

# MQTT publish defaults shared by all outgoing messages.
qos = cf.get('mqtt_qos', 0)
retain = cf.get('mqtt_retain', False)
topic_prefix = cf.get('mqtt_topic_prefix', 'broadlink/')
# noinspection PyUnusedLocal
def on_message(client, device, msg):
    """Paho on-message callback: dispatch an MQTT command to the right device.

    *device* is either a single Broadlink device or, in multiple-device mode,
    a dict mapping an MQTT sub-prefix to a device. The command is the topic
    with the (sub)prefix stripped; the payload selects the action.
    """
    command = msg.topic[len(topic_prefix):]

    # Multiple-device mode: resolve the sub-prefix to a concrete device.
    if isinstance(device, dict):
        for subprefix in device:
            if command.startswith(subprefix):
                device = device[subprefix]
                command = command[len(subprefix):]
                break
        else:
            logging.error("MQTT topic %s has no recognized device reference, expected one of %s" %
                          (msg.topic, ','.join(device.keys())))
            return

    # internal notification — topics this script publishes itself; ignore them.
    if command == 'temperature' or \
            command == 'humidity' or \
            command == 'energy' or \
            command == 'sensors' or \
            command == 'position' or \
            command == 'state' or \
            command.startswith('state/') or \
            command.startswith('sensor/'):
        return

    try:
        action = msg.payload.decode('utf-8').lower()
        logging.debug("Received MQTT message " + msg.topic + " " + action)

        # SP1/2 / MP1/ BG1 power control
        if command == 'power':
            if device.type == 'SP1' or device.type == 'SP2':
                state = action == 'on' or action == '1'
                logging.debug("Setting power state to {0}".format(state))
                device.set_power(1 if state else 0)
                return

            if device.type == 'MP1':
                # Payload format "<socket>/<on|off|1|0>".
                parts = action.split("/", 2)
                if len(parts) == 2:
                    sid = int(parts[0])
                    state = parts[1] == 'on' or parts[1] == '1'
                    logging.debug("Setting power state of socket {0} to {1}".format(sid, state))
                    device.set_power(sid, state)
                return

            if device.type == 'BG1':
                state = action == 'on' or action == '1'
                logging.debug("Setting power state of all sockets to {0}".format(state))
                device.set_state(pwr1=state, pwr2=state)
                return

            # power adapters
            if device.type == 'SP4B':
                state = action == 'on' or action == '1'
                logging.debug("Setting power state of adapter to {0}".format(state))
                device.set_state(state)
                return

        # MP1 power control — socket id encoded in the topic: power/<sid>.
        if command.startswith('power/') and device.type == 'MP1':
            sid = int(command[6:])
            state = action == 'on' or action == '1'
            logging.debug("Setting power state of socket {0} to {1}".format(sid, state))
            device.set_power(sid, state)
            return

        # BG1 power control
        if command.startswith('power/') and device.type == 'BG1':
            sid = int(command[6:])
            state = action == 'on' or action == '1'
            logging.debug("Setting power state of socket {0} to {1}".format(sid, state))
            if sid == 1:
                device.set_state(pwr1=state)
            elif sid == 2:
                device.set_state(pwr2=state)
            return

        # BG1 led brightness
        if command == 'brightness' and device.type == 'BG1':
            state = int(action)
            logging.debug("Setting led brightness to {0}".format(state))
            device.set_state(idcbrightness=state)
            return

        # Dooya curtain control — open/close/stop, publishing position back.
        if command == 'action':
            if device.type == 'Dooya DT360E':
                if action == 'open':
                    logging.debug("Opening curtain")
                    device.open()
                    device.publish(100)
                elif action == 'close':
                    logging.debug("Closing curtain")
                    device.close()
                    device.publish(0)
                elif action == 'stop':
                    logging.debug("Stopping curtain")
                    device.stop()
                    device.publish(device.get_percentage())
                else:
                    logging.warning("Unrecognized curtain action " + action)
                return

        if command == 'set' and device.type == 'Dooya DT360E':
            percentage = int(action)
            logging.debug("Setting curtain position to {0}".format(percentage))
            device.set_percentage_and_wait(percentage)
            device.publish(device.get_percentage())
            return

        # RM2/RM4 record/replay control — commands are stored as hex files.
        if device.type == 'RM2' or device.type == 'RM4':
            file = dirname + "commands/" + command
            handy_file = file + '/' + action
            if command == 'macro':
                file = dirname + "macros/" + action
                macro(device, file)
                return
            elif action == '' or action == 'auto':
                record_or_replay(device, file)
                return
            elif action == 'autorf':
                record_or_replay_rf(device, file)
                return
            elif os.path.isfile(handy_file):
                # Topic names a directory of commands; payload picks the file.
                replay(device, handy_file)
                return
            elif action == 'record':
                record(device, file)
                return
            elif action == 'recordrf':
                record_rf(device, file)
                return
            elif action == 'replay':
                replay(device, file)
                return
            elif action == 'macro':
                file = dirname + "macros/" + command
                macro(device, file)
                return

        logging.warning("Unrecognized MQTT message " + action)
    except Exception:
        logging.exception("Error")
# noinspection PyUnusedLocal
def on_connect(client, device, flags, result_code):
    """Paho on-connect callback: announce birth (if configured) and subscribe."""
    birth_payload = cf.get('mqtt_birth_payload', False)
    if birth_payload:
        birth_topic = cf.get('mqtt_birth_topic', 'clients/broadlink')
        mqttc.publish(birth_topic, payload=birth_payload, qos=0, retain=True)

    wildcard_topic = topic_prefix + '#'
    logging.debug("Connected to MQTT broker, subscribing to topic " + wildcard_topic)
    mqttc.subscribe(wildcard_topic, qos)
# noinspection PyUnusedLocal
def on_disconnect(client, device, rc):
    """Paho on-disconnect callback: log and back off before paho reconnects."""
    logging.warning("OOOOPS! MQTT disconnection")
    time.sleep(10)
def record_or_replay(device, file):
    """Replay *file* if it already exists, otherwise record a new IR command into it."""
    handler = replay if os.path.isfile(file) else record
    handler(device, file)
def record_or_replay_rf(device, file):
    """Replay *file* if it already exists, otherwise record a new RF command into it."""
    handler = replay if os.path.isfile(file) else record_rf
    handler(device, file)
def record(device, file):
    """Put the device in IR learning mode and save the captured packet to *file*.

    Polls up to 8 times (5 s apart) for a packet; the packet is stored
    hex-encoded. Missing parent directories are created.
    """
    logging.debug("Recording command to file " + file)
    # receive packet
    device.enter_learning()
    ir_packet = None
    attempt = 0
    while ir_packet is None and attempt < 8:
        attempt = attempt + 1
        time.sleep(5)
        try:
            ir_packet = device.check_data()
        except (broadlink.exceptions.ReadError, broadlink.exceptions.StorageError):
            # Nothing captured yet; keep polling until the attempt budget runs out.
            continue
    if ir_packet is not None:
        # write to file
        directory = os.path.dirname(file)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(file, 'wb') as f:
            f.write(binascii.hexlify(ir_packet))
        logging.debug("Done")
    else:
        logging.warning("No command received")
def record_rf(device, file):
    """Learn an RF command (frequency sweep, then a single capture) and save it.

    The user first holds the remote button while the device sweeps for the
    carrier frequency (20 s budget), then presses it once for the actual
    capture (up to 6 polls, 5 s apart). The packet is stored hex-encoded in
    *file*; missing parent directories are created.
    """
    logging.debug("Recording RF command to file " + file)
    logging.debug("Learning RF Frequency, press and hold the button to learn...")
    device.sweep_frequency()
    timeout = 20
    while (not device.check_frequency()) and (timeout > 0):
        time.sleep(1)
        timeout -= 1

    if timeout <= 0:
        # logging.warn() is a deprecated alias — use warning().
        logging.warning("RF Frequency not found")
        device.cancel_sweep_frequency()
        return

    logging.debug("Found RF Frequency - 1 of 2!")
    time.sleep(5)
    logging.debug("To complete learning, single press the button you want to learn")

    # receive packet
    device.find_rf_packet()
    rf_packet = None
    attempt = 0
    while rf_packet is None and attempt < 6:
        time.sleep(5)
        rf_packet = device.check_data()
        attempt = attempt + 1
    if rf_packet is not None:
        # write to file
        directory = os.path.dirname(file)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(file, 'wb') as f:
            f.write(binascii.hexlify(rf_packet))
        logging.debug("Done")
    else:
        logging.warning("No command received")
def replay(device, file):
    """Send the hex-encoded IR/RF packet stored in *file* through *device*."""
    logging.debug("Replaying command from file " + file)
    with open(file, 'rb') as handle:
        hex_payload = handle.read()
    device.send_data(binascii.unhexlify(hex_payload.strip()))
def macro(device, file):
    """Replay a macro file: one command name per line.

    Blank lines and '#' comments are skipped; a line 'pause <ms>' sleeps for
    the given number of milliseconds; every other line names a file under
    the commands/ directory to replay.
    """
    logging.debug("Replaying macro from file " + file)
    with open(file, 'r') as f:
        for line in f:
            line = line.strip(' \n\r\t')
            if len(line) == 0 or line.startswith("#"):
                continue
            if line.startswith("pause "):
                pause = int(line[6:].strip())
                logging.debug("Pause for " + str(pause) + " milliseconds")
                time.sleep(pause / 1000.0)
            else:
                command_file = dirname + "commands/" + line
                replay(device, command_file)
def get_device(cf):
    """Discover or directly construct the configured Broadlink device(s).

    Depending on 'device_type' this returns a single configured device,
    a dict mapping MQTT sub-prefix -> device ('multiple_lookup'), or a test
    stub. Exits the process on discovery/configuration errors.

    NOTE: an unresolved git merge conflict was removed here; the local
    (HEAD) variant was kept because it formats MAC bytes with [::-1],
    consistent with the single-device lookup branch below. Stray debug
    print() calls were replaced with logging.debug for consistency.
    """
    device_type = cf.get('device_type', 'lookup')
    logging.debug("Looking for Broadlink device(s), device_type=%s", device_type)
    if device_type == 'lookup':
        local_address = cf.get('local_address', None)
        lookup_timeout = cf.get('lookup_timeout', 20)
        devices = broadlink.discover(timeout=lookup_timeout) if local_address is None else \
            broadlink.discover(timeout=lookup_timeout, local_ip_address=local_address)
        if len(devices) == 0:
            logging.error('No Broadlink device found')
            sys.exit(2)
        if len(devices) > 1:
            logging.error('More than one Broadlink device found (' +
                          ', '.join([d.type + '/' + d.host[0] + '/' + ':'.join(format(s, '02x') for s in d.mac[::-1]) for d in devices]) +
                          ')')
            sys.exit(2)
        return configure_device(devices[0], topic_prefix)
    elif device_type == 'multiple_lookup':
        local_address = cf.get('local_address', None)
        lookup_timeout = cf.get('lookup_timeout', 20)
        devices = broadlink.discover(timeout=lookup_timeout) if local_address is None else \
            broadlink.discover(timeout=lookup_timeout, local_ip_address=local_address)
        if len(devices) == 0:
            logging.error('No Broadlink devices found')
            sys.exit(2)
        mqtt_multiple_prefix_format = cf.get('mqtt_multiple_subprefix_format', None)
        devices_dict = {}
        for device in devices:
            # MAC bytes are reported least-significant first, hence [::-1];
            # mac_nic keeps only the NIC-specific (lowest three) bytes.
            mqtt_subprefix = mqtt_multiple_prefix_format.format(
                type=device.type,
                host=device.host[0],
                mac='_'.join(format(s, '02x') for s in device.mac[::-1]),
                mac_nic='_'.join(format(s, '02x') for s in device.mac[2::-1]))
            logging.debug("Configuring device %s under sub-prefix %s%s", device, topic_prefix, mqtt_subprefix)
            device = configure_device(device, topic_prefix + mqtt_subprefix)
            devices_dict[mqtt_subprefix] = device
        return devices_dict
    elif device_type == 'test':
        return configure_device(TestDevice(cf), topic_prefix)
    else:
        # Directly configured device: explicit host, MAC, and device type.
        host = (cf.get('device_host'), 80)
        mac = bytearray.fromhex(cf.get('device_mac').replace(':', ' '))
        if device_type == 'rm':
            device = broadlink.rm(host=host, mac=mac, devtype=0x2712)
        elif device_type == 'rm4':
            device = broadlink.rm4(host=host, mac=mac, devtype=0x51da)
        elif device_type == 'sp1':
            device = broadlink.sp1(host=host, mac=mac, devtype=0)
        elif device_type == 'sp2':
            device = broadlink.sp2(host=host, mac=mac, devtype=0x2711)
        elif device_type == 'a1':
            device = broadlink.a1(host=host, mac=mac, devtype=0x2714)
        elif device_type == 'mp1':
            device = broadlink.mp1(host=host, mac=mac, devtype=0x4EB5)
        elif device_type == 'dooya':
            device = broadlink.dooya(host=host, mac=mac, devtype=0x4E4D)
        elif device_type == 'bg1':
            device = broadlink.bg1(host=host, mac=mac, devtype=0x51E3)
        else:
            logging.error('Incorrect device configured: ' + device_type)
            sys.exit(2)
        return configure_device(device, topic_prefix)
def _start_periodic(interval, action, extra_args):
    """Run *action* every *interval* seconds on a daemon SchedulerThread.

    *action* is invoked as action(scheduler, interval, *extra_args) and is
    expected to re-schedule itself on each run.
    """
    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(interval, 1, action, [scheduler, interval] + list(extra_args))
    worker = SchedulerThread(scheduler)
    worker.daemon = True
    worker.start()


def configure_device(device, mqtt_prefix):
    """Authenticate with *device* and start any configured periodic publishers.

    Returns the device; Dooya devices additionally get a bound ``publish``
    method that reports the curtain position over MQTT.

    NOTE: an unresolved git merge conflict was removed in the connect log
    line; the local (HEAD) variant was kept (MAC logged most-significant
    first via [::-1], consistent with get_device()). The five copies of the
    scheduler/thread boilerplate were factored into _start_periodic().
    """
    device.auth()
    logging.debug('Connected to \'%s\' Broadlink device at \'%s\' (MAC %s) and started listening for commands at MQTT topic having prefix \'%s\' '
                  % (device.type, device.host[0], ':'.join(format(s, '02x') for s in device.mac[::-1]), mqtt_prefix))

    broadlink_rm_temperature_interval = cf.get('broadlink_rm_temperature_interval', 0)
    if (device.type == 'RM2' or device.type == 'RM4') and broadlink_rm_temperature_interval > 0:
        _start_periodic(broadlink_rm_temperature_interval, broadlink_rm_temperature_timer, [device, mqtt_prefix])

    broadlink_sp_energy_interval = cf.get('broadlink_sp_energy_interval', 0)
    if device.type == 'SP2' and broadlink_sp_energy_interval > 0:
        _start_periodic(broadlink_sp_energy_interval, broadlink_sp_energy_timer, [device, mqtt_prefix])

    broadlink_a1_sensors_interval = cf.get('broadlink_a1_sensors_interval', 0)
    if device.type == 'A1' and broadlink_a1_sensors_interval > 0:
        _start_periodic(broadlink_a1_sensors_interval, broadlink_a1_sensors_timer, [device, mqtt_prefix])

    broadlink_mp1_state_interval = cf.get('broadlink_mp1_state_interval', 0)
    if device.type == 'MP1' and broadlink_mp1_state_interval > 0:
        _start_periodic(broadlink_mp1_state_interval, broadlink_mp1_state_timer, [device, mqtt_prefix])

    if device.type == 'Dooya DT360E':
        # noinspection PyUnusedLocal
        def publish(dev, percentage):
            """Publish the curtain position (0-100) to <prefix>position."""
            try:
                percentage = str(percentage)
                topic = mqtt_prefix + "position"
                logging.debug("Sending Dooya position " + percentage + " to topic " + topic)
                mqttc.publish(topic, percentage, qos=qos, retain=retain)
            except:
                logging.exception("Error")

        device.publish = types.MethodType(publish, device)

        broadlink_dooya_position_interval = cf.get('broadlink_dooya_position_interval', 0)
        if broadlink_dooya_position_interval > 0:
            _start_periodic(broadlink_dooya_position_interval, broadlink_dooya_position_timer, [device])

    broadlink_bg1_state_interval = cf.get('broadlink_bg1_state_interval', 0)
    if (device.type == 'BG1' or device.type == 'SP4B') and broadlink_bg1_state_interval > 0:
        _start_periodic(broadlink_bg1_state_interval, broadlink_bg1_state_timer, [device, mqtt_prefix])

    return device
def broadlink_rm_temperature_timer(scheduler, delay, device, mqtt_prefix):
    """Publish RM temperature (and RM4 humidity); re-schedules itself every *delay* seconds."""
    scheduler.enter(delay, 1, broadlink_rm_temperature_timer, [scheduler, delay, device, mqtt_prefix])

    try:
        temperature = str(device.check_temperature())
        topic = mqtt_prefix + "temperature"
        logging.debug("Sending RM temperature " + temperature + " to topic " + topic)
        mqttc.publish(topic, temperature, qos=qos, retain=retain)

        # Only RM4 devices expose a humidity sensor.
        if device.type == 'RM4':
            humidity = str(device.check_humidity())
            topic = mqtt_prefix + "humidity"
            logging.debug("Sending RM humidity " + humidity + " to topic " + topic)
            mqttc.publish(topic, humidity, qos=qos, retain=retain)
    except:
        logging.exception("Error")
def broadlink_sp_energy_timer(scheduler, delay, device, mqtt_prefix):
    """Publish SP2 energy consumption; re-schedules itself every *delay* seconds."""
    scheduler.enter(delay, 1, broadlink_sp_energy_timer, [scheduler, delay, device, mqtt_prefix])

    try:
        energy = str(device.get_energy())
        topic = mqtt_prefix + "energy"
        logging.debug("Sending SP energy " + energy + " to topic " + topic)
        mqttc.publish(topic, energy, qos=qos, retain=retain)
    except:
        logging.exception("Error")
def broadlink_a1_sensors_timer(scheduler, delay, device, mqtt_prefix):
    """Publish A1 sensor readings (one JSON message or one message per sensor).

    Re-schedules itself every *delay* seconds. Config keys select text vs
    raw values and JSON vs per-sensor topics.
    """
    scheduler.enter(delay, 1, broadlink_a1_sensors_timer, [scheduler, delay, device, mqtt_prefix])

    try:
        text_values = cf.get('broadlink_a1_sensors_text_values', False)
        is_json = cf.get('broadlink_a1_sensors_json', False)
        sensors = device.check_sensors() if text_values else device.check_sensors_raw()
        if is_json:
            topic = mqtt_prefix + "sensors"
            value = json.dumps(sensors)
            logging.debug("Sending A1 sensors '%s' to topic '%s'" % (value, topic))
            mqttc.publish(topic, value, qos=qos, retain=retain)
        else:
            for name in sensors:
                topic = mqtt_prefix + "sensor/" + name
                value = str(sensors[name])
                logging.debug("Sending A1 %s '%s' to topic '%s'" % (name, value, topic))
                mqttc.publish(topic, value, qos=qos, retain=retain)
    except:
        logging.exception("Error")
def broadlink_mp1_state_timer(scheduler, delay, device, mqtt_prefix):
    """Publish MP1 per-socket power state (JSON or per-socket topics); re-schedules itself."""
    scheduler.enter(delay, 1, broadlink_mp1_state_timer, [scheduler, delay, device, mqtt_prefix])

    try:
        is_json = cf.get('broadlink_mp1_state_json', False)
        state = device.check_power()
        if is_json:
            topic = mqtt_prefix + "state"
            value = json.dumps(state)
            logging.debug("Sending MP1 state '%s' to topic '%s'" % (value, topic))
            mqttc.publish(topic, value, qos=qos, retain=retain)
        elif state is not None:
            for name in state:
                topic = mqtt_prefix + "state/" + name
                value = str(state[name])
                logging.debug("Sending MP1 %s '%s' to topic '%s'" % (name, value, topic))
                mqttc.publish(topic, value, qos=qos, retain=retain)
    except:
        logging.exception("Error")
def broadlink_dooya_position_timer(scheduler, delay, device):
    """Publish the Dooya curtain position via device.publish(); re-schedules itself."""
    scheduler.enter(delay, 1, broadlink_dooya_position_timer, [scheduler, delay, device])
    device.publish(device.get_percentage())
def broadlink_bg1_state_timer(scheduler, delay, device, mqtt_prefix):
    """Publish BG1/SP4B state (JSON or one topic per field); re-schedules itself."""
    scheduler.enter(delay, 1, broadlink_bg1_state_timer, [scheduler, delay, device, mqtt_prefix])

    try:
        is_json = cf.get('broadlink_bg1_state_json', False)
        state = device.get_state()
        if is_json:
            topic = mqtt_prefix + "state"
            value = json.dumps(state)
            logging.debug("-Sending '%s' state '%s' to topic '%s'" % (device.type, value, topic))
            mqttc.publish(topic, value, qos=qos, retain=retain)
        elif state is not None:
            for name in state:
                topic = mqtt_prefix + "state/" + name
                value = str(state[name])
                logging.debug("--Sending device type: '%s', name: '%s', value: '%s' to topic: '%s'" % (device.type, name, value, topic))
                mqttc.publish(topic, value, qos=qos, retain=retain)
    except:
        logging.exception("Error")
class SchedulerThread(Thread):
    """Thread that runs a sched.scheduler event loop until its queue is empty."""

    def __init__(self, scheduler):
        Thread.__init__(self)
        self.scheduler = scheduler

    def run(self):
        try:
            self.scheduler.run()
        except:
            # Never let a timer error kill the process silently.
            logging.exception("Error")
if __name__ == '__main__':
    devices = get_device(cf)

    clientid = cf.get('mqtt_clientid', 'broadlink-%s' % os.getpid())
    # initialise MQTT broker connection; the device(s) travel as userdata.
    mqttc = paho.Client(clientid, clean_session=cf.get('mqtt_clean_session', False), userdata=devices)
    mqttc.on_message = on_message
    mqttc.on_connect = on_connect
    mqttc.on_disconnect = on_disconnect

    if cf.get('mqtt_will_payload', False):
        mqttc.will_set(cf.get('mqtt_will_topic', 'clients/broadlink'), payload=cf.get('mqtt_will_payload'), qos=0, retain=True)

    # Delays will be: 3, 6, 12, 24, 30, 30, ...
    # mqttc.reconnect_delay_set(delay=3, delay_max=30, exponential_backoff=True)

    if cf.get('tls', False):
        mqttc.tls_set(cf.get('ca_certs', None), cf.get('certfile', None), cf.get('keyfile', None),
                      tls_version=cf.get('tls_version', None), ciphers=None)

    if cf.get('tls_insecure', False):
        mqttc.tls_insecure_set(True)

    mqttc.username_pw_set(cf.get('mqtt_username'), cf.get('mqtt_password'))
    # Routed through the logger (was a bare print) for consistency.
    logging.info("MQTT broker: %s", cf.get('mqtt_broker', 'localhost'))

    # Connect loop: retry forever on socket errors, exit cleanly on Ctrl-C.
    while True:
        try:
            mqttc.connect(cf.get('mqtt_broker', 'localhost'), int(cf.get('mqtt_port', '1883')), 60)
            mqttc.loop_forever()
        except socket.error:
            # logging.warn() is a deprecated alias — use warning().
            logging.warning("Cannot connect to MQTT server, will try to reconnect in 5 seconds")
            time.sleep(5)
        except KeyboardInterrupt:
            sys.exit(0)
        except:
            logging.exception("Error")
acee9f9ad243d041843ddd010268504806ca64f8 | 986 | py | Python | thrift/test/copy_files.py | tahmidbintaslim/fbthrift | 1492869dbd748295a2262ad76a19623c90e47b95 | [
"Apache-2.0"
] | null | null | null | thrift/test/copy_files.py | tahmidbintaslim/fbthrift | 1492869dbd748295a2262ad76a19623c90e47b95 | [
"Apache-2.0"
] | null | null | null | thrift/test/copy_files.py | tahmidbintaslim/fbthrift | 1492869dbd748295a2262ad76a19623c90e47b95 | [
"Apache-2.0"
] | 1 | 2020-02-13T10:42:16.000Z | 2020-02-13T10:42:16.000Z | #!/usr/local/bin/python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import string
import sys
if __name__ == "__main__":
dir = "."
for arg in sys.argv:
pair = string.split(arg, '=')
if len(pair) == 2 and pair[0] == "--install_dir":
dir = pair[1]
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copy("EnumTest.thrift", dir + "/EnumTestStrict.thrift")
| 30.8125 | 74 | 0.691684 |
aceea04e5dbd12187139f344221d4f5ff8816807 | 780 | py | Python | setup.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | setup.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | setup.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
# The long description shown on PyPI is the repository README, verbatim.
README = (HERE / "README.md").read_text()

setup(
    name="hipster",
    version="3.0.1",
    description="A thread safe implementation of the MinHeap and MaxHeap",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/soumasish/hipster",
    author="Soumasish Goswami",
    author_email="soumasish@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    # Ship every package except the test suite.
    packages=find_packages(exclude=("tests",)),
    include_package_data=True,
    install_requires=['readerwriterlock', ],
)
| 28.888889 | 74 | 0.678205 |
aceea08a8e675cf4d7f65b858957cc51e2fefc70 | 3,860 | py | Python | qf_lib/backtesting/portfolio/trade.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 198 | 2019-08-16T15:09:23.000Z | 2022-03-30T12:44:00.000Z | qf_lib/backtesting/portfolio/trade.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 13 | 2021-01-07T10:15:19.000Z | 2022-03-29T13:01:47.000Z | qf_lib/backtesting/portfolio/trade.py | webclinic017/qf-lib | 96463876719bba8a76c8269cef76addf3a2d836d | [
"Apache-2.0"
] | 29 | 2019-08-16T15:21:28.000Z | 2022-02-23T09:53:49.000Z | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.common.utils.dateutils.date_to_string import date_to_str
class Trade:
    """
    A single round-trip market exposure: opened when a position is created
    and finished when that position is closed.

    Note: For futures contracts, automated rolling of the contract closes the
    existing exposure on the specific ticker and therefore also produces a
    trade.

    Parameters
    -----------
    start_time: datetime
        Moment when the position corresponding to this trade was opened.
    end_time: datetime
        Moment when the trade was closed.
    contract: Contract
        Contract defining the security.
    pnl: float
        Profit or loss of the trade in currency units, including transaction
        costs and commissions.
    commission: float
        All transaction costs related to the trade (opening and reducing the
        position), expressed in currency units.
    direction: int
        Direction of the position (Long = 1, Short = -1), defined by the
        initial transaction.
    percentage_pnl: float
        Total pnl divided by the most recent value of the portfolio.
    """

    def __init__(self, start_time: datetime, end_time: datetime, contract: Contract, pnl: float, commission: float,
                 direction: int, percentage_pnl: float = float('nan')):
        self.start_time = start_time
        self.end_time = end_time
        self.contract = contract
        self.pnl = pnl
        self.commission = commission
        self.direction = direction
        self.percentage_pnl = percentage_pnl

    def _key(self):
        """All identity-relevant fields, in a fixed comparison order."""
        return (self.contract, self.start_time, self.end_time, self.pnl,
                self.commission, self.direction, self.percentage_pnl)

    def __eq__(self, other):
        if self is other:
            return True

        if not isinstance(other, Trade):
            return False

        # NOTE: two distinct trades whose percentage_pnl is a (distinct)
        # NaN object compare unequal, as in the original tuple comparison.
        return self._key() == other._key()

    def __str__(self):
        template = "{class_name:s} ({start_date:s} - {end_date:s}) -> " \
                   "Asset: {asset:>20}, " \
                   "Direction: {direction:>8}, " \
                   "P&L %: {percentage_pnl:>10.2%}, " \
                   "P&L: {pnl:>10.2f}"
        return template.format(class_name=self.__class__.__name__,
                               start_date=date_to_str(self.start_time),
                               end_date=date_to_str(self.end_time),
                               direction=self.direction,
                               asset=self.contract.symbol,
                               percentage_pnl=self.percentage_pnl,
                               pnl=self.pnl)
aceea1ea21498cf4bd8b620ea0dcac3a9764f607 | 2,070 | py | Python | src/downward/experiments/issue213/v7-lama-30min.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | 1 | 2021-09-09T13:03:02.000Z | 2021-09-09T13:03:02.000Z | src/downward/experiments/issue213/v7-lama-30min.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | null | null | null | src/downward/experiments/issue213/v7-lama-30min.py | ScarfZapdos/conan-bge-questgen | 4d184c5bf0ae4b768b8043cec586395df9ce1451 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
# Experiment parameters: one revision, tested in 32- and 64-bit builds.
DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue213-v7"]
BUILDS = ["release32", "release64"]
# One LAMA-2011 configuration per (revision, build) pair.
CONFIGS = [
    IssueConfig(
        "lama-" + build,
        [],
        build_options=[build],
        driver_options=["--build", build, "--alias", "seq-sat-lama-2011"])
    for rev in REVISIONS
    for build in BUILDS
]
SUITE = common_setup.DEFAULT_SATISFICING_SUITE
ENVIRONMENT = BaselSlurmEnvironment(
    partition="infai_2",
    email="jendrik.seipp@unibas.ch",
    export=["PATH", "DOWNWARD_BENCHMARKS"])

# Fall back to a tiny local setup when this is only a test run.
if common_setup.is_test_run():
    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
    ENVIRONMENT = LocalEnvironment(processes=1)
# Assemble the experiment: benchmarks, result parsers, and standard steps.
exp = IssueExperiment(
    revisions=REVISIONS,
    configs=CONFIGS,
    environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)

exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.ANYTIME_SEARCH_PARSER)
exp.add_parser(exp.PLANNER_PARSER)

exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')

#exp.add_absolute_report_step()
#exp.add_comparison_table_step()

attributes = IssueExperiment.DEFAULT_TABLE_ATTRIBUTES
# Compare builds.
for build1, build2 in itertools.combinations(BUILDS, 2):
algorithm_pairs = [
("{rev}-{config_nick}-{build1}".format(**locals()),
"{rev}-{config_nick}-{build2}".format(**locals()),
"Diff ({config_nick}-{rev})".format(**locals()))
for config_nick in ["lama"]]
exp.add_report(
ComparativeReport(algorithm_pairs, attributes=attributes),
name="issue213-{build1}-vs-{build2}-{SCRIPT_NAME}".format(**locals()))
exp.run_steps()
| 29.15493 | 78 | 0.72657 |
aceea2fef7024cfe721ff3d1f37934c465c3904f | 1,174 | py | Python | test/util/kronecker_product_test.py | daviswert/gpytorch | b1b546eea573aead6c509c8b23ccf93bd9ce82ec | [
"MIT"
] | 1 | 2021-06-22T11:53:41.000Z | 2021-06-22T11:53:41.000Z | test/util/kronecker_product_test.py | daviswert/gpytorch | b1b546eea573aead6c509c8b23ccf93bd9ce82ec | [
"MIT"
] | null | null | null | test/util/kronecker_product_test.py | daviswert/gpytorch | b1b546eea573aead6c509c8b23ccf93bd9ce82ec | [
"MIT"
] | null | null | null | import torch
from gpytorch.utils.kronecker_product import kronecker_product_toeplitz_matmul, kronecker_product
from gpytorch.utils.toeplitz import toeplitz
def test_kronecker_product():
    """Check kronecker_product against a hand-computed 2x3 (x) 2x2 example."""
    left = torch.Tensor([
        [1, 2, 3],
        [4, 5, 6],
    ])
    right = torch.Tensor([
        [1, 2],
        [4, 3],
    ])
    # Expected Kronecker product worked out by hand.
    expected = torch.Tensor([
        [1, 2, 2, 4, 3, 6],
        [4, 3, 8, 6, 12, 9],
        [4, 8, 5, 10, 6, 12],
        [16, 12, 20, 15, 24, 18]
    ])
    result = kronecker_product([left, right])
    assert torch.equal(result, expected)
def test_kronecker_product_toeplitz_matmul():
    """Compare the fused Toeplitz matmul with an explicit Kronecker build."""
    columns = torch.randn(3, 3)
    rhs = torch.randn(27, 10)
    fast = kronecker_product_toeplitz_matmul(columns, columns, rhs)
    # Materialize each Toeplitz matrix, take their Kronecker product and
    # multiply densely as the reference computation.
    dense = torch.zeros(3, 3, 3)
    for idx, col in enumerate(columns):
        dense[idx] = toeplitz(col, col)
    reference = kronecker_product(dense).mm(rhs)
    assert torch.norm(fast - reference) < 1e-4
| 27.302326 | 97 | 0.650767 |
aceea455b7371f507997ad7baa6e769725a34dcf | 69 | py | Python | crmsystem/controllers/__init__.py | iomegak12/sislcrmsystem | 476b4c2ac8a0e6007a8e6511072fa4f4f9e79f97 | [
"MIT"
] | null | null | null | crmsystem/controllers/__init__.py | iomegak12/sislcrmsystem | 476b4c2ac8a0e6007a8e6511072fa4f4f9e79f97 | [
"MIT"
] | null | null | null | crmsystem/controllers/__init__.py | iomegak12/sislcrmsystem | 476b4c2ac8a0e6007a8e6511072fa4f4f9e79f97 | [
"MIT"
] | null | null | null | from .crmsystem_service_controller import CRMSystemServiceController
| 34.5 | 68 | 0.927536 |
aceea45e2c67d6bb718fad137d6b70727d7c15fd | 212 | py | Python | leetcode-problems/0136_single_number.py | shikhalakra22/Algorithms | 5932b162214add40f4fd3ca217e16217a36823fe | [
"MIT"
] | null | null | null | leetcode-problems/0136_single_number.py | shikhalakra22/Algorithms | 5932b162214add40f4fd3ca217e16217a36823fe | [
"MIT"
] | null | null | null | leetcode-problems/0136_single_number.py | shikhalakra22/Algorithms | 5932b162214add40f4fd3ca217e16217a36823fe | [
"MIT"
] | null | null | null | class Solution:
def singleNumber(self, nums: List[int]) -> int:
from collections import Counter
d = Counter(nums)
for k, v in d.items():
if v == 1:
return k | 30.285714 | 51 | 0.518868 |
aceea61876fa698cd0a0baf665255cf5c3354537 | 247 | py | Python | apps/xfzauth/serializers.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | apps/xfzauth/serializers.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | apps/xfzauth/serializers.py | LishenZz/my_project | c2ac8199efb467e303d343ea34ed1969b64280d7 | [
"Apache-2.0"
] | null | null | null | #Author:Li Shen
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer exposing account-identity and status fields of User."""
    class Meta:
        # Only non-sensitive columns are serialized; credentials such as
        # the password hash are deliberately excluded from ``fields``.
        model=User
        fields = ('uid','telephone','username','email','is_staff','is_active',)
aceea75e9fe552d32fcab038de27fc5dcdd3b2d3 | 1,463 | py | Python | ann_tools_eric/exif_info.py | simonchanper/ml_ann | 353d6f6a2dd09243c035f8dd0e21f58bca4c6571 | [
"MIT"
] | 1 | 2018-06-11T09:30:47.000Z | 2018-06-11T09:30:47.000Z | ann_tools_eric/exif_info.py | simonchanper/ml_ann | 353d6f6a2dd09243c035f8dd0e21f58bca4c6571 | [
"MIT"
] | null | null | null | ann_tools_eric/exif_info.py | simonchanper/ml_ann | 353d6f6a2dd09243c035f8dd0e21f58bca4c6571 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import exifread
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(value):
d = float(value.values[0].num) / float(value.values[0].den)
m = float(value.values[1].num) / float(value.values[1].den)
s = float(value.values[2].num) / float(value.values[2].den)
return d + (m / 60.0) + (s / 3600.0)
def get_exif_info(exif_data):
    """Extract date, altitude and signed lat/lon from parsed EXIF tags.

    Returns the 6-tuple
    ``(image_date, gps_altitude, lat, lat_ref, lon, lon_ref)``;
    ``lat``/``lon`` stay ``None`` unless all four GPS tags are present.
    """
    image_date = _get_if_exist(exif_data, 'Image DateTime')
    gps_altitude = _get_if_exist(exif_data, 'GPS GPSAltitude')
    gps_latitude = _get_if_exist(exif_data, 'GPS GPSLatitude')
    gps_latitude_ref = _get_if_exist(exif_data, 'GPS GPSLatitudeRef')
    gps_longitude = _get_if_exist(exif_data, 'GPS GPSLongitude')
    gps_longitude_ref = _get_if_exist(exif_data, 'GPS GPSLongitudeRef')
    lat = lon = None
    if all((gps_latitude, gps_latitude_ref, gps_longitude, gps_longitude_ref)):
        lat = _convert_to_degress(gps_latitude)
        lon = _convert_to_degress(gps_longitude)
        # Southern and western hemispheres are encoded via the *Ref tags.
        if gps_latitude_ref.values[0] != 'N':
            lat = -lat
        if gps_longitude_ref.values[0] != 'E':
            lon = -lon
    return image_date, gps_altitude, lat, gps_latitude_ref, lon, gps_longitude_ref
def get_info_from_image(image_path):
    """Read ``image_path`` and return its EXIF summary tuple.

    Returns the 6-tuple produced by :func:`get_exif_info`:
    ``(image_date, gps_altitude, lat, lat_ref, lon, lon_ref)``.
    """
    # ``with`` guarantees the file handle is closed; the previous version
    # opened the file and never closed it.  The dead ``info_list = []``
    # initializer has been dropped as well.
    with open(image_path, 'rb') as image_file:
        tags = exifread.process_file(image_file)
    return get_exif_info(tags)
| 31.804348 | 82 | 0.673958 |
aceea810f62033a84b83e467ffbb0e1c7d90f0a1 | 2,853 | py | Python | divik/sampler/_core.py | Hirni-Meshram/divik | 0f542ec2669428458a4ecf6bb450dc90c33b0653 | [
"Apache-2.0"
] | 10 | 2020-01-10T13:10:38.000Z | 2022-03-17T05:08:40.000Z | divik/sampler/_core.py | Hirni-Meshram/divik | 0f542ec2669428458a4ecf6bb450dc90c33b0653 | [
"Apache-2.0"
] | 45 | 2019-10-26T12:42:50.000Z | 2022-03-12T07:50:40.000Z | divik/sampler/_core.py | Hirni-Meshram/divik | 0f542ec2669428458a4ecf6bb450dc90c33b0653 | [
"Apache-2.0"
] | 5 | 2021-11-24T04:55:45.000Z | 2021-12-17T23:38:19.000Z | from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from itertools import count
from sklearn.base import BaseEstimator, clone
class BaseSampler(BaseEstimator, metaclass=ABCMeta):
    """Abstract base class for all samplers.

    A sampler is Pool-safe: it may hold a dataset that is not pickled
    into worker processes when handled through the ``parallel`` context:

    >>> with sampler.parallel() as sampler_,
    ...         Pool(initializer=sampler_.initializer,
    ...              initargs=sampler_.initargs) as pool:
    ...     pool.map(sampler_.get_sample, range(10))

    Inside the parallel context neither ``__iter__`` nor ``fit`` is
    exposed: iterating would yield identical values independently in
    every worker (sampling by seed must be done consciously there), and
    fitting could behave unpredictably.  When the original sampler is
    needed, the wrapper provides an unfitted clone.
    """

    def __iter__(self):
        """Yield ``n_samples`` samples, or infinitely many if unspecified."""
        limit = getattr(self, "n_samples", None)
        seeds = count() if limit is None else range(limit)
        for seed in seeds:
            yield self.get_sample(seed)

    @abstractmethod
    def get_sample(self, seed):
        """Return the sample identified by ``seed``.

        The following assumptions should be met:
        a) ``sampler.get_sample(x) == sampler.get_sample(x)``
        b) ``x != y`` should yield
           ``sampler.get_sample(x) != sampler.get_sample(y)``

        Parameters
        ----------
        seed : int
            The seed to use to draw the sample

        Returns
        -------
        sample : array_like, (*self.shape_)
            Returns the drawn sample
        """
        raise NotImplementedError("get_sample is not implemented")

    def fit(self, X, y=None):
        """Fit sampler to data; shared base for (un)supervised samplers."""
        return self

    @contextmanager
    def parallel(self):
        """Yield a :class:`ParallelSampler` proxy for use with a Pool."""
        yield ParallelSampler(self)
class ParallelSampler:
    """Pool-friendly proxy that forwards sampling to a wrapped sampler."""

    def __init__(self, sampler: BaseSampler):
        # The attribute name is part of the public surface, so it stays
        # ``sampler``.
        self.sampler = sampler

    def get_sample(self, seed):
        """Delegate to the wrapped sampler's ``get_sample``."""
        return self.sampler.get_sample(seed)

    def initializer(self, *args):
        """Pool initializer hook -- a no-op by default."""

    @property
    def initargs(self):
        """Arguments passed to ``initializer`` -- none by default."""
        return ()

    def clone(self):
        """Return an unfitted copy of the wrapped sampler."""
        return clone(self.sampler)
| 30.351064 | 77 | 0.644935 |
aceea9f8f5a7ba9a08e4bc00ad816f1f2492619f | 5,499 | py | Python | test/functional/p2p_instantsend.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | null | null | null | test/functional/p2p_instantsend.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | 1 | 2022-01-27T01:34:48.000Z | 2022-01-27T01:59:47.000Z | test/functional/p2p_instantsend.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import LuckyCoinOROTestFramework
from test_framework.util import isolate_node, reconnect_isolated_node, assert_equal, \
assert_raises_rpc_error
'''
p2p_instantsend.py
Tests InstantSend functionality (prevent doublespend for unconfirmed transactions)
'''
class InstantSendTest(LuckyCoinOROTestFramework):
    """Functional test: InstantSend must prevent double-spends of
    unconfirmed-but-locked transactions, both via a competing block and
    via a competing mempool transaction."""
    def set_test_params(self):
        """Configure a 7-node network with 3 masternodes and fast DIP3."""
        self.set_luckycoinoro_test_params(7, 3, fast_dip3_enforcement=True)
        # set sender, receiver, isolated nodes
        self.isolated_idx = 1
        self.receiver_idx = 2
        self.sender_idx = 3
    def run_test(self):
        """Enable DKG, form a quorum, then run both double-spend scenarios."""
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.wait_for_sporks_same()
        self.mine_quorum()
        self.test_mempool_doublespend()
        self.test_block_doublespend()
    def test_block_doublespend(self):
        """A block mined on an isolated node that double-spends an
        InstantSend-locked transaction must be rejected by the network."""
        sender = self.nodes[self.sender_idx]
        receiver = self.nodes[self.receiver_idx]
        isolated = self.nodes[self.isolated_idx]
        # feed the sender with some balance
        sender_addr = sender.getnewaddress()
        self.nodes[0].sendtoaddress(sender_addr, 1)
        self.bump_mocktime(1)
        self.nodes[0].generate(2)
        self.sync_all()
        # create doublespending transaction, but don't relay it
        dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
        # isolate one node from network
        isolate_node(isolated)
        # instantsend to receiver
        receiver_addr = receiver.getnewaddress()
        is_id = sender.sendtoaddress(receiver_addr, 0.9)
        # wait for the transaction to propagate
        connected_nodes = self.nodes.copy()
        del connected_nodes[self.isolated_idx]
        self.sync_mempools(connected_nodes)
        for node in connected_nodes:
            self.wait_for_instantlock(is_id, node)
        # send doublespend transaction to isolated node
        isolated.sendrawtransaction(dblspnd_tx['hex'])
        # generate block on isolated node with doublespend transaction
        self.bump_mocktime(1)
        isolated.generate(1)
        wrong_block = isolated.getbestblockhash()
        # connect isolated block to network
        reconnect_isolated_node(isolated, 0)
        # check doublespend block is rejected by other nodes
        timeout = 10
        for i in range(0, self.num_nodes):
            if i == self.isolated_idx:
                continue
            res = self.nodes[i].waitforblock(wrong_block, timeout)
            assert (res['hash'] != wrong_block)
            # wait for long time only for first node
            timeout = 1
        # send coins back to the controller node without waiting for confirmations
        receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "", True)
        assert_equal(receiver.getwalletinfo()["balance"], 0)
        # mine more blocks
        # TODO: mine these blocks on an isolated node
        self.bump_mocktime(1)
        # make sure the above TX is on node0
        self.sync_mempools([n for n in self.nodes if n is not isolated])
        self.nodes[0].generate(2)
        self.sync_all()
    def test_mempool_doublespend(self):
        """A conflicting mempool transaction held by a previously isolated
        node must be pruned once the InstantSend-locked transaction
        arrives, and must never be relayed to the rest of the network."""
        sender = self.nodes[self.sender_idx]
        receiver = self.nodes[self.receiver_idx]
        isolated = self.nodes[self.isolated_idx]
        # feed the sender with some balance
        sender_addr = sender.getnewaddress()
        self.nodes[0].sendtoaddress(sender_addr, 1)
        self.bump_mocktime(1)
        self.nodes[0].generate(2)
        self.sync_all()
        # create doublespending transaction, but don't relay it
        dblspnd_tx = self.create_raw_tx(sender, isolated, 0.5, 1, 100)
        # txid is the double-SHA256 of the raw tx, byte-reversed for display
        dblspnd_txid = bytes_to_hex_str(hash256(hex_str_to_bytes(dblspnd_tx['hex']))[::-1])
        # isolate one node from network
        isolate_node(isolated)
        # send doublespend transaction to isolated node
        isolated.sendrawtransaction(dblspnd_tx['hex'])
        # let isolated node rejoin the network
        # The previously isolated node should NOT relay the doublespending TX
        reconnect_isolated_node(isolated, 0)
        for node in self.nodes:
            if node is not isolated:
                assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, dblspnd_txid)
        # instantsend to receiver. The previously isolated node should prune the doublespend TX and request the correct
        # TX from other nodes.
        receiver_addr = receiver.getnewaddress()
        is_id = sender.sendtoaddress(receiver_addr, 0.9)
        # wait for the transaction to propagate
        self.sync_mempools()
        for node in self.nodes:
            self.wait_for_instantlock(is_id, node)
        # the double-spend must now be gone from the isolated node too
        assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", isolated.getrawtransaction, dblspnd_txid)
        # send coins back to the controller node without waiting for confirmations
        receiver.sendtoaddress(self.nodes[0].getnewaddress(), 0.9, "", "", True)
        assert_equal(receiver.getwalletinfo()["balance"], 0)
        # mine more blocks
        self.bump_mocktime(1)
        self.nodes[0].generate(2)
        self.sync_all()
if __name__ == '__main__':
    # Entry point: run the functional test directly from the command line.
    InstantSendTest().main()
| 42.3 | 126 | 0.675577 |
aceeab3a6ded51e3896e7b799cbdaf55ca60471d | 7,086 | py | Python | src/video.py | rustielin/cvision | dd8fe08371a6a039319e0240b64aee726d7c1335 | [
"BSD-3-Clause"
] | null | null | null | src/video.py | rustielin/cvision | dd8fe08371a6a039319e0240b64aee726d7c1335 | [
"BSD-3-Clause"
] | null | null | null | src/video.py | rustielin/cvision | dd8fe08371a6a039319e0240b64aee726d7c1335 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Video capture sample.
Sample shows how VideoCapture class can be used to acquire video
frames from a camera of a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
'create_capture' is a convinience function for capture creation,
falling back to procedural video in case of error.
Usage:
video.py [--shotdir <shot path>] [source0] [source1] ...'
sourceN is an
- integer number for camera capture
- name of video file
- synth:<params> for procedural video
Synth examples:
synth:bg=../data/lena.jpg:noise=0.1
synth:class=chess:bg=../data/lena.jpg:noise=0.1:size=640x480
Keys:
ESC - exit
SPACE - save current frame to <shot path> directory
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
from numpy import pi, sin, cos
import cv2
# built-in modules
from time import clock
# local modules
from opencv.tst_scene_render import TestSceneRender
import opencv.common
class VideoSynthBase(object):
def __init__(self, size=None, noise=0.0, bg = None, **params):
self.bg = None
self.frame_size = (640, 480)
if bg is not None:
self.bg = cv2.imread(bg, 1)
h, w = self.bg.shape[:2]
self.frame_size = (w, h)
if size is not None:
w, h = map(int, size.split('x'))
self.frame_size = (w, h)
self.bg = cv2.resize(self.bg, self.frame_size)
self.noise = float(noise)
def render(self, dst):
pass
def read(self, dst=None):
w, h = self.frame_size
if self.bg is None:
buf = np.zeros((h, w, 3), np.uint8)
else:
buf = self.bg.copy()
self.render(buf)
if self.noise > 0.0:
noise = np.zeros((h, w, 3), np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
return True, buf
def isOpened(self):
return True
class Book(VideoSynthBase):
def __init__(self, **kw):
super(Book, self).__init__(**kw)
backGr = cv2.imread('../data/graf1.png')
fgr = cv2.imread('../data/box.png')
self.render = TestSceneRender(backGr, fgr, speed = 1)
def read(self, dst=None):
noise = np.zeros(self.render.sceneBg.shape, np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
class Cube(VideoSynthBase):
def __init__(self, **kw):
super(Cube, self).__init__(**kw)
self.render = TestSceneRender(cv2.imread('../data/pca_test1.jpg'), deformation = True, speed = 1)
def read(self, dst=None):
noise = np.zeros(self.render.sceneBg.shape, np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3)
class Chess(VideoSynthBase):
def __init__(self, **kw):
super(Chess, self).__init__(**kw)
w, h = self.frame_size
self.grid_size = sx, sy = 10, 7
white_quads = []
black_quads = []
for i, j in np.ndindex(sy, sx):
q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
[white_quads, black_quads][(i + j) % 2].append(q)
self.white_quads = np.float32(white_quads)
self.black_quads = np.float32(black_quads)
fx = 0.9
self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
[0, fx*w, 0.5*(h-1)],
[0.0,0.0, 1.0]])
self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
self.t = 0
def draw_quads(self, img, quads, color = (0, 255, 0)):
img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
img_quads.shape = quads.shape[:2] + (2,)
for q in img_quads:
cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2)
def render(self, dst):
t = self.t
self.t += 1.0/30.0
sx, sy = self.grid_size
center = np.array([0.5*sx, 0.5*sy, 0.0])
phi = pi/3 + sin(t*3)*pi/8
c, s = cos(phi), sin(phi)
ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
target_pos = center + ofs
R, self.tvec = common.lookat(eye_pos, target_pos)
self.rvec = common.mtx2rvec(R)
self.draw_quads(dst, self.white_quads, (245, 245, 245))
self.draw_quads(dst, self.black_quads, (10, 10, 10))
classes = dict(chess=Chess, book=Book, cube=Cube)
presets = dict(
empty = 'synth:',
lena = 'synth:bg=../data/lena.jpg:noise=0.1',
chess = 'synth:class=chess:bg=../data/lena.jpg:noise=0.1:size=640x480',
book = 'synth:class=book:bg=../data/graf1.png:noise=0.1:size=640x480',
cube = 'synth:class=cube:bg=../data/pca_test1.jpg:noise=0.0:size=640x480'
)
def create_capture(source = 0, fallback = presets['chess']):
'''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
'''
source = str(source).strip()
chunks = source.split(':')
# handle drive letter ('c:', ...)
if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
chunks[1] = chunks[0] + ':' + chunks[1]
del chunks[0]
source = chunks[0]
try: source = int(source)
except ValueError: pass
params = dict( s.split('=') for s in chunks[1:] )
cap = None
if source == 'synth':
Class = classes.get(params.get('class', None), VideoSynthBase)
try: cap = Class(**params)
except: pass
else:
cap = cv2.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print('Warning: unable to open video source: ', source)
if fallback is not None:
return create_capture(fallback, None)
return cap
if __name__ == '__main__':
import sys
import getopt
print(__doc__)
args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
args = dict(args)
shotdir = args.get('--shotdir', '.')
if len(sources) == 0:
sources = [ 0 ]
caps = list(map(create_capture, sources))
shot_idx = 0
while True:
imgs = []
for i, cap in enumerate(caps):
ret, img = cap.read()
imgs.append(img)
cv2.imshow('capture %d' % i, img)
ch = 0xFF & cv2.waitKey(1)
if ch == 27:
break
if ch == ord(' '):
for i, img in enumerate(imgs):
fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
cv2.imwrite(fn, img)
print(fn, 'saved')
shot_idx += 1
cv2.destroyAllWindows()
| 30.808696 | 109 | 0.574795 |
aceead0b0801f65131d4f6f78609ade2954c4448 | 1,810 | py | Python | setup.py | kingkastle/cw-sdk-python | 64791a8a6eeaf500933115319488923098163ac5 | [
"BSD-2-Clause"
] | null | null | null | setup.py | kingkastle/cw-sdk-python | 64791a8a6eeaf500933115319488923098163ac5 | [
"BSD-2-Clause"
] | null | null | null | setup.py | kingkastle/cw-sdk-python | 64791a8a6eeaf500933115319488923098163ac5 | [
"BSD-2-Clause"
] | 1 | 2021-02-15T19:49:05.000Z | 2021-02-15T19:49:05.000Z | import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="cryptowatch-sdk",
version="0.0.14",
description="Python bindings for the Cryptowatch API. Cryptocurrency markets, assets, instruments and exchanges data.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/cryptowatch/cw-sdk-python",
author="Cryptowatch",
author_email="infra@cryptowat.ch",
keywords="cryptowatch sdk bitcoin crypto",
license="BSD-2",
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"Topic :: Office/Business :: Financial",
"Topic :: Office/Business :: Financial :: Investment",
"Intended Audience :: Financial and Insurance Industry",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.7",
],
install_requires=[
"marshmallow >= 3.2.2",
"requests >= 2.22.0",
"PyYAML >= 5.1.2",
"urllib3 >= 1.25.7",
"websocket-client >= 0.56.0",
"protobuf >= 3.11.3",
],
packages=find_packages(exclude=("tests", "examples")),
entry_points={"console_scripts": ["cryptowatch=cryptowatch.__main__:main",]},
python_requires=">=3.7",
project_urls={
"Bug Tracker": "https://github.com/cryptowatch/cw-sdk-python/issues",
"Documentation": "https://github.com/cryptowatch/cw-sdk-python#installation",
"Source Code": "https://github.com/cryptowatch/cw-sdk-python",
},
tests_require=["pytest >= 5.3.1", "pytest-mock >= 1.12", "requests-mock >= 1.7"],
)
| 36.2 | 123 | 0.648066 |
aceead5b20da153ea5712a84f6589e2a03d3eaf1 | 1,102 | py | Python | dev/myapp/views.py | atleta/django-bootstrap-datepicker-plus | 7fcafa7159eeab3cd1e2a6ebdf650052d21aa2c1 | [
"Apache-2.0"
] | 1 | 2018-03-01T17:50:54.000Z | 2018-03-01T17:50:54.000Z | dev/myapp/views.py | atleta/django-bootstrap-datepicker-plus | 7fcafa7159eeab3cd1e2a6ebdf650052d21aa2c1 | [
"Apache-2.0"
] | null | null | null | dev/myapp/views.py | atleta/django-bootstrap-datepicker-plus | 7fcafa7159eeab3cd1e2a6ebdf650052d21aa2c1 | [
"Apache-2.0"
] | null | null | null | from django.http import HttpResponseRedirect
from django.views.generic.edit import FormView, UpdateView
from .forms import CustomForm, EventForm
from .models import Event
class Bootstrap3_CustomFormView(FormView):
    """Render a plain (non-model) form with the Bootstrap 3 template."""
    template_name = "myapp/bootstrap3/custom-form.html"
    form_class = CustomForm
    def form_valid(self, form):
        # On success, bounce back to the page the form was submitted from.
        # NOTE(review): HTTP_REFERER may be missing from the request --
        # redirecting to None would fail; confirm this is acceptable here.
        return HttpResponseRedirect(self.request.META.get("HTTP_REFERER"))
class Bootstrap4_CustomFormView(FormView):
    """Render a plain (non-model) form with the Bootstrap 4 template."""
    template_name = "myapp/bootstrap4/custom-form.html"
    form_class = CustomForm
    def form_valid(self, form):
        # On success, bounce back to the page the form was submitted from.
        return HttpResponseRedirect(self.request.META.get("HTTP_REFERER"))
class Bootstrap3_UpdateView(UpdateView):
    """Edit an Event instance using the Bootstrap 3 model-form template."""
    template_name = "myapp/bootstrap3/model-form.html"
    model = Event
    form_class = EventForm
    def get_success_url(self):
        # After saving, return to the page the form was submitted from.
        return self.request.META.get("HTTP_REFERER")
class Bootstrap4_UpdateView(UpdateView):
    """Edit an Event instance using the Bootstrap 4 model-form template."""
    template_name = "myapp/bootstrap4/model-form.html"
    model = Event
    form_class = EventForm
    def get_success_url(self):
        # After saving, return to the page the form was submitted from.
        return self.request.META.get("HTTP_REFERER")
| 27.55 | 74 | 0.750454 |
aceeaf00bd4da1fa06b82a5ed66ea6613409174b | 3,608 | py | Python | PyLudus/apps/home/views/dash.py | ccall48/PyLudus | 69f0770113b1df67e731897877aedbda1e663590 | [
"MIT"
] | 1 | 2021-02-16T04:54:18.000Z | 2021-02-16T04:54:18.000Z | PyLudus/apps/home/views/dash.py | ccall48/PyLudus | 69f0770113b1df67e731897877aedbda1e663590 | [
"MIT"
] | null | null | null | PyLudus/apps/home/views/dash.py | ccall48/PyLudus | 69f0770113b1df67e731897877aedbda1e663590 | [
"MIT"
] | 2 | 2021-02-16T01:06:50.000Z | 2021-05-26T11:12:47.000Z | import dateparser
from PyLudus.apps.home.views import get_login_url, is_user_login_ok
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect, render
from django.views.generic import View
from fusionauth.fusionauth_client import FusionAuthClient
class DashView(View):
    """The main landing page for the website."""
    # NOTE(review): this view debugs via print(); consider the logging
    # module before shipping.
    def get(self, request: WSGIRequest) -> HttpResponse:
        """HTTP GET: Return the view template.

        Redirects to the login URL when the session is not authenticated
        or when the FusionAuth API cannot be reached.
        """
        login_url = get_login_url(request)
        user_id = is_user_login_ok(request)
        if not user_id:
            return redirect(login_url)
        birthday = None
        user = None
        try:
            # Talk to FusionAuth over its internal API to fetch the user.
            client = FusionAuthClient(
                settings.FUSION_AUTH_API_KEY, settings.FUSION_AUTH_INTERNAL_API_URL
            )
            print(f"{user_id=}")
            r = client.retrieve_user(user_id)
            if r.was_successful():
                user = r.success_response["user"]
                print(f"{user=}")
                # birthDate may be absent if the user never set it.
                birthday = r.success_response["user"].get("birthDate", None)
                print(f"{birthday=}")
            else:
                # Lookup failed; render the dashboard without user details.
                print("couldn't get user")
                print(r.error_response)
            print("render dashboard with ", user_id)
        except Exception as e:
            # Any transport/API failure sends the visitor back to login.
            print("Error occurred while communicating with Fusion API")
            print(e)
            return redirect(login_url)
        return render(request, "home/dash.html", {"user": user, "birthday": birthday})
    def post(self, request: HttpRequest) -> HttpResponse:
        """HTTP POST: Set user birthdate.

        Parses the submitted birthday, normalises it to YYYY-MM-DD and
        patches the FusionAuth user record; re-renders the dashboard with
        a status message in every outcome.
        """
        birthday = request.POST.get("birthday")
        user_id = request.POST.get("user_id")
        normalised_birthday = None
        print(f"{birthday=}")
        print(f"{user_id=}")
        try:
            # dateparser accepts free-form input; normalise to ISO date.
            dt = dateparser.parse(birthday)
            normalised_birthday = dt.strftime("%Y-%m-%d")
        except Exception as e:
            # Parse failures fall through to the error message below.
            print(e)
            print("Couldn't parse birthday")
        if not normalised_birthday:
            return render(
                request,
                "home/dash.html",
                {
                    "message": "Couldn't parse birthday. Please use YYYY-MM-DD",
                    "user_id": user_id,
                },
            )
        try:
            client = FusionAuthClient(
                settings.FUSION_AUTH_API_KEY, settings.FUSION_AUTH_INTERNAL_API_URL
            )
            # PATCH only the birthDate field on the FusionAuth user record.
            r = client.patch_user(user_id, {"user": {"birthDate": normalised_birthday}})
            if r.was_successful():
                print(r.success_response)
                return render(
                    request,
                    "home/dash.html",
                    {
                        "message": "Your birthday has been set",
                        "birthday": normalised_birthday,
                        "user": r.success_response["user"],
                    },
                )
            else:
                print(r.error_response)
                return render(
                    request,
                    "home/dash.html",
                    {
                        "message": "Something went wrong",
                        "user": r.error_response["user"],
                    },
                )
        except Exception as e:
            # Network/API failure: render the dashboard with a generic error.
            print(e)
            return render(
                request,
                "home/dash.html",
                {"message": "Something went wrong"},
            )
| 34.361905 | 88 | 0.521064 |
aceeaf14d4cd828a8d1f6e4df5f6622147f8097b | 7,318 | py | Python | networking_nec/nwa/nwalib/workflow.py | nec-openstack/networking-nec-nwa | 0c5a4a9fb74b6dc78b773d78755c758ed67ed777 | [
"Apache-2.0"
] | null | null | null | networking_nec/nwa/nwalib/workflow.py | nec-openstack/networking-nec-nwa | 0c5a4a9fb74b6dc78b773d78755c758ed67ed777 | [
"Apache-2.0"
] | null | null | null | networking_nec/nwa/nwalib/workflow.py | nec-openstack/networking-nec-nwa | 0c5a4a9fb74b6dc78b773d78755c758ed67ed777 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
class NwaWorkflow(object):
'''Workflow definition of NWA. '''
_path_prefix = '/umf/workflow/'
_nameid_initialized = False
_nameid = {
'CreateTenantNW': '40030001',
'DeleteTenantNW': '40030016',
'CreateVLAN': '40030002',
'DeleteVLAN': '40030018',
'CreateGeneralDev': '40030021',
'DeleteGeneralDev': '40030022',
'CreateTenantFW': '40030019',
'UpdateTenantFW': '40030009',
'DeleteTenantFW': '40030020',
'SettingNAT': '40030005',
'DeleteNAT': '40030011',
'SettingFWPolicy': '40030081',
'SettingLBPolicy': '40030091',
'CreateTenantLB': '40030092',
'UpdateTenantLB': '40030093',
'DeleteTenantLB': '40030094',
'CreateConnectPort': '50000001',
'DeleteConnectPort': '50000002',
}
_errno = {
'1': 'Unknown parent node',
'2': 'Already exists',
'3': 'Resources are insufficient',
'4': 'Unknown node',
'5': 'Can not access the file',
'6': 'Unknown parameters',
'7': 'Undefined parameters',
'8': 'Permission error',
'9': 'It is not possible to remove because it is in use',
'10': 'An error occurred while deleting the node',
'11': 'Execution environment is invalid',
'31': 'Specified IP subnet does not exist',
'32': 'Specified IP address does not exist',
'33': 'Can not allocate IP subnet to be paid out',
'34': 'IP subnet will not exceed the threshold',
'101': 'An unknown error has occurred',
'102': 'An internal error has occurred',
'103': 'Failed to connect to CMDB',
'104': 'Out of memory',
'105': 'An error occurred in the select process to the CMDB',
'106': 'An error occurred in the update process to the CMDB',
'107': 'An error occurred in the insert process to the CMDB',
'108': 'Input parameter is invalid',
'109': 'An error occurred in the file processing',
'110': 'An error occurred in the delete process to the CMDB',
'201': 'There is no free VLAN ID',
'202': 'Exceeded the threshold of VLAN',
'203': 'Exceeded the threshold ot Tenant equipment',
'204': 'Resource group is not specified in the input',
'205': 'Tenant-ID is not specified in the input',
'206': 'Tenant-Network is already created',
'207': 'There is no available devices',
'208': 'IP address depletion for assignment of LB',
'209': 'The device in the cluster group is 0 or 2 or more',
'210': 'The device in the cluster group is 0',
'211': 'There is no specified resource group',
'212': 'There is no character "/" in the resource group name',
'213': 'Tenant-FW is not specified one',
'214': 'Tenant-FW is specified two or more',
'215': 'Can not be extended because there is no PFS',
'216': 'Logical NW name is not specified',
'217': 'There is no Registered SSL-VPN equipment',
'218': 'Tenant network is not yet created',
'219': 'There is no free LoadBalancer',
'220': 'Can not get the physical server uuid',
'221': 'There is no deletion of VLAN',
'222': 'Tenant ID in use is still exists',
'223': 'Tenant-FW not found',
'224': 'There is no specified device name',
'225': 'Can not get the information of tenant vlan',
'226': 'There is no specified logical NW',
'227': 'Can not get the device information of the tenant in use',
'228': 'For updated is in use, it could not be updated',
'229': 'For deletion is in use, it could not be deleted',
'230': 'Exceeded the threshold',
'231': 'Exceeded the allocation possible number',
'232': 'Exceeded the allocation range',
'233': 'Authentication setting is incorrect',
'234': 'Usable IP address range setting of is invalid',
'235': 'IP address specified is invalid',
'236': 'There is no available for allocation Tenant FW',
'237': 'IP address depletion for assignment of FW',
'238': 'IP address is invalid',
'239': 'Can not set the number of records to zero',
'240': 'The specification does not include a payout already IP subnet',
'241': 'Not specified LogicalPort under the same controller or domain',
'242': 'IP address depletion for assignment of SSL',
'243': 'IP address is invalid',
'244': 'The type of controller is invalid',
'245': 'Device or VDOM name is invalid specified',
'246': 'Exceeds the upper limit of naming convention',
'251': 'In the same tenant, scenario there are still concurrent or '
'reservation ID',
'252': '(unused)',
'253': 'The preceding scenario, can not be reserved',
'254': 'Can not get the reserved id because of the preceding scenario',
'298': 'Resources are insufficient',
'299': 'Unknown error',
}
    @staticmethod
    def init(name):
        """Placeholder initialization hook for a workflow; intentionally a no-op."""
        pass  # pragma: no cover
@staticmethod
def path(name):
"""Returns path of workflow.
:param name: The name of workflow.
"""
return '%s%s/execute' % (NwaWorkflow._path_prefix,
NwaWorkflow._nameid[name])
    @staticmethod
    def name(path):
        """Returns name of workflow for an execution path, or None if unknown.

        :param path: The REST execution path of the workflow
            (as produced by :meth:`path`).
        """
        wid = path[len(NwaWorkflow._path_prefix):-len('/execute')]
        for (name, _id) in NwaWorkflow._nameid.items():
            if _id == wid:
                return name
        return None
    @staticmethod
    def strerror(errno):
        """Returns the error message for errno, or None if errno is unknown.

        :param errno: The number of error, as a string key of ``_errno``.
        """
        return NwaWorkflow._errno.get(errno)
@staticmethod
def get_errno_from_resultdata(data):
resultdata = data.get('resultdata')
if resultdata:
errmsg = resultdata.get('ErrorMessage')
if isinstance(errmsg, six.string_types):
m = re.search(r'ErrorNumber=(\d+)', errmsg)
if m:
return m.group(1)
m = re.search(r'ReservationErrorCode = (\d+)', errmsg, re.M)
if m:
return m.group(1)
return None
@staticmethod
def update_nameid(new_nameid):
if NwaWorkflow._nameid_initialized:
return
if new_nameid:
NwaWorkflow._nameid = new_nameid
NwaWorkflow._nameid_initialized = True
| 40.882682 | 79 | 0.594561 |
aceeb1ce709f351886bb7eb43af6db1e024d92ac | 2,978 | py | Python | recording/loggers/CustomWAndBLogger.py | ALRhub/ALRProject | 5705b04d174855f1e874fa6548c3d4c12c8c2f5c | [
"MIT"
] | null | null | null | recording/loggers/CustomWAndBLogger.py | ALRhub/ALRProject | 5705b04d174855f1e874fa6548c3d4c12c8c2f5c | [
"MIT"
] | null | null | null | recording/loggers/CustomWAndBLogger.py | ALRhub/ALRProject | 5705b04d174855f1e874fa6548c3d4c12c8c2f5c | [
"MIT"
] | 2 | 2021-09-02T13:58:20.000Z | 2021-09-03T08:28:51.000Z | from util.Types import *
from recording.loggers.AbstractLogger import AbstractLogger
from src.algorithms.AbstractIterativeAlgorithm import AbstractIterativeAlgorithm
from src.tasks.AbstractTask import AbstractTask
import wandb
import os
from util.Functions import get_from_nested_dict
class CustomWAndBLogger(AbstractLogger):
    """
    Forwards (some of the) recorded results to wandb.ai.
    """
    def __init__(self, config: ConfigDict, algorithm: AbstractIterativeAlgorithm, task: AbstractTask):
        super().__init__(config=config, algorithm=algorithm, task=task)
        wandb_params = get_from_nested_dict(config, list_of_keys=["recording", "wandb_params"], raise_error=True)
        structure = config.get("_recording_structure")
        # Tags make runs searchable; include algorithm and task names when set.
        tags = []
        for key_path in (["algorithm", "name"], ["task", "task"]):
            tag_value = get_from_nested_dict(config, list_of_keys=key_path, default_return=None)
            if tag_value is not None:
                tags.append(tag_value)
        self.wandb_logger = wandb.init(
            project=wandb_params.get("project_name"),    # name of the whole project
            tags=tags,
            job_type=structure.get("_job_name"),         # name of your experiment
            group=structure.get("_groupname")[-127:],    # identical hyperparameters, different seeds
            name=structure.get("_runname")[-127:],       # individual repetitions
            dir=structure.get("_recording_dir"),         # local directory for wandb recording
            config=config,                               # full file config
            reinit=False,
            settings=wandb.Settings(start_method="thread"))

    def log_iteration(self, previous_recorded_values: RecordingDict, iteration: int) -> None:
        """
        Parses and logs the given dict of recorded metrics to wandb.
        Args:
            previous_recorded_values: A dictionary of previously recorded things
            iteration: The current iteration of the algorithm
        Returns:
        """
        if "scalars" in previous_recorded_values:
            self.wandb_logger.log(data=previous_recorded_values["scalars"], step=iteration)
        # extend me to log other things as well!

    def finalize(self) -> None:
        """
        Properly close the wandb logger.
        Returns:
        """
        self.wandb_logger.finish()
| 46.53125 | 118 | 0.640698 |
aceeb228ac950bcb649c426ceff08eaa92cf88f4 | 1,129 | py | Python | modules/py/scripts/ncu_opt_tester.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 29 | 2020-04-13T04:40:35.000Z | 2021-12-17T11:21:35.000Z | modules/py/scripts/ncu_opt_tester.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 6 | 2020-03-12T17:40:00.000Z | 2021-01-20T12:15:08.000Z | modules/py/scripts/ncu_opt_tester.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 9 | 2020-09-28T05:00:30.000Z | 2022-03-04T02:11:49.000Z | """
Shows the performance difference in optimised versus non optimised NCU operations.

mpirun -n 8 python ./ncu_opt_tester.py <num ctrl lines> <operating mode: {0,1}>
mpirun -n 8 python ./ncu_opt_tester.py 11 0 # Should be relatively fast
mpirun -n 8 python ./ncu_opt_tester.py 11 1 # Should take a while
"""
from PyQNLPSimulator import PyQNLPSimulator as p
import sys
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()  # only rank 0 prints, so output is not duplicated per process
# Register layout: qubits [0, num_ctrl) are the control register (regD),
# [num_ctrl, target) is auxiliary workspace (regA) used only by the
# optimised NCU call, and the last qubit is the target.
num_ctrl = int(sys.argv[1])
num_qubits = num_ctrl*2 - 1
target = num_qubits-1
sim = p(num_qubits, False)
regD = range(num_ctrl)
regA = range(num_ctrl, target)
sim.initRegister()
# Apply X to every control qubit before running the NCU.
for i in regD:
    if rank == 0:
        print("Applying gateX to {}".format(i))
    sim.applyGateX(i)
res0 = sim.applyMeasurementToRegister([target], True)
if sys.argv[2] == "1": #unoptimised: no auxiliary register supplied
    sim.applyGateNCU(sim.getGateX(), regD, target ,"X")
else: #optimised: auxiliary register enables the faster decomposition
    sim.applyGateNCU(sim.getGateX(), regD, regA, target ,"X")
res1 = sim.applyMeasurementToRegister([target], True)
if rank == 0:
    print(list(regD) + [target], list(regA), "Before NCU={}".format(res0), "After NCU={}".format(res1))
| 25.088889 | 103 | 0.703277 |
aceeb22f7b0c1706d7b39abe077c77101b5ce756 | 3,146 | py | Python | generate_tfrecord.py | ankushdecoded123/Tensorflow-Object-Detection-API-Train-Model | c6fe7a5b1ba8217edecd63a680a86d15a8adcba2 | [
"MIT"
] | null | null | null | generate_tfrecord.py | ankushdecoded123/Tensorflow-Object-Detection-API-Train-Model | c6fe7a5b1ba8217edecd63a680a86d15a8adcba2 | [
"MIT"
] | null | null | null | generate_tfrecord.py | ankushdecoded123/Tensorflow-Object-Detection-API-Train-Model | c6fe7a5b1ba8217edecd63a680a86d15a8adcba2 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
from tensorflow.python.framework.versions import VERSION
if VERSION >= "2.0.0a0":
import tensorflow.compat.v1 as tf
else:
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('image_dir', '', 'Path to images')
FLAGS = flags.FLAGS
def class_text_to_int(row_label):
    """Map a class-name string to its integer label id.

    This converter is configured for a single object class, so every
    label maps to 1 regardless of *row_label*.
    """
    return 1
def split(df, group):
    """Partition *df* by the *group* column.

    Returns a list of ``data(filename, object)`` records, one per unique
    value of *group*, where ``object`` is the matching sub-frame.
    """
    record = namedtuple('data', ['filename', 'object'])
    grouped = df.groupby(group)
    records = []
    for key in grouped.groups:
        records.append(record(key, grouped.get_group(key)))
    return records
def create_tf_example(group, path):
    """Build a tf.train.Example for one image and its annotation rows.

    Args:
        group: a ``data(filename, object)`` record from split(); ``object``
            holds the annotation rows (xmin/xmax/ymin/ymax/class) for the image.
        path: directory containing the image files.

    Returns:
        A tf.train.Example with the raw jpeg bytes and box coordinates.
    """
    with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
        encoded_jpg = fid.read()
    # Decode only to obtain the image dimensions used for box normalisation.
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size
    filename = group.filename.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    # Box coordinates are normalised by the image size before being stored.
    for index, row in group.object.iterrows():
        xmins.append(row['xmin'] / width)
        xmaxs.append(row['xmax'] / width)
        ymins.append(row['ymin'] / height)
        ymaxs.append(row['ymax'] / height)
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example
def main(_):
    """Convert the CSV annotations plus images into one TFRecord file."""
    record_writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    image_dir = os.path.join(FLAGS.image_dir)
    annotations = pd.read_csv(FLAGS.csv_input)
    # One Example per image (i.e. per unique filename in the CSV).
    for image_group in split(annotations, 'filename'):
        example = create_tf_example(image_group, image_dir)
        record_writer.write(example.SerializeToString())
    record_writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
    tf.app.run()
| 33.827957 | 96 | 0.697076 |
aceeb3c1ae82086bf46ef174e36dedb5084a8f65 | 5,287 | py | Python | Development/AES_dataset_class.py | tkcroat/Augerquant | e3ef4152bd0cc8cc109785aa6ef6083ca18a918f | [
"MIT"
] | null | null | null | Development/AES_dataset_class.py | tkcroat/Augerquant | e3ef4152bd0cc8cc109785aa6ef6083ca18a918f | [
"MIT"
] | null | null | null | Development/AES_dataset_class.py | tkcroat/Augerquant | e3ef4152bd0cc8cc109785aa6ef6083ca18a918f | [
"MIT"
] | 1 | 2019-10-15T15:55:04.000Z | 2019-10-15T15:55:04.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:10:58 2017
@author: tkc
"""
import os
import pandas as pd
import numpy as np
from tkinter import filedialog
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
class AESspectrum():
    ''' Single instance of AES spectra file created from row of spelist (child of AESdataset)
    load file from AESdataset (pd dataframe row)
    #TODO add direct file load? '''
    def __init__(self, AESdataset, rowindex):
        """Load quant params and prior quant results for one spe file.

        AESdataset -- parent AESdataset instance (supplies logs and path)
        rowindex -- row of AESdataset.Augerparamlog describing this file
        """
        self.AESdataset = AESdataset
        # BUG FIX: was self.parent.path, but no 'parent' attribute is ever
        # set, so every construction raised AttributeError.
        self.path = AESdataset.path  # same path as AESdataset parent
        # load params from batch processing of AESdataset
        row = AESdataset.Augerparamlog.iloc[rowindex]
        self.filename = row.Filename
        self.numareas = row.Numareas
        self.evbreaks = row.Evbreaks  # TODO data type?
        self.spectype = row.Type.lower()  # multiplex or survey
        self.AESdf = None  # entire AES dataframe (all areas); see open_csvfile
        self.energy = None  # same for all cols
        self.aesquantparams = None
        self.loadAESquantparams()
        self.elems_smdiff = None
        self.get_elems_smdiff()  # get quant from existing smdifpeakslog
        self.elems_integ = None
        # BUG FIX: __init__ previously called self.getelemdata(), which does
        # not exist on this class; integ results come from get_elems_integ().
        self.get_elems_integ()
        self.elemdata = None
        print('Auger QM file', self.filename, 'loaded.')

    def open_csvfile(self):
        ''' Read Auger spectral file (csv exported alongside the spe) '''
        self.AESdf = pd.read_csv(self.filename.replace('.spe', '.csv'))
        self.colset = self.AESdf.columns  # Counts1, Counts2, S7D71, S7D72, etc.
        self.energy = self.AESdf['Energy']
        # BUG FIX: the next two lines previously read from undefined
        # self.EDXdf (copy-paste from the sibling EDX quant code).
        # NOTE(review): assumes the csv contains Backfit/Subdata columns.
        self.backfit = self.AESdf['Backfit']
        self.subdata = self.AESdf['Subdata']
        print('AES file ', self.filename, ' loaded.')

    def loadAESquantparams(self):
        ''' Loads standard values of Auger quant parameters
        TODO what about dealing with local shifts '''
        # Checkbutton option for local (or standard) AESquantparams in file loader?
        self.aesquantparams = pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
        print('AESquantparams loaded')

    def get_elems_smdiff(self):
        ''' Finds element quant already performed from smdifflog (within AESdataset) '''
        match = self.AESdataset.Smdifpeakslog.loc[
            (self.AESdataset.Smdifpeakslog['Filename'] == self.filename)]
        # should contain a row for each element included in quant
        self.elems_smdiff = []   # elem/ peak name
        self.smdiff_shifts = []  # shifts from ideal position
        self.smdiff_ampl = []    # negintensity - posintensity
        self.smdiff_widths = []  # ev diff between negpeak and pospeak
        for index, row in match.iterrows():
            self.elems_smdiff.append(row.PeakID)
            self.smdiff_shifts.append(row.Shift)
            self.smdiff_ampl.append(row.Amplitude)
            self.smdiff_widths.append(row.Peakwidth)

    def get_elems_integ(self):
        ''' Pull existing quant results from integ log file (if present) '''
        # TODO: not yet implemented
        pass

    def savecsv(self):
        ''' Save any changes to underlying csv file '''
        # BUG FIX: was declared without self, so it could never be called
        # as a bound method. Still a stub.
        pass
class AESdataset():
    ''' Loads all dataframes with Auger parameters from current project folder '''
    def __init__(self, *args, **kwargs):
        # Folder is chosen interactively via a tkinter directory dialog.
        self.path = filedialog.askdirectory()
        # open the standard Auger log csv files
        self.open_main_files()
        self.filelist = self.Augerparamlog.Filenumber.unique().tolist()
        self.numfiles = len(self.Augerparamlog)
        # BUG FIX: message previously said 'EDXdataset' (copy-paste from EDX code)
        print(str(self.numfiles), ' loaded from AESdataset.')

    def open_main_files(self):
        ''' Auto loads Auger param files from working directory including
        Augerparamlog - assorted params associated w/ each Auger spe file
        Smdifpeakslog - per-element peak results (see AESspectrum.get_elems_smdiff)
        Backfitlog - ranges and parameters for background fits
        Integquantlog - subtracted and corrected counts for chosen elements
        Missing files are replaced by empty DataFrames. '''
        # NOTE(review): these csv paths are relative, so they resolve against
        # the process cwd rather than the folder chosen in self.path -- confirm.
        if os.path.exists('Augerparamlog.csv'):
            self.Augerparamlog = pd.read_csv('Augerparamlog.csv', encoding='cp437')
            self.spelist = self.Augerparamlog[pd.notnull(self.Augerparamlog['Areas'])]
        else:
            self.Augerparamlog = pd.DataFrame()
            self.spelist = pd.DataFrame()
        if os.path.exists('Smdifpeakslog.csv'):
            self.Smdifpeakslog = pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
        else:
            self.Smdifpeakslog = pd.DataFrame()
        if os.path.exists('Backfitlog.csv'):
            self.Backfitlog = pd.read_csv('Backfitlog.csv', encoding='cp437')
        else:
            self.Backfitlog = pd.DataFrame()
        if os.path.exists('Integquantlog.csv'):
            self.Integquantlog = pd.read_csv('Integquantlog.csv', encoding='cp437')
        else:
            self.Integquantlog = pd.DataFrame()
        # CONSISTENCY FIX: reuse the module-level AESQUANTPARAMFILE constant
        # instead of re-hardcoding the same absolute path, and replace the
        # bare except with an explicit Exception catch.
        try:
            self.AESquantparams = pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
        except Exception:
            self.AESquantparams = pd.DataFrame()
| 42.98374 | 146 | 0.651409 |
aceeb4813e004273d27858b5815adfda4b4552da | 756 | py | Python | users/views.py | jwinternet/learning_log | 5de0ba9c0d1a767b54afc3338570f85e8479880d | [
"MIT"
] | null | null | null | users/views.py | jwinternet/learning_log | 5de0ba9c0d1a767b54afc3338570f85e8479880d | [
"MIT"
] | null | null | null | users/views.py | jwinternet/learning_log | 5de0ba9c0d1a767b54afc3338570f85e8479880d | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
def register(request):
    """Register a new user."""
    if request.method == 'POST':
        # Process completed form.
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            # Log the user in and then redirect to home page.
            login(request, new_user)
            return redirect('learning_logs:index')
    else:
        # Display blank registration form.
        form = UserCreationForm()
    # Display a blank or invalid form.
    return render(request, 'registration/register.html', {'form': form})
aceeb566166547e2ecf89aec42eae9ecd9e0c2b6 | 4,860 | py | Python | langate/portal/views.py | InsaLan/langate2000 | ad0d971f46c85887a24cd0fa614e9c729596b531 | [
"Apache-2.0"
] | 3 | 2020-05-15T19:29:03.000Z | 2021-05-25T13:26:48.000Z | langate/portal/views.py | InsaLan/langate2000 | ad0d971f46c85887a24cd0fa614e9c729596b531 | [
"Apache-2.0"
] | 17 | 2020-02-10T20:01:02.000Z | 2022-02-10T10:37:21.000Z | langate/portal/views.py | Insalan-EquipeTechnique/langate2000 | 55c64c10ac83130b5dba3c873d81fba12a889c12 | [
"Apache-2.0"
] | 2 | 2019-10-08T20:08:51.000Z | 2020-07-31T22:47:42.000Z | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.conf import settings
from .models import *
from modules import netcontrol
# Create your views here.
@staff_member_required
def announces(request):
    """Staff-only page for managing announces."""
    return render(request, 'portal/management_announces.html',
                  {"page_name": "management_announces"})
@staff_member_required
def whitelist(request):
    """Staff-only page for managing the whitelist."""
    return render(request, 'portal/management_whitelist.html',
                  {"page_name": "management_whitelist"})
@staff_member_required
def management(request):
    """Staff-only management landing page."""
    return render(request, 'portal/management.html',
                  {"page_name": "management"})
@staff_member_required
def devices(request):
    """Staff-only page for managing registered devices."""
    return render(request, 'portal/management_devices.html',
                  {"page_name": "management_devices"})
@login_required
def connected(request):
    """Landing page of the captive portal: register the requesting device.

    Reconciles the client (IP taken from X-Forwarded-For, MAC resolved via
    netcontrol) with the user's registered Device records, enforcing the
    per-user device quota. Device creation/deletion triggers the network
    side effects wired up in portal/models.py receivers.
    """
    user_devices = Device.objects.filter(user=request.user)
    client_ip = request.META.get('HTTP_X_FORWARDED_FOR')
    context = {"page_name": "connected",
               "too_many_devices": False,
               "current_ip": client_ip,
               "is_announce_panel_visible": Announces.objects.filter(visible=True).count() > 0,
               "pinned_announces": Announces.objects.filter(pinned=True).order_by('-last_update_date'),
               "announces": Announces.objects.filter(pinned=False).order_by('-last_update_date'),
               "device_quota": request.user.profile.max_device_nb}
    # Checking if the device accessing the gate is already in user devices
    if not user_devices.filter(ip=client_ip).exists():
        #client_mac = network.get_mac(client_ip)
        r = netcontrol.query("get_mac", { "ip": client_ip })
        client_mac = r["mac"]
        if Device.objects.filter(mac=client_mac).count() > 0:
            # If the device MAC is already registered on the network but with a different IP,
            # * If the registered device is owned by the requesting user, we change the IP of the registered device.
            # * If the registered device is owned by another user, we delete the old device and we register the new one.
            # This could happen if the DHCP has changed the IP of the client.
            # The following should never raise a MultipleObjectsReturned exception
            # because it would mean that there are more than one devices
            # already registered with the same MAC.
            dev = Device.objects.get(mac=client_mac)
            if request.user != dev.user:
                dev.delete()
                new_dev = Device(user=request.user, ip=client_ip)
                new_dev.save()
            else:
                dev.ip = client_ip  # We edit the IP to reflect the change.
                dev.save()
        elif len(user_devices) >= request.user.profile.max_device_nb:
            # If user has too much devices already registered, then we can't connect the device to the internet.
            # We will let him choose to remove one of them.
            context["too_many_devices"] = True
        else:
            # We can add the client device to the user devices.
            # See the networking functions in the receivers in portal/models.py
            dev = Device(user=request.user, ip=client_ip)
            dev.save()
    # TODO: What shall we do if an user attempts to connect with a device that has the same IP
    # that another device already registered (ie in the Device array) but from a different user account ?
    # We could either kick out the already registered user from the network or refuse the connection of
    # the device that attempts to connect.
    return render(request, 'portal/connected.html', context)
@login_required
def disconnect(request):
    """Forget the requesting device (if registered) and log the user out."""
    client_ip = request.META.get('HTTP_X_FORWARDED_FOR')
    owned = Device.objects.filter(user=request.user)
    matching = owned.filter(ip=client_ip)
    if matching.exists():
        # Removing the Device record triggers the network revocation hooks
        # wired up in portal/models.py receivers.
        matching.delete()
    logout(request)
    return redirect(settings.LOGIN_URL)
def faq(request):
    """Render the FAQ page together with current announces."""
    any_visible = Announces.objects.filter(visible=True).count() > 0
    pinned = Announces.objects.filter(pinned=True).order_by('-last_update_date')
    unpinned = Announces.objects.filter(pinned=False).order_by('-last_update_date')
    return render(request, 'portal/faq.html', {
        "page_name": "faq",
        "is_announce_panel_visible": any_visible,
        "pinned_announces": pinned,
        "announces": unpinned,
    })
| 37.674419 | 120 | 0.686214 |
aceeb63173bb511eed43ff9ad109ad2a217a1c8b | 1,151 | py | Python | MODAK/setup.py | mihaTrajbaric/application-optimisation | 45767c8b10c18645f71d96e275165c68c2479117 | [
"Apache-2.0"
] | null | null | null | MODAK/setup.py | mihaTrajbaric/application-optimisation | 45767c8b10c18645f71d96e275165c68c2479117 | [
"Apache-2.0"
] | null | null | null | MODAK/setup.py | mihaTrajbaric/application-optimisation | 45767c8b10c18645f71d96e275165c68c2479117 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
# Packaging configuration for the MODAK distribution.
setup(
    name="MODAK",
    version="0.1.0",
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependencies.
    install_requires=[
        "pydantic[email]",
        "Jinja2",
        "fastapi",
        "uvicorn",
        "sqlalchemy[asyncio,aiosqlite]",
        "rich",
        "requests",
        "archspec",
        "typing_extensions",
        "httpx",
        "aiocache",
        "python-jose[cryptography]",
        "loguru",
    ],
    # Optional dependency groups: `pip install MODAK[testing]` / `MODAK[docs]`.
    extras_require={
        "testing": [
            "pytest",
            "pytest-cov",
            "sqlalchemy[mypy]",
            "pytest-console-scripts",
        ],
        "docs": [
            "sphinx",
            "autodoc_pydantic",
            "sphinx-rtd-theme",
            "myst_parser",
            "autoapi",
        ],
    },
    # Command-line tools installed with the package (all defined in MODAK.cli).
    entry_points={
        "console_scripts": [
            "modak = MODAK.cli:modak",
            "modak-validate-json = MODAK.cli:validate_json",
            "modak-schema = MODAK.cli:schema",
            "modak-import-script = MODAK.cli:import_script",
            "modak-dbshell = MODAK.cli:dbshell",
        ],
    },
)
| 23.979167 | 60 | 0.487402 |
aceeb6a3e6fdf52e325767b0e0717f4cc8031595 | 1,291 | py | Python | 9term/fipt/P2PLending/partnership/signals.py | nik-sergeson/bsuir-informatics-labs | 14805fb83b8e2324580b6253158565068595e804 | [
"Apache-2.0"
] | null | null | null | 9term/fipt/P2PLending/partnership/signals.py | nik-sergeson/bsuir-informatics-labs | 14805fb83b8e2324580b6253158565068595e804 | [
"Apache-2.0"
] | null | null | null | 9term/fipt/P2PLending/partnership/signals.py | nik-sergeson/bsuir-informatics-labs | 14805fb83b8e2324580b6253158565068595e804 | [
"Apache-2.0"
] | null | null | null | from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from P2PLending.lending.models import MoneyProposalAd, MoneyRequestAd
from P2PLending.lending.signals import proposal_closed, request_closed
from P2PLending.partnership.models import MoneyRequestPartnershipSuggestion, MoneyProposalPartnershipSuggestion
@receiver(post_save, sender=MoneyRequestPartnershipSuggestion)
def validate_request_suggestion(sender, instance, created, **kwargs):
    """Validate a money-request suggestion right after it is first saved."""
    if not created:
        return
    instance.validate()
@receiver(post_save, sender=MoneyProposalPartnershipSuggestion)
def validate_proposal_suggestion(sender, instance, created, **kwargs):
    """Validate a money-proposal suggestion right after it is first saved."""
    if not created:
        return
    instance.validate()
@receiver(proposal_closed, sender=MoneyProposalAd)
def proposal_close_listener(sender, proposal, **kwargs):
    """Cancel every request suggestion attached to a proposal that closed."""
    pending = MoneyRequestPartnershipSuggestion.objects.filter(from_proposal=proposal)
    with transaction.atomic():
        for suggestion in pending:
            suggestion.cancel()
@receiver(request_closed, sender=MoneyRequestAd)
def request_close_listener(sender, request, **kwargs):
    """Cancel every proposal suggestion attached to a request that closed."""
    pending = MoneyProposalPartnershipSuggestion.objects.filter(from_request=request)
    with transaction.atomic():
        for suggestion in pending:
            suggestion.cancel()
| 37.970588 | 111 | 0.804802 |
aceeb6b81d425efe7acdf17357027dd3b9885861 | 11,223 | py | Python | graph_objs/waterfall/_textfont.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/waterfall/_textfont.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/waterfall/_textfont.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "waterfall"
_path_str = "waterfall.textfont"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        # Stored via the base type's item assignment, which presumably
        # validates the value -- see _BaseTraceHierarchyType.
        self["color"] = val
# colorsrc
# --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color.

        The 'colorsrc' property must be specified as a string or
        as a new_plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        # Stored via the base type's item assignment.
        self["colorsrc"] = val
# family
# ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.new_plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        # Stored via the base type's item assignment.
        self["family"] = val
# familysrc
# ---------
    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for family.

        The 'familysrc' property must be specified as a string or
        as a new_plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        # Stored via the base type's item assignment.
        self["familysrc"] = val
# size
# ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        # Stored via the base type's item assignment.
        self["size"] = val
# sizesrc
# -------
    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for size.

        The 'sizesrc' property must be specified as a string or
        as a new_plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        # Stored via the base type's item assignment.
        self["sizesrc"] = val
# Self properties description
# ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text summary of each settable property. Note: this is the
        # method's return value (runtime data), not a docstring.
        return """\
        color
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the font used for `text`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.waterfall.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.waterfall.Textfont
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.waterfall.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 33.906344 | 82 | 0.558763 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.