Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
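Before the rows themselves, a minimal loading sketch (not part of the dump): it assumes the table has been exported to a JSON Lines file, and the file name below is a placeholder.

```python
import pandas as pd

# Hypothetical JSONL export of the table above, one object per row.
df = pd.read_json("rows.jsonl", lines=True)

# Keep small, BSD-licensed Python files, using the columns from the schema.
small_bsd = df[
    (df["size"] < 5_000)
    & df["max_stars_repo_licenses"].apply(lambda licenses: "BSD-3-Clause" in licenses)
]
print(small_bsd[["max_stars_repo_name", "max_stars_repo_path", "size"]])
```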
Row 79508ef0a1803755e122c9c087115570ad3b8146 | size 4,236 | ext py | lang Python
max_stars: autosklearn/pipeline/components/feature_preprocessing/liblinear_svc_preprocessor.py @ FelixNeutatz/auto-sklearn (head b5d141603332041475ed746aa1640334f5561aea) | licenses ["BSD-3-Clause"] | count 2 | events 2020-02-22T15:00:49.000Z to 2020-06-28T08:20:19.000Z
max_issues: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
content:
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant
from ConfigSpace.forbidden import ForbiddenEqualsClause, \
ForbiddenAndConjunction
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
from autosklearn.pipeline.constants import SPARSE, DENSE, UNSIGNED_DATA, INPUT
from autosklearn.util.common import check_for_bool, check_none
class LibLinear_Preprocessor(AutoSklearnPreprocessingAlgorithm):
# Liblinear is not deterministic as it uses a RNG inside
def __init__(self, penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling, class_weight=None,
random_state=None):
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.preprocessor = None
def fit(self, X, Y):
import sklearn.svm
from sklearn.feature_selection import SelectFromModel
self.C = float(self.C)
self.tol = float(self.tol)
self.dual = check_for_bool(self.dual)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.intercept_scaling = float(self.intercept_scaling)
if check_none(self.class_weight):
self.class_weight = None
estimator = sklearn.svm.LinearSVC(penalty=self.penalty,
loss=self.loss,
dual=self.dual,
tol=self.tol,
C=self.C,
class_weight=self.class_weight,
fit_intercept=self.fit_intercept,
intercept_scaling=self.intercept_scaling,
multi_class=self.multi_class,
random_state=self.random_state)
estimator.fit(X, Y)
self.preprocessor = SelectFromModel(estimator=estimator,
threshold='mean',
prefit=True)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'LinearSVC Preprocessor',
'name': 'Liblinear Support Vector Classification Preprocessing',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': False,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (INPUT,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
penalty = Constant("penalty", "l1")
loss = CategoricalHyperparameter(
"loss", ["hinge", "squared_hinge"], default_value="squared_hinge")
dual = Constant("dual", "False")
# This is set ad-hoc
tol = UniformFloatHyperparameter("tol", 1e-5, 1e-1, default_value=1e-4, log=True)
C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True, default_value=1.0)
multi_class = Constant("multi_class", "ovr")
# These are set ad-hoc
fit_intercept = Constant("fit_intercept", "True")
intercept_scaling = Constant("intercept_scaling", 1)
cs.add_hyperparameters([penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling])
penalty_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(penalty, "l1"),
ForbiddenEqualsClause(loss, "hinge")
)
cs.add_forbidden_clause(penalty_and_loss)
return cs
avg_line_length 41.940594 | max_line_length 89 | alphanum_fraction 0.602927
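The LibLinear_Preprocessor in the row above defines fit/transform and its own ConfigSpace search space but ships no usage. A hedged standalone sketch, assuming auto-sklearn and its ConfigSpace/scikit-learn dependencies are importable; the iris data and the fixed random seed are illustrative choices, not taken from the row.

```python
from sklearn.datasets import load_iris

from autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor import (
    LibLinear_Preprocessor,
)

X, y = load_iris(return_X_y=True)

# Sample one valid configuration from the space declared by the component
# (the forbidden clause rules out the penalty="l1"/loss="hinge" combination).
cs = LibLinear_Preprocessor.get_hyperparameter_search_space()
params = cs.sample_configuration().get_dictionary()

preproc = LibLinear_Preprocessor(random_state=0, **params)
X_reduced = preproc.fit(X, y).transform(X)
print(X.shape, "->", X_reduced.shape)  # typically fewer columns after L1-based selection
```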
Row 79508f1b47d46c1b8e2ea3b1a415c1c3131e0cc8 | size 7,207 | ext py | lang Python
max_stars: acme/examples/http01_example.py @ RConn12/certbot (head 34b568f36648dd5c4103c3a444e81b5662e6be81) | licenses ["Apache-2.0"] | count 4 | events 2020-04-04T19:08:35.000Z to 2020-07-03T04:57:18.000Z
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count 1 | events 2019-12-29T16:34:20.000Z to 2019-12-29T16:34:20.000Z
content:
"""Example ACME-V2 API for HTTP-01 challenge.
Brief:
This is a complete usage example of the python-acme API.
Limitations of this example:
- Works for only one Domain name
- Performs only HTTP-01 challenge
- Uses ACME-v2
Workflow:
(Account creation)
- Create account key
- Register account and accept TOS
(Certificate actions)
- Select HTTP-01 within offered challenges by the CA server
- Set up http challenge resource
- Set up standalone web server
- Create domain private key and CSR
- Issue certificate
- Renew certificate
- Revoke certificate
(Account update actions)
- Change contact information
- Deactivate Account
"""
from contextlib import contextmanager
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
from acme import challenges
from acme import client
from acme import crypto_util
from acme import errors
from acme import messages
from acme import standalone
import josepy as jose
# Constants:
# This is the staging point for ACME-V2 within Let's Encrypt.
DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory'
USER_AGENT = 'python-acme-example'
# Account key size
ACC_KEY_BITS = 2048
# Certificate private key size
CERT_PKEY_BITS = 2048
# Domain name for the certificate.
DOMAIN = 'client.example.com'
# If you are running Boulder locally, it is possible to configure any port
# number to execute the challenge, but real CA servers will always use port
# 80, as described in the ACME specification.
PORT = 80
# Useful methods and classes:
def new_csr_comp(domain_name, pkey_pem=None):
"""Create certificate signing request."""
if pkey_pem is None:
# Create private key.
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, CERT_PKEY_BITS)
pkey_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
pkey)
csr_pem = crypto_util.make_csr(pkey_pem, [domain_name])
return pkey_pem, csr_pem
def select_http01_chall(orderr):
"""Extract authorization resource from within order resource."""
# Authorization Resource: authz.
# This object holds the offered challenges by the server and their status.
authz_list = orderr.authorizations
for authz in authz_list:
# Choosing challenge.
# authz.body.challenges is a set of ChallengeBody objects.
for i in authz.body.challenges:
# Find the supported challenge.
if isinstance(i.chall, challenges.HTTP01):
return i
raise Exception('HTTP-01 challenge was not offered by the CA server.')
@contextmanager
def challenge_server(http_01_resources):
"""Manage standalone server set up and shutdown."""
# Setting up a fake server that binds at PORT and any address.
address = ('', PORT)
try:
servers = standalone.HTTP01DualNetworkedServers(address,
http_01_resources)
# Start client standalone web server.
servers.serve_forever()
yield servers
finally:
# Shutdown client web server and unbind from PORT
servers.shutdown_and_server_close()
def perform_http01(client_acme, challb, orderr):
"""Set up standalone webserver and perform HTTP-01 challenge."""
response, validation = challb.response_and_validation(client_acme.net.key)
resource = standalone.HTTP01RequestHandler.HTTP01Resource(
chall=challb.chall, response=response, validation=validation)
with challenge_server({resource}):
# Let the CA server know that we are ready for the challenge.
client_acme.answer_challenge(challb, response)
# Wait for challenge status and then issue a certificate.
# It is possible to set a deadline time.
finalized_orderr = client_acme.poll_and_finalize(orderr)
return finalized_orderr.fullchain_pem
# Main examples:
def example_http():
"""This example executes the whole process of fulfilling a HTTP-01
challenge for one specific domain.
The workflow consists of:
(Account creation)
- Create account key
- Register account and accept TOS
(Certificate actions)
- Select HTTP-01 within offered challenges by the CA server
- Set up http challenge resource
- Set up standalone web server
- Create domain private key and CSR
- Issue certificate
- Renew certificate
- Revoke certificate
(Account update actions)
- Change contact information
- Deactivate Account
"""
# Create account key
acc_key = jose.JWKRSA(
key=rsa.generate_private_key(public_exponent=65537,
key_size=ACC_KEY_BITS,
backend=default_backend()))
# Register account and accept TOS
net = client.ClientNetwork(acc_key, user_agent=USER_AGENT)
directory = messages.Directory.from_json(net.get(DIRECTORY_URL).json())
client_acme = client.ClientV2(directory, net=net)
# Terms of Service URL is in client_acme.directory.meta.terms_of_service
# Registration Resource: regr
# Creates account with contact information.
email = ('fake@example.com')
regr = client_acme.new_account(
messages.NewRegistration.from_data(
email=email, terms_of_service_agreed=True))
# Create domain private key and CSR
pkey_pem, csr_pem = new_csr_comp(DOMAIN)
# Issue certificate
orderr = client_acme.new_order(csr_pem)
# Select HTTP-01 within offered challenges by the CA server
challb = select_http01_chall(orderr)
# The certificate is ready to be used in the variable "fullchain_pem".
fullchain_pem = perform_http01(client_acme, challb, orderr)
# Renew certificate
_, csr_pem = new_csr_comp(DOMAIN, pkey_pem)
orderr = client_acme.new_order(csr_pem)
challb = select_http01_chall(orderr)
# Performing challenge
fullchain_pem = perform_http01(client_acme, challb, orderr)
# Revoke certificate
fullchain_com = jose.ComparableX509(
OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, fullchain_pem))
try:
client_acme.revoke(fullchain_com, 0) # revocation reason = 0
except errors.ConflictError:
# Certificate already revoked.
pass
# Query registration status.
client_acme.net.account = regr
try:
regr = client_acme.query_registration(regr)
except errors.Error as err:
if err.typ == messages.OLD_ERROR_PREFIX + 'unauthorized' \
or err.typ == messages.ERROR_PREFIX + 'unauthorized':
# Status is deactivated.
pass
raise
# Change contact information
email = 'newfake@example.com'
regr = client_acme.update_registration(
regr.update(
body=regr.body.update(
contact=('mailto:' + email,)
)
)
)
# Deactivate account/registration
regr = client_acme.deactivate_registration(regr)
if __name__ == "__main__":
example_http()
avg_line_length 29.904564 | max_line_length 78 | alphanum_fraction 0.690301
Row 7950906957146e5ba74b6d0bf78825decdfc67c5 | size 981 | ext py | lang Python
max_stars: cranserver/contrib/s3.py @ darshandoshi95/cran-server (head 6b1e13dee9c19d8898eb57368a159c77e70d9430) | licenses ["BSD-3-Clause"] | count null | events null
max_issues: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
content:
import os
import io
from contextlib import contextmanager
import boto3
import botocore
from lib.storage import Storage
from lib.package import Package
class S3Storage(Storage):
def __init__(self, bucket_loc=None):
s3 = boto3.resource('s3')
self._bucket_loc = bucket_loc or os.getenv('DEFAULT_BUCKET')
self.bucket = s3.Bucket(self._bucket_loc)
def __iter__(self):
prefix = '/src/contrib'
objs = self.bucket.objects.filter(Prefix=prefix)
for el in objs:
yield el.key
def __len__(self):
return len(list(self.__iter__()))
    def __getitem__(self, pkg_id):
        # download_fileobj writes raw bytes, so the buffer must be binary
        target = io.BytesIO()
        self.bucket.download_fileobj(pkg_id, target)
        target.seek(0)
        return target
def __setitem__(self, pkg_id, fobj):
self.bucket.upload_fileobj(fobj, pkg_id)
def __delitem__(self, pkg_id):
pass
if __name__ == '__main__':
s = S3Storage()
for p in s:
print(p)
avg_line_length 22.295455 | max_line_length 68 | alphanum_fraction 0.641182
Row 79509095eed237e4537999be8cf321333998faa1 | size 1,988 | ext py | lang Python
max_stars: gammapy/utils/scripts_test.py @ Jaleleddine/gammapy (head de9195df40fa5bbf8840cda4e7cd5e8cc5eaadbb) | licenses ["BSD-3-Clause"] | count 1 | events 2021-02-02T21:35:27.000Z to 2021-02-02T21:35:27.000Z
max_issues: gammapy/utils/scripts_test.py @ kabartay/gammapy (head 015206d2418b1d254f1c9d3ea819ab0c5ece99e9) | licenses ["BSD-3-Clause"] | count 1 | events 2017-02-22T23:12:30.000Z to 2017-02-22T23:12:30.000Z
max_forks: gammapy/utils/scripts_test.py @ kabartay/gammapy (head 015206d2418b1d254f1c9d3ea819ab0c5ece99e9) | licenses ["BSD-3-Clause"] | count null | events null
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test if Jupyter notebooks work."""
import logging
import os
import subprocess
import sys
from pathlib import Path
import pkg_resources
import yaml
log = logging.getLogger(__name__)
def get_scripts():
"""Read `scripts.yaml` info."""
path = Path("examples") / "scripts.yaml"
with path.open() as fh:
return yaml.safe_load(fh)
def requirement_missing(script):
"""Check if one of the requirements is missing."""
if "requires" in script:
if script["requires"] is None:
return False
for package in script["requires"].split():
try:
pkg_resources.working_set.require(package)
except Exception:
return True
return False
def script_test(path):
"""Check if example Python script is broken."""
log.info(f" ... EXECUTING {path}")
cmd = [sys.executable, str(path)]
cp = subprocess.run(cmd, stderr=subprocess.PIPE)
if cp.returncode:
log.info(" ... FAILED")
log.info(" ___ TRACEBACK")
log.info(cp.stderr.decode("utf-8") + "\n\n")
return False
else:
log.info(" ... PASSED")
return True
def main():
logging.basicConfig(level=logging.INFO)
if "GAMMAPY_DATA" not in os.environ:
log.info("GAMMAPY_DATA environment variable not set.")
log.info("Running scripts tests requires this environment variable.")
log.info("Exiting now.")
sys.exit()
passed = True
for script in get_scripts():
if requirement_missing(script):
log.info(f"Skipping script (missing requirement): {script['name']}")
continue
filename = script["name"] + ".py"
path = Path("examples") / filename
if not script_test(path):
passed = False
if not passed:
sys.exit("Some tests failed. Existing now.")
if __name__ == "__main__":
main()
avg_line_length 25.487179 | max_line_length 80 | alphanum_fraction 0.611167
Row 795090bc4ec0f2eb40098715a83b48e641cc7214 | size 13,297 | ext py | lang Python
max_stars: oci_tools/training_tools.py @ AnykeyNL/oci-tools (head bcf24b26693a123b988f645210e6726f3897b81b) | licenses ["Apache-2.0"] | count 1 | events 2019-06-27T04:17:37.000Z to 2019-06-27T04:17:37.000Z
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count 1 | events 2021-11-22T19:39:24.000Z to 2021-11-22T19:39:24.000Z
content:
from pprint import pformat
from .oci_resources import *
from oci_tools import RESOURCE as R
from oci_tools import REGIONS
from oci.exceptions import ServiceError
compute_client: oci.core.ComputeClient = None
network_client: oci.core.VirtualNetworkClient = None
bv_client: oci.core.BlockstorageClient = None
identity_client: oci.identity.IdentityClient = None
lb_client: oci.load_balancer.LoadBalancerClient = None
db_client: oci.database.DatabaseClient = None
def _init_api_client(conf: OCIConfig):
global compute_client
global network_client
global bv_client
global lb_client
global db_client
lb_client = oci.load_balancer.LoadBalancerClient(conf.config)
network_client = oci.core.VirtualNetworkClient(conf.config)
compute_client = oci.core.ComputeClient(conf.config)
bv_client = oci.core.BlockstorageClient(conf.config)
db_client = oci.database.DatabaseClient(conf.config)
def run(config: OCIConfig):
get_regions(config)
scan_tenancy(config)
# currently cleanup and terminate-all are equivalent
if config.operation == 'terminate-all':
cleanup(config)
elif config.operation == 'cleanup':
cleanup(config)
def scan_tenancy(config: OCIConfig):
"""
Scan the tenancy by compartments
:param config: OCIConfig object
"""
compartment_list(config)
resource_list(config)
logging.info('{}'.format(pformat(config.compartments_tree)))
def cleanup(config: OCIConfig, force=False):
"""
Clean up operations
    TODO: currently the cleanup operation follows the compartment tree. It should take the dependency tree into consideration
:param config: OCIConfig object
:param force: terminate also the top level compartment
"""
for r in config.compartments_tree.keys():
# logging.info(r)
config.workon_region = r
logging.info("Clean-up resources in {} region".format(r))
for tree in config.compartments_tree[r]:
tree.cleanup(config=config, force=force)
def get_regions(conf: OCIConfig):
"""
discover subscribed regions and home region.
:param conf: OCI configuration
:return:
"""
global identity_client
# loop over the full list of regions as we don't know in advance what are the subscribed regions
for r in REGIONS:
conf.workon_region = r
identity_client = oci.identity.IdentityClient(conf.config)
try:
rs = identity_client.list_region_subscriptions(conf.tenancy)
conf.region_subscriptions = rs.data
break
except ServiceError as se:
continue
logging.info('Home region: {}'.format(conf.home_region))
logging.info('Regions: {}'.format(conf.region_subscriptions))
def compartment_list(conf: OCIConfig):
"""
list all compartments
:param conf: OCIConfig object
"""
region_tree = {}
for r in conf.region_subscriptions:
conf.workon_region = r.region_name
# TODO: implement copy function to avoid scanning compartment for each region
region_tree[r.region_name] = compartment_tree_build(conf)
'''
logging.info('_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-')
logging.info('Compartment tree')
logging.info('Region: {}\n{}'.format(r, pformat(region_tree[r])))
logging.info('_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-')
'''
conf.compartments_tree = region_tree
def compartment_tree_build(conf: OCIConfig):
"""
build a full compartment tree
"""
global identity_client
identity_client = oci.identity.IdentityClient(conf.config)
#get_regions(conf)
tree = []
def _get_nested_resources(api_list_call: identity_client.list_compartments, id: str, tree: []):
elems = oci.pagination.list_call_get_all_results(api_list_call, id,compartment_id_in_subtree=False)
for item in elems.data:
compartment = OciCompartment(item, identity_client)
if (conf.preserve_compartments and compartment.name in conf.preserve_compartments or
(conf.skip_scan_preserved_resources and compartment.check_tags(conf.preserve_tags))):
continue
if not compartment.is_active():
continue
_get_nested_resources(api_list_call, compartment.id, compartment)
tree.append(compartment)
_get_nested_resources(identity_client.list_compartments, conf.tenancy, tree)
return tree
def resource_list(conf: OCIConfig):
"""
recursively visit all compartments in all regions and retrieve resources
:param conf: OCIConfig object
"""
def _retrieve_resources_in_compartment(tree, region, traverse_level=1, scan_resources=False):
logging.info('{} {}'.format('__'*traverse_level, tree['name']))
items = tree.get(R.COMPARTMENT)
for nested_item in [] if not items else items:
traverse_level += 1
scan = scan_resources or not bool(conf.compartment_filter) or nested_item.name in conf.compartment_filter
_retrieve_resources_in_compartment(nested_item, region, traverse_level, scan_resources=scan)
traverse_level -= 1
if scan_resources:
_get_network_resources(tree, conf)
_get_bv_resources(tree, conf)
_get_instance_resources(tree, conf)
_get_lb_resources(tree, conf)
_get_db_resources(tree, conf)
_get_autonomous_resources(tree, conf)
for r in conf.compartments_tree.keys():
# logging.info(r)
conf.workon_region = r
logging.info("Resource discovery - visit compartments in {} region".format(r))
_init_api_client(conf)
# bv_client.list_volumes('').data
for tree in conf.compartments_tree[r]:
scan = not bool(conf.compartment_filter) or tree.name in conf.compartment_filter
_retrieve_resources_in_compartment(tree, r, scan_resources=scan)
def _get_instance_resources(tree: OciResource, conf: OCIConfig):
"""
retrieve instances and vnics
:param tree: compartment subtree
"""
ilist = oci.pagination.list_call_get_all_results(compute_client.list_instances, compartment_id=tree['id'])
def _get_nested_resources(api_list_call, res: OciResource):
try:
rlist = oci.pagination.list_call_get_all_results(api_list_call,
compartment_id=tree['id'],
instance_id=i.id)
for r in rlist.data:
res_obj = res(r, compute_client)
if conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags):
continue
if not res_obj or not res_obj.is_active():
continue
                instance.append(res_obj)  # attach the nested resource to its parent instance
# vcn dependency tree for clean-up operation
if isinstance(res_obj, OciVnicAttachment):
# if primary vnic the dependency is on the instance as I can't detach the primary vnic
# else is just the vnic-attachment
vnic_id= res_obj.resource.vnic_id
vnic = network_client.get_vnic(vnic_id)
if vnic.data.is_primary:
OciResource.set_dependency(r.subnet_id, instance)
else:
OciResource.set_dependency(r.subnet_id, res_obj)
except Exception as e:
logging.error('unable to retrieve {} Instance {}'.format(res.resource_type, i.id))
for i in ilist.data:
instance = OciInstance(i, compute_client)
if not instance.is_active():
continue
_get_nested_resources(compute_client.list_vnic_attachments, OciVnicAttachment)
tree.append(instance)
def _get_network_resources(tree, conf: OCIConfig):
"""
retrieve: vcn, subnet, gateways, secury list, route tables
:param tree: compartment subtree
"""
ilist = oci.pagination.list_call_get_all_results(network_client.list_vcns, compartment_id=tree['id'])
def _get_nested_resources(api_list_call, res: OciResource, **kwargs):
try:
if 'vcn_id' in kwargs:
rlist = oci.pagination.list_call_get_all_results(api_list_call,
compartment_id=tree['id'],
vcn_id=kwargs.get('vcn_id'))
else:
rlist = oci.pagination.list_call_get_all_results(api_list_call,
compartment_id=tree['id'])
if not rlist.data:
return None
for r in rlist.data or []:
res_obj = res(r, network_client)
if conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags):
continue
if not res_obj or not res_obj.is_active():
continue
return res_obj
except oci.exceptions.ServiceError as se:
logging.error('unable to retrieve {} VCN {}'.format(res.resource_type, vcn.id))
return None
for i in ilist.data:
vcn = OciVcn(i, network_client)
vcn.append(_get_nested_resources(network_client.list_subnets, OciSubnet, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_internet_gateways, OciInternetGw, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_nat_gateways, OciNatGw, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_security_lists, OciSecurityList, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_route_tables, OciRouteTable, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_local_peering_gateways, OciLocalPeeringGw, vcn_id=vcn.id))
vcn.append(_get_nested_resources(network_client.list_service_gateways, OciServiceGw, vcn_id=vcn.id))
tree.append(vcn)
tree.append(_get_nested_resources(network_client.list_drgs, OciDRG))
tree.append(_get_nested_resources(network_client.list_cpes, OciCPE))
tree.append(_get_nested_resources(network_client.list_drg_attachments, OciDRGAttachment))
tree.append(_get_nested_resources(network_client.list_remote_peering_connections, OciRPC))
tree.append(_get_nested_resources(network_client.list_ip_sec_connections, OciVPN))
def _get_bv_resources(tree, conf: OCIConfig):
"""
retrieve block volumes
:param tree: compartment subtree
"""
try:
ilist = oci.pagination.list_call_get_all_results(bv_client.list_volumes, compartment_id=tree['id'])
for i in ilist.data:
res_obj = OciBlockVolume(i, bv_client)
if (conf.skip_scan_preserved_resources and res_obj.check_tags(
conf.preserve_tags)) or not res_obj.is_active():
continue
tree.append(res_obj)
except Exception as e:
logging.error('error while retrieving Block Volume resources')
def _get_lb_resources(tree, conf: OCIConfig):
"""
retrieve: lb resources
:param tree: compartment subtree
"""
ilist = oci.pagination.list_call_get_all_results(lb_client.list_load_balancers, compartment_id=tree['id'])
for i in ilist.data:
res_obj = OciLoadBalancer(i, lb_client)
if (conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags)) or not res_obj.is_active():
continue
tree.append(res_obj)
def _get_db_resources(tree, conf: OCIConfig):
"""
retrieve: db_system resources
:param tree: compartment subtree
"""
ilist = oci.pagination.list_call_get_all_results(db_client.list_db_systems, compartment_id=tree['id'])
for i in ilist.data:
res_obj = OciDbSystem(i, db_client)
if (conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags)) or not res_obj.is_active():
continue
dbhomes = db_client.list_db_homes(tree['id'], res_obj.id)
if dbhomes and dbhomes.data:
for dbh in dbhomes.data:
res_obj.append(OciDBHome(dbh, db_client))
tree.append(res_obj)
ilist = oci.pagination.list_call_get_all_results(db_client.list_backups, compartment_id=tree['id'])
for i in ilist.data:
res_obj = OciDbBackup(i, db_client)
if (conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags)) or not res_obj.is_active():
continue
tree.append(res_obj)
def _get_autonomous_resources(tree, conf: OCIConfig):
"""
retrieve: autonomous db resources
:param tree: compartment subtree
"""
ilist = oci.pagination.list_call_get_all_results(db_client.list_autonomous_databases, compartment_id=tree['id'])
for i in ilist.data:
res_obj = OciAutonomousDB(i, db_client)
if (conf.skip_scan_preserved_resources and res_obj.check_tags(conf.preserve_tags)) or not res_obj.is_active():
continue
tree.append(res_obj)
avg_line_length 38.20977 | max_line_length 122 | alphanum_fraction 0.665789
Row 795090e85980208bf055c01fd07b218b0bb658e2 | size 2,520 | ext py | lang Python
max_stars: atoman/rendering/renderers/vectorRenderer.py @ chrisdjscott/Atoman (head e87ac31bbdcf53bb8f3efdfb109787d604890394) | licenses ["MIT"] | count 9 | events 2015-11-23T12:13:34.000Z to 2021-11-18T05:23:35.000Z
max_issues: same path, repo, and head as max_stars | licenses ["MIT"] | count 1 | events 2017-07-17T20:27:50.000Z to 2017-07-23T05:27:15.000Z
max_forks: same path, repo, and head as max_stars | licenses ["MIT"] | count 4 | events 2015-11-23T12:13:37.000Z to 2017-05-03T08:24:19.000Z
content:
"""
Module for rendering vectors
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import vtk
from . import baseRenderer
from .. import utils
class VectorRenderer(baseRenderer.BaseRenderer):
"""
Render vectors as arrows
"""
def __init__(self):
super(VectorRenderer, self).__init__()
self._logger = logging.getLogger(__name__)
def render(self, pointsData, scalarsArray, vectorsArray, nspecies, colouringOptions, vectorsOptions, lut,
invert=False):
"""
Render vectors.
"""
self._logger.debug("Rendering vectors")
# points
points = vtk.vtkPoints()
points.SetData(pointsData.getVTK())
# polydata
arrowPolyData = vtk.vtkPolyData()
arrowPolyData.SetPoints(points)
arrowPolyData.GetPointData().SetScalars(scalarsArray.getVTK())
arrowPolyData.GetPointData().SetVectors(vectorsArray.getVTK())
# arrow source
arrowSource = vtk.vtkArrowSource()
arrowSource.SetShaftResolution(vectorsOptions.vectorResolution)
arrowSource.SetTipResolution(vectorsOptions.vectorResolution)
if invert:
arrowSource.InvertOn()
arrowSource.Update()
# glyph mapper
arrowGlyph = vtk.vtkGlyph3DMapper()
arrowGlyph.OrientOn()
if vtk.vtkVersion.GetVTKMajorVersion() <= 5:
arrowGlyph.SetInputConnection(arrowPolyData.GetProducerPort())
else:
arrowGlyph.SetInputData(arrowPolyData)
arrowGlyph.SetSourceConnection(arrowSource.GetOutputPort())
arrowGlyph.SetScaleModeToScaleByMagnitude()
arrowGlyph.SetScaleArray("vectors")
arrowGlyph.SetScalarModeToUsePointFieldData()
arrowGlyph.SelectColorArray("colours")
arrowGlyph.SetScaleFactor(vectorsOptions.vectorScaleFactor)
arrowMapper = arrowGlyph
arrowMapper.SetLookupTable(lut)
utils.setMapperScalarRange(arrowMapper, colouringOptions, nspecies)
# actor
arrowActor = vtk.vtkActor()
arrowActor.SetMapper(arrowMapper)
# store attributes
self._actor = utils.ActorObject(arrowActor)
self._data["Points"] = pointsData
self._data["Scalars"] = scalarsArray
self._data["Vectors"] = vectorsArray
self._data["LUT"] = lut
self._data["Scale factor"] = vectorsOptions.vectorScaleFactor
avg_line_length 31.898734 | max_line_length 109 | alphanum_fraction 0.661905
Row 79509106752ebadaddc25fee8b0b63bef681a60a | size 633 | ext py | lang Python
max_stars: examples/flow.py @ jennranta/pyreaclib (head bd9210153b0c01c7ce230b43b88f0a5a1e198c0f) | licenses ["BSD-3-Clause"] | count null | events null
max_issues: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["BSD-3-Clause"] | count null | events null
content:
import pyreaclib
from pyreaclib.networks import RateCollection, PythonNetwork, Composition
files = ["c12-pg-n13-ls09",
"c13-pg-n14-nacr",
"n13--c13-wc12",
"n13-pg-o14-lg06",
"n14-pg-o15-im05",
"n15-pa-c12-nacr",
"o14--n14-wc12",
"o15--n15-wc12"]
rc = RateCollection(files)
comp = Composition(rc.get_nuclei())
comp.set_all(0.005)
comp.set_nuc("p", 0.7)
comp.set_nuc("he4", 0.28)
comp.normalize()
rho = 2000
T = 2.e8
rr = rc.evaluate_rates(rho, T, comp)
for k, v in rr.items():
print("{}: {}".format(k, v))
rc.plot(outfile="flow.png", rho=rho, T=T, comp=comp)
avg_line_length 21.1 | max_line_length 73 | alphanum_fraction 0.603476
Row 79509328ba589824c90315faab9844df456aa3fa | size 1,459 | ext py | lang Python
max_stars: pennylane/templates/subroutines/hardware_efficient.py @ pearcandy/pennylane (head dfa35989cd0798496e41999a197bcf0eb26185df) | licenses ["Apache-2.0"] | count null | events null
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
content:
'''
hardware_efficient.py
This code is distributed under the constitution of GNU-GPL.
(c) PearCandy
Log of hardware_efficient
2021/01/06 Released by PearCandy
'''
#coding:utf-8
#-------------------------------------------------------------
from pennylane import numpy as np
from pennylane.templates import template #import the decorator
from pennylane.ops import CNOT, RX, RY, RZ, Hadamard, CZ
@template
def HardwareEfficient(weights, wires, depth=1):
for d in range(depth):
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * d], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * d], wires=i)
for i in range(len(wires) // 2):
CZ(wires=[2 * i, 2 * i + 1])
for i in range(len(wires) // 2 - 1):
CZ(wires=[2 * i + 1, 2 * i + 2])
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * depth], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * depth], wires=i)
avg_line_length 45.59375 | max_line_length 81 | alphanum_fraction 0.374914
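The HardwareEfficient template in the row above consumes 2 * len(wires) * (depth + 1) weights: one RY/RZ pair per wire for each of the depth entangling layers plus the final rotation layer. A hedged usage sketch, assuming a PennyLane release contemporary with the @template decorator; the device, observable, and random weights are illustrative assumptions, not part of the row.

```python
import pennylane as qml
from pennylane import numpy as np

n_wires, depth = 4, 2
n_weights = 2 * n_wires * (depth + 1)  # matches the weight indexing in the template

dev = qml.device("default.qubit", wires=n_wires)

@qml.qnode(dev)
def circuit(weights):
    # HardwareEfficient is the template defined in the row above.
    HardwareEfficient(weights, wires=list(range(n_wires)), depth=depth)
    return qml.expval(qml.PauliZ(0))

weights = np.random.uniform(0, 2 * np.pi, size=n_weights)
print(circuit(weights))
```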
Row 7950940db8021dbf1930f2916d2a228372755416 | size 1,363 | ext py | lang Python
max_stars: behavioral/command.py @ codingbeautifully/python-patterns (head 5e83c2a085a51019486cc3d167dd6b09ad56770f) | licenses ["Apache-2.0"] | count null | events null
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from os.path import lexists
class MoveFileCommand(object):
def __init__(self, src, dest):
self.src = src
self.dest = dest
def execute(self):
self.rename(self.src, self.dest)
def undo(self):
self.rename(self.dest, self.src)
def rename(self, src, dest):
print(u"renaming %s to %s" % (src, dest))
os.rename(src, dest)
def main():
command_stack = []
# commands are just pushed into the command stack
command_stack.append(MoveFileCommand('foo.txt', 'bar.txt'))
command_stack.append(MoveFileCommand('bar.txt', 'baz.txt'))
# verify that none of the target files exist
assert(not lexists("foo.txt"))
assert(not lexists("bar.txt"))
assert(not lexists("baz.txt"))
try:
with open("foo.txt", "w"): # Creating the file
pass
# they can be executed later on
for cmd in command_stack:
cmd.execute()
# and can also be undone at will
for cmd in reversed(command_stack):
cmd.undo()
finally:
os.unlink("foo.txt")
if __name__ == "__main__":
main()
### OUTPUT ###
# renaming foo.txt to bar.txt
# renaming bar.txt to baz.txt
# renaming baz.txt to bar.txt
# renaming bar.txt to foo.txt
avg_line_length 23.5 | max_line_length 63 | alphanum_fraction 0.614087
Row 795094429795236c798d5d6665a308e2e881f295 | size 17,707 | ext py | lang Python
max_stars: pyvoltha/adapters/extensions/omci/tasks/omci_get_request.py @ willkurk/pyvoltha (head 5be22260ea8d5a1c68bb20222e000f74c0727ef0) | licenses ["Apache-2.0"] | count null | events null
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
content:
#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from task import Task
from twisted.internet import reactor
from twisted.internet.defer import failure, inlineCallbacks, TimeoutError, returnValue
from pyvoltha.adapters.extensions.omci.omci_defs import ReasonCodes, EntityOperations
from pyvoltha.adapters.extensions.omci.omci_me import MEFrame
from pyvoltha.adapters.extensions.omci.omci_frame import OmciFrame, OmciGetNext
from pyvoltha.adapters.extensions.omci.omci_cc import DEFAULT_OMCI_TIMEOUT
from pyvoltha.adapters.extensions.omci.omci_messages import OmciGet
from pyvoltha.adapters.extensions.omci.omci_fields import OmciTableField
RC = ReasonCodes
OP = EntityOperations
class GetException(Exception):
pass
class OmciGetRequest(Task):
"""
OpenOMCI Get an OMCI ME Instance Attributes
Upon completion, the Task deferred callback is invoked with a reference of
this Task object.
The Task has an initializer option (allow_failure) that will retry all
requested attributes if the original request fails with a status code of
9 (Attributes failed or unknown). This result means that an attribute
is not supported by the ONU or that a mandatory/optional attribute could
not be executed by the ONU, even if it is supported, for example,
because of a range or type violation.
"""
task_priority = 128
name = "ONU OMCI Get Task"
MAX_TABLE_SIZE = 16 * 1024 # Keep get-next logic reasonable
def __init__(self, omci_agent, device_id, entity_class, entity_id, attributes,
exclusive=True, allow_failure=False):
"""
Class initialization
:param omci_agent: (OmciAdapterAgent) OMCI Adapter agent
:param device_id: (str) ONU Device ID
:param entity_class: (EntityClass) ME Class to retrieve
:param entity_id: (int) ME Class instance ID to retrieve
:param attributes: (list or set) Name of attributes to retrieve
:param exclusive: (bool) True if this GET request Task exclusively own the
OMCI-CC while running. Default: True
:param allow_failure: (bool) If true, attempt to get all valid attributes
if the original request receives an error
code of 9 (Attributes failed or unknown).
"""
super(OmciGetRequest, self).__init__(OmciGetRequest.name,
omci_agent,
device_id,
priority=OmciGetRequest.task_priority,
exclusive=exclusive)
self._device = omci_agent.get_device(device_id)
self._entity_class = entity_class
self._entity_id = entity_id
self._attributes = attributes
self._allow_failure = allow_failure
self._failed_or_unknown_attributes = set()
self._results = None
self._local_deferred = None
def cancel_deferred(self):
super(OmciGetRequest, self).cancel_deferred()
d, self._local_deferred = self._local_deferred, None
try:
if d is not None and not d.called:
d.cancel()
except:
pass
@property
def me_class(self):
"""The OMCI Managed Entity Class associated with this request"""
return self._entity_class
@property
def entity_id(self):
"""The ME Entity ID associated with this request"""
return self._entity_id
@property
def attributes(self):
"""
Return a dictionary of attributes for the request if the Get was
successfully completed. None otherwise
"""
if self._results is None:
return None
omci_msg = self._results.fields['omci_message'].fields
return omci_msg['data'] if 'data' in omci_msg else None
@property
def success_code(self):
"""
Return the OMCI success/reason code for the Get Response.
"""
if self._results is None:
return None
return self._results.fields['omci_message'].fields['success_code']
@property
def raw_results(self):
"""
Return the raw Get Response OMCIFrame
"""
return self._results
def start(self):
"""
Start MIB Capabilities task
"""
super(OmciGetRequest, self).start()
self._local_deferred = reactor.callLater(0, self.perform_get_omci)
@property
def failed_or_unknown_attributes(self):
"""
Returns a set attributes that failed or unknown in the original get
request that resulted in an initial status code of 9 (Attributes
failed or unknown).
:return: (set of str) attributes
"""
return self._failed_or_unknown_attributes
def is_table_attr(self, attr):
index = self._entity_class.attribute_name_to_index_map[attr]
attr_def = self._entity_class.attributes[index]
return isinstance(attr_def.field, OmciTableField)
@inlineCallbacks
def perform_get_omci(self):
"""
Perform the initial get request
"""
self.log.info('perform-get', entity_class=self._entity_class,
entity_id=self._entity_id, attributes=self._attributes)
try:
# If one or more attributes is a table attribute, get it separately
first_attributes = {attr for attr in self._attributes if not self.is_table_attr(attr)}
table_attributes = {attr for attr in self._attributes if self.is_table_attr(attr)}
if len(first_attributes):
frame = MEFrame(self._entity_class, self._entity_id, first_attributes).get()
self.strobe_watchdog()
results = yield self._device.omci_cc.send(frame)
status = results.fields['omci_message'].fields['success_code']
self.log.debug('perform-get-status', status=status)
if status == RC.AttributeFailure.value:
# What failed? Note if only one attribute was attempted, then
# that is an overall failure
if not self._allow_failure or len(self._attributes) <= 1:
raise GetException('Get failed with status code: {}'.
format(RC.AttributeFailure.value))
self.strobe_watchdog()
# TODO: update failed & unknown attributes set
# Success?
if status in {RC.Success.value, RC.AttributeFailure.value}:
self._results = results
results_omci = results.fields['omci_message'].fields
# Were all attributes fetched?
missing_attr = frame.fields['omci_message'].fields['attributes_mask'] ^ \
results_omci['attributes_mask']
if missing_attr > 0 or len(table_attributes) > 0:
self.log.info('perform-get-missing', num_missing=missing_attr,
table_attr=table_attributes)
self.strobe_watchdog()
self._local_deferred = reactor.callLater(0,
self.perform_get_missing_attributes,
missing_attr, table_attributes)
returnValue(self._local_deferred)
else:
raise GetException('Get failed with status code: {}'.format(status))
self.log.debug('get-completed')
self.deferred.callback(self)
elif len(table_attributes) > 0:
# Here if only table attributes were requested
self.log.info('perform-get-table', table_attr=table_attributes)
self.strobe_watchdog()
self._local_deferred = reactor.callLater(0,
self.process_get_table,
table_attributes)
returnValue(self._local_deferred)
except TimeoutError as e:
self.deferred.errback(failure.Failure(e))
except Exception as e:
self.log.exception('perform-get', e=e, class_id=self._entity_class,
entity_id=self._entity_id, attributes=self._attributes)
self.deferred.errback(failure.Failure(e))
@inlineCallbacks
def perform_get_missing_attributes(self, missing_attr, table_attributes):
"""
This method is called when the original Get requests completes with success
but not all attributes were returned. This can happen if one or more of the
attributes would have exceeded the space available in the OMCI frame or if
one of the attributes is a table.
This routine iterates through the missing attributes and attempts to retrieve
the ones that were missing.
Once missing attributes are recovered, the table attributes are requested
:param missing_attr: (int) Missing attributes bitmask
:param table_attributes: (set) Attributes that need table get/get-next support
"""
self.log.debug('perform-get-missing', attrs=missing_attr, tbl=table_attributes)
# Retrieve missing attributes first (if any)
results_omci = self._results.fields['omci_message'].fields
try:
# Get remaining attributes one at a time
for index in range(15, 1, -1):
attr_mask = 1 << index
if attr_mask & missing_attr:
# Get this attribute
frame = OmciFrame(
transaction_id=None, # OMCI-CC will set
message_type=OmciGet.message_id,
omci_message=OmciGet(
entity_class=self._entity_class.class_id,
entity_id=self._entity_id,
attributes_mask=attr_mask
)
)
self.strobe_watchdog()
get_results = yield self._device.omci_cc.send(frame)
get_omci = get_results.fields['omci_message'].fields
status = get_omci['success_code']
if status == RC.AttributeFailure.value:
# TODO: update failed & unknown attributes set
continue
elif status != RC.Success.value:
raise GetException('Get failed with status code: {}'.format(status))
# assert attr_mask == get_omci['attributes_mask'], 'wrong attribute'
if attr_mask != get_omci['attributes_mask']:
self.log.debug('attr mask does not match expected mask', attr_mask=attr_mask,
expected_mask = get_omci['attributes_mask'])
results_omci['attributes_mask'] |= attr_mask
if results_omci.get('data') is None:
results_omci['data'] = dict()
results_omci['data'].update(get_omci['data'])
except TimeoutError as e:
self.log.debug('missing-timeout')
self.deferred.errback(failure.Failure(e))
except Exception as e:
self.log.exception('missing-failure', class_id=self._entity_class.class_id,
entity_id=self._entity_id, e=e)
self.deferred.errback(failure.Failure(e))
# Now any table attributes
if len(table_attributes):
self.strobe_watchdog()
self._local_deferred = reactor.callLater(0,
self.process_get_table,
table_attributes)
returnValue(self._local_deferred)
self.deferred.callback(self)
@inlineCallbacks
def process_get_table(self, table_attributes):
"""
Special handling for Get Requests that may require additional 'get_next' operations
if a table attribute was requested.
"""
# Retrieve attributes retrieved so far so we can add to them
try:
results_omci = self._results.fields['omci_message'].fields if self._results is not None else {}
for tbl_attr in table_attributes:
attr_mask = self._entity_class.mask_for(tbl_attr)
attr_index = self._entity_class.attribute_indices_from_mask(attr_mask)[0]
frame = OmciFrame(
transaction_id=None, # OMCI-CC will set
message_type=OmciGet.message_id,
omci_message=OmciGet(
entity_class=self._entity_class.class_id,
entity_id=self._entity_id,
attributes_mask=attr_mask
)
)
# First get will retrieve the size
get_results = yield self._device.omci_cc.send(frame)
self.strobe_watchdog()
if self._results is None:
self._results = get_results
results_omci = self._results.fields['omci_message'].fields
omci_fields = get_results.fields['omci_message'].fields
if omci_fields['success_code'] == RC.AttributeFailure.value:
# Copy over any failed or unsupported attribute masks for final result
                    results_fields = results_omci  # results_omci already holds the 'omci_message' fields dict
results_fields['unsupported_attributes_mask'] |= omci_fields['unsupported_attributes_mask']
results_fields['failed_attributes_mask'] |= omci_fields['failed_attributes_mask']
if omci_fields['success_code'] != RC.Success.value:
raise GetException('Get table attribute failed with status code: {}'.
format(omci_fields['success_code']))
eca = self._entity_class.attributes[attr_index]
self.log.debug('omcc-get-table-attribute', table_name=eca.field.name)
attr_size = omci_fields['data'][eca.field.name + '_size']
if attr_size > self.MAX_TABLE_SIZE:
self.log.error('omcc-get-table-huge', count=attr_size, name=eca.field.name)
raise ValueError('Huge Table Size: {}'.format(attr_size))
# Start the loop
seq_no = 0
data_buffer = ''
for offset in xrange(0, attr_size, OmciTableField.PDU_SIZE):
frame = OmciFrame(
transaction_id=None, # OMCI-CC will set
message_type=OmciGetNext.message_id,
omci_message=OmciGetNext(
entity_class=self._entity_class.class_id,
entity_id=self._entity_id,
attributes_mask=attr_mask,
command_sequence_number=seq_no
)
)
get_results = yield self._device.omci_cc.send(frame)
omci_fields = get_results.fields['omci_message'].fields
status = omci_fields['success_code']
if status != ReasonCodes.Success.value:
raise Exception('get-next-failure table=' + eca.field.name +
' entity_id=' + str(self._entity_id) +
' sqn=' + str(seq_no) + ' omci-status ' + str(status))
# Extract the data
num_octets = attr_size - offset
if num_octets > OmciTableField.PDU_SIZE:
num_octets = OmciTableField.PDU_SIZE
data = omci_fields['data'][eca.field.name]
data_buffer += data[:num_octets]
seq_no += 1
vals = []
while data_buffer:
data_buffer, val = eca.field.getfield(None, data_buffer)
vals.append(val)
# Save off the retrieved data
results_omci['attributes_mask'] |= attr_mask
results_omci['data'][eca.field.name] = vals
self.deferred.callback(self)
except TimeoutError as e:
self.log.debug('tbl-attr-timeout')
self.deferred.errback(failure.Failure(e))
except Exception as e:
self.log.exception('tbl-attr-timeout', class_id=self._entity_class.class_id,
entity_id=self._entity_id, e=e)
self.deferred.errback(failure.Failure(e))
avg_line_length 42.978155 | max_line_length 111 | alphanum_fraction 0.578811
Row 795094965c54054960cb03fe472aac8ad3a82726 | size 6,078 | ext py | lang Python
max_stars: ckan/model/meta.py @ kata-csc/ckan (head 53c89b904a773bead8a40c1ef300e90d9d9eda5a) | licenses ["Apache-2.0"] | count 1 | events 2019-12-17T02:16:55.000Z to 2019-12-17T02:16:55.000Z
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count null | events null
content:
import datetime
from paste.deploy.converters import asbool
from pylons import config
"""SQLAlchemy Metadata and Session object"""
from sqlalchemy import MetaData, and_
import sqlalchemy.orm as orm
from sqlalchemy.orm.session import SessionExtension
import extension
import ckan.lib.activity_streams_session_extension as activity
__all__ = ['Session', 'engine_is_sqlite', 'engine_is_pg']
class CkanCacheExtension(SessionExtension):
''' This extension checks what tables have been affected by
database access and allows us to act on them. Currently this is
used by the page cache to flush the cache when data in the database
is altered. '''
def __init__(self, *args, **kw):
super(CkanCacheExtension, self).__init__(*args, **kw)
# Setup Redis support if needed.
self.use_redis = asbool(config.get('ckan.page_cache_enabled'))
if self.use_redis:
import redis
self.redis = redis
            self.redis_connection = None
self.redis_exception = redis.exceptions.ConnectionError
def after_commit(self, session):
if hasattr(session, '_object_cache'):
oc = session._object_cache
oc_list = oc['new']
oc_list.update(oc['changed'])
oc_list.update(oc['deleted'])
objs = set()
for item in oc_list:
objs.add(item.__class__.__name__)
# Flush Redis
if self.use_redis:
if self.redis_connection is None:
try:
self.redis_connection = self.redis.StrictRedis()
except self.redis_exception:
pass
try:
self.redis_connection.flushdb()
except self.redis_exception:
pass
class CkanSessionExtension(SessionExtension):
def before_flush(self, session, flush_context, instances):
if not hasattr(session, '_object_cache'):
session._object_cache= {'new': set(),
'deleted': set(),
'changed': set()}
changed = [obj for obj in session.dirty if
session.is_modified(obj, include_collections=False, passive=True)]
session._object_cache['new'].update(session.new)
session._object_cache['deleted'].update(session.deleted)
session._object_cache['changed'].update(changed)
def before_commit(self, session):
session.flush()
try:
obj_cache = session._object_cache
revision = session.revision
except AttributeError:
return
if getattr(session, 'revisioning_disabled', False):
return
new = obj_cache['new']
changed = obj_cache['changed']
deleted = obj_cache['deleted']
for obj in new | changed | deleted:
if not hasattr(obj, '__revision_class__'):
continue
revision_cls = obj.__revision_class__
revision_table = orm.class_mapper(revision_cls).mapped_table
## when a normal active transaction happens
if 'pending' not in obj.state:
                ### this is a sql statement as we do not want it in object cache
session.execute(
revision_table.update().where(
and_(revision_table.c.id == obj.id,
revision_table.c.current == '1')
).values(current='0')
)
q = session.query(revision_cls)
q = q.filter_by(expired_timestamp=datetime.datetime(9999, 12, 31), id=obj.id)
results = q.all()
for rev_obj in results:
values = {}
if rev_obj.revision_id == revision.id:
values['revision_timestamp'] = revision.timestamp
if 'pending' not in obj.state:
values['current'] = '1'
else:
values['expired_id'] = revision.id
values['expired_timestamp'] = revision.timestamp
session.execute(
revision_table.update().where(
and_(revision_table.c.id == rev_obj.id,
revision_table.c.revision_id == rev_obj.revision_id)
).values(**values)
)
def after_commit(self, session):
if hasattr(session, '_object_cache'):
del session._object_cache
def after_rollback(self, session):
if hasattr(session, '_object_cache'):
del session._object_cache
# __all__ = ['Session', 'engine', 'metadata', 'mapper']
# SQLAlchemy database engine. Updated by model.init_model()
engine = None
Session = orm.scoped_session(orm.sessionmaker(
autoflush=False,
autocommit=False,
expire_on_commit=False,
extension=[CkanCacheExtension(),
CkanSessionExtension(),
extension.PluginSessionExtension(),
activity.DatasetActivitySessionExtension()],
))
create_local_session = orm.sessionmaker(
autoflush=False,
autocommit=False,
expire_on_commit=False,
extension=[CkanCacheExtension(),
CkanSessionExtension(),
extension.PluginSessionExtension(),
activity.DatasetActivitySessionExtension()],
)
#mapper = Session.mapper
mapper = orm.mapper
# Global metadata. If you have multiple databases with overlapping table
# names, you'll need a metadata for each database
metadata = MetaData()
def engine_is_sqlite(sa_engine=None):
# Returns true iff the engine is connected to a sqlite database.
return (sa_engine or engine).url.drivername == 'sqlite'
def engine_is_pg(sa_engine=None):
# Returns true iff the engine is connected to a postgresql database.
# According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
# all Postgres driver names start with `postgresql`
return (sa_engine or engine).url.drivername.startswith('postgresql')
avg_line_length 36.39521 | max_line_length 89 | alphanum_fraction 0.610069
Row 795094e972c8e0aead85ba374073892da9d18eeb | size 5,714 | ext py | lang Python
max_stars: tests/functional/test_ec2.py @ doc-E-brown/botocore (head 6be520a4eeed731652d34a2bcb484fabc1c7c860) | licenses ["Apache-2.0"] | count 2 | events 2019-08-30T17:35:50.000Z to 2020-06-29T21:32:33.000Z
max_issues: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count 5 | events 2021-07-20T11:21:21.000Z to 2022-02-04T13:54:11.000Z
max_forks: same path, repo, and head as max_stars | licenses ["Apache-2.0"] | count 18 | events 2021-02-24T12:41:44.000Z to 2022-03-03T15:07:54.000Z
content:
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import mock
from tests import unittest, ClientHTTPStubber, BaseSessionTest
from botocore.compat import parse_qs, urlparse
from botocore.stub import Stubber, ANY
import botocore.session
class TestIdempotencyToken(unittest.TestCase):
def setUp(self):
self.function_name = 'purchase_scheduled_instances'
self.region = 'us-west-2'
self.session = botocore.session.get_session()
self.client = self.session.create_client(
'ec2', self.region)
self.stubber = Stubber(self.client)
self.service_response = {}
self.params_seen = []
# Record all the parameters that get seen
self.client.meta.events.register_first(
'before-call.*.*',
self.collect_params,
unique_id='TestIdempotencyToken')
def collect_params(self, model, params, *args, **kwargs):
self.params_seen.extend(params['body'].keys())
def test_provided_idempotency_token(self):
expected_params = {
'PurchaseRequests': [
{'PurchaseToken': 'foo',
'InstanceCount': 123}],
'ClientToken': ANY
}
self.stubber.add_response(
self.function_name, self.service_response, expected_params)
with self.stubber:
self.client.purchase_scheduled_instances(
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}],
ClientToken='foobar')
self.assertIn('ClientToken', self.params_seen)
def test_insert_idempotency_token(self):
expected_params = {
'PurchaseRequests': [
{'PurchaseToken': 'foo',
'InstanceCount': 123}],
}
self.stubber.add_response(
self.function_name, self.service_response, expected_params)
with self.stubber:
self.client.purchase_scheduled_instances(
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}])
self.assertIn('ClientToken', self.params_seen)
class TestCopySnapshotCustomization(BaseSessionTest):
def setUp(self):
super(TestCopySnapshotCustomization, self).setUp()
self.session = botocore.session.get_session()
self.client = self.session.create_client('ec2', 'us-east-1')
self.http_stubber = ClientHTTPStubber(self.client)
self.snapshot_id = 'snap-0123abc'
self.copy_response = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<CopySnapshotResponse>\n'
'<snapshotId>%s</snapshotId>\n'
'</CopySnapshotResponse>\n'
)
self.now = datetime.datetime(2011, 9, 9, 23, 36)
self.datetime_patch = mock.patch.object(
botocore.auth.datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
)
self.mocked_datetime = self.datetime_patch.start()
self.mocked_datetime.utcnow.return_value = self.now
def tearDown(self):
super(TestCopySnapshotCustomization, self).tearDown()
self.datetime_patch.stop()
def add_copy_snapshot_response(self, snapshot_id):
body = (self.copy_response % snapshot_id).encode('utf-8')
self.http_stubber.add_response(body=body)
def test_copy_snapshot_injects_presigned_url(self):
self.add_copy_snapshot_response(self.snapshot_id)
with self.http_stubber:
result = self.client.copy_snapshot(
SourceRegion='us-west-2',
SourceSnapshotId=self.snapshot_id,
)
self.assertEqual(result['SnapshotId'], self.snapshot_id)
self.assertEqual(len(self.http_stubber.requests), 1)
snapshot_request = self.http_stubber.requests[0]
body = parse_qs(snapshot_request.body)
self.assertIn('PresignedUrl', body)
presigned_url = urlparse(body['PresignedUrl'][0])
self.assertEqual(presigned_url.scheme, 'https')
self.assertEqual(presigned_url.netloc, 'ec2.us-west-2.amazonaws.com')
query_args = parse_qs(presigned_url.query)
self.assertEqual(query_args['Action'], ['CopySnapshot'])
self.assertEqual(query_args['Version'], ['2016-11-15'])
self.assertEqual(query_args['SourceRegion'], ['us-west-2'])
self.assertEqual(query_args['DestinationRegion'], ['us-east-1'])
self.assertEqual(query_args['SourceSnapshotId'], [self.snapshot_id])
self.assertEqual(query_args['X-Amz-Algorithm'], ['AWS4-HMAC-SHA256'])
expected_credential = 'access_key/20110909/us-west-2/ec2/aws4_request'
self.assertEqual(query_args['X-Amz-Credential'], [expected_credential])
self.assertEqual(query_args['X-Amz-Date'], ['20110909T233600Z'])
self.assertEqual(query_args['X-Amz-Expires'], ['3600'])
self.assertEqual(query_args['X-Amz-SignedHeaders'], ['host'])
expected_signature = (
'a94a6b52afdf3daa34c2e2a38a62b72c8dac129c9904c61aa1a5d86e38628537'
)
self.assertEqual(query_args['X-Amz-Signature'], [expected_signature])
| 42.014706
| 79
| 0.650508
|
795095b487efdfc4cdf95863e2f22eedec9c1e77
| 80
|
py
|
Python
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeJsonPathPythonTest.shouldFailReadingString.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 27
|
2015-02-15T22:01:39.000Z
|
2022-03-02T05:41:29.000Z
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeJsonPathPythonTest.shouldFailReadingString.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 101
|
2015-06-05T06:53:56.000Z
|
2022-02-28T19:32:44.000Z
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeJsonPathPythonTest.shouldFailReadingString.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 25
|
2015-05-26T21:28:42.000Z
|
2021-07-06T10:04:01.000Z
|
jsonNode = S(input, "application/json")
jsonNode.jsonPath('$.id').stringValue()
| 26.666667
| 39
| 0.725
|
795095f02af881044ceff5a596af3d14cea16855
| 364
|
py
|
Python
|
src/utils/definitions.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | 5
|
2022-01-04T01:27:18.000Z
|
2022-02-10T13:43:01.000Z
|
src/utils/definitions.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/definitions.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | 2
|
2022-01-03T10:20:08.000Z
|
2022-01-19T05:54:19.000Z
|
"""
@brief Please, put all the user specific hyperparameters and paths here.
"""
# PATHS
SAVE_DIR = './runs'
# Folder where the persistent dataset is saved.
# I recommend keeping it in /tmp, otherwise you can get the error:
# OSError: [Errno 18] Invalid cross-device link
CACHE_DIR = '/tmp/monai_cache_dir'
# POST PROCESSING FOR BRATS (INFERENCE)
THRESHOLD_ET = 50
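# A minimal sketch of how CACHE_DIR is typically consumed (assuming MONAI's
# PersistentDataset API; train_files and train_transform are placeholders):
#   from monai.data import PersistentDataset
#   dataset = PersistentDataset(data=train_files, transform=train_transform,
#                               cache_dir=CACHE_DIR)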
| 26
| 73
| 0.733516
|
7950960cf3c120d80a0faed7509e91ca326c8c53
| 572
|
py
|
Python
|
examples/classification.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
examples/classification.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
examples/classification.py
|
HelloChatterbox/little_questions
|
04bee86244b42fdaed9f8d010c2f83037ad753f6
|
[
"MIT"
] | null | null | null |
from little_questions.classifiers import QuestionClassifier
from little_questions.classifiers import MainQuestionClassifier
from little_questions.classifiers import SentenceClassifier
classifier = QuestionClassifier()
question = "who made you"
preds = classifier.predict([question])
assert preds[0] == "HUM:ind"
classifier = MainQuestionClassifier()
question = "who made you"
preds = classifier.predict([question])
assert preds[0] == "HUM"
classifier = SentenceClassifier()
question = "who made you"
preds = classifier.predict([question])
assert preds[0] == "question"
| 30.105263
| 63
| 0.793706
|
7950979eb8e44cd810a1423a6116062df85f6449
| 2,860
|
py
|
Python
|
data/load_data.py
|
iamrishab/LPRNet_Pytorch
|
b5f4b4c159d5c80b9b9e81a8eed65f4b4d79f96b
|
[
"Apache-2.0"
] | null | null | null |
data/load_data.py
|
iamrishab/LPRNet_Pytorch
|
b5f4b4c159d5c80b9b9e81a8eed65f4b4d79f96b
|
[
"Apache-2.0"
] | null | null | null |
data/load_data.py
|
iamrishab/LPRNet_Pytorch
|
b5f4b4c159d5c80b9b9e81a8eed65f4b4d79f96b
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import *
from imutils import paths
import numpy as np
import random
import cv2
import os
# CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
# '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
# '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
# '新',
# '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
# 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
# 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
# 'W', 'X', 'Y', 'Z', 'I', 'O', '-'
# ]
CHARS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z'
]
CHARS_DICT = {char:i for i, char in enumerate(CHARS)}
class LPRDataLoader(Dataset):
def __init__(self, img_dir, imgSize, lpr_max_len, PreprocFun=None):
# import pdb; pdb.set_trace()
self.img_dir = img_dir
self.img_paths = [os.path.join(img_dir, file) for file in os.listdir(img_dir)]
random.shuffle(self.img_paths)
self.img_size = imgSize
self.lpr_max_len = lpr_max_len
if PreprocFun is not None:
self.PreprocFun = PreprocFun
else:
self.PreprocFun = self.transform
def __len__(self):
return len(self.img_paths)
def __getitem__(self, index):
filename = self.img_paths[index]
# print('Processing image:', filename)
Image = cv2.imread(filename)
height, width, _ = Image.shape
if height != self.img_size[1] or width != self.img_size[0]:
Image = cv2.resize(Image, self.img_size)
Image = self.PreprocFun(Image)
basename = os.path.basename(filename)
imgname, suffix = os.path.splitext(basename)
# imgname = imgname.split("-")[0].split("_")[0]
imgname = imgname.split('.')[0].strip().replace(' ', '')
label = list()
for c in imgname:
# one_hot_base = np.zeros(len(CHARS))
# one_hot_base[CHARS_DICT[c]] = 1
label.append(CHARS_DICT[c])
if len(label) == 8:
if self.check(label) == False:
print(imgname)
assert 0, "Error label ^~^!!!"
return Image, label, len(label)
def transform(self, img):
img = img.astype('float32')
img -= 127.5
img *= 0.0078125
img = np.transpose(img, (2, 0, 1))
return img
# TODO: check this part
def check(self, label):
if label[2] != CHARS_DICT['D'] and label[2] != CHARS_DICT['F'] \
and label[-1] != CHARS_DICT['D'] and label[-1] != CHARS_DICT['F']:
print("Error label, Please check!")
return False
else:
return True
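# Minimal usage sketch: the image directory and the size values below are
# placeholders. batch_size=1 avoids the need for a custom collate_fn, since
# the labels produced above have variable length.
if __name__ == '__main__':
    demo_set = LPRDataLoader('./plates', imgSize=(94, 24), lpr_max_len=8)
    demo_loader = DataLoader(demo_set, batch_size=1, shuffle=True)
    for image, label, length in demo_loader:
        print(image.shape, label, length)
        break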
| 33.647059
| 86
| 0.47972
|
7950998a4d99541f8db83776e0b7d9a0b4224885
| 2,805
|
py
|
Python
|
data.py
|
levtelyatnikov/SVM-from-scratch
|
4107776925ac72431c3e56345da35d28e9ee4c68
|
[
"MIT"
] | 2
|
2022-01-09T13:48:17.000Z
|
2022-01-27T09:34:17.000Z
|
data.py
|
CopurOnur/SVM_from_scratch
|
c0faa68c0352eb65cde0d12fd5fc543bc985f5a0
|
[
"MIT"
] | null | null | null |
data.py
|
CopurOnur/SVM_from_scratch
|
c0faa68c0352eb65cde0d12fd5fc543bc985f5a0
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import gzip
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
"""
@author: Diego
"""
"""
This is the code you need to run to import the data.
You may have to change line 36 to select the correct path.
"""
def load_mnist(path, kind='train'):
path = os.path.dirname(os.path.realpath(__file__))
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte.gz'
% kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte.gz'
% kind)
with gzip.open(labels_path, 'rb') as lbpath:
labels = np.frombuffer(lbpath.read(), dtype=np.uint8,
offset=8)
with gzip.open(images_path, 'rb') as imgpath:
images = np.frombuffer(imgpath.read(), dtype=np.uint8,
offset=16).reshape(len(labels), 784)
return images, labels
def get_data(type_problem):
cwd = os.path.dirname(os.path.realpath(__file__))
scaler = StandardScaler()
X_all_labels, y_all_labels = load_mnist(cwd, kind='train')
"""
    We are only interested in the items with labels 3, 8 and 6.
Only a subset of 1000 samples per class will be used.
"""
indexLabel3 = np.where((y_all_labels==3))
xLabel3 = X_all_labels[indexLabel3][:1000,:].astype('float64')
yLabel3 = y_all_labels[indexLabel3][:1000].astype('float64')
indexLabel8 = np.where((y_all_labels==8))
xLabel8 = X_all_labels[indexLabel8][:1000,:].astype('float64')
yLabel8 = y_all_labels[indexLabel8][:1000].astype('float64')
indexLabel6 = np.where((y_all_labels==6))
xLabel6 = X_all_labels[indexLabel6][:1000,:].astype('float64')
yLabel6 = y_all_labels[indexLabel6][:1000].astype('float64')
if type_problem == "binary":
X = np.concatenate([xLabel3, xLabel8], axis = 0)
y = np.where(np.concatenate([yLabel3, yLabel8], axis = 0) == 3, 1, -1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 1902392)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test, y_train, y_test
if type_problem == 'multyclass':
X = np.concatenate([xLabel3, xLabel8, xLabel6])
y = np.concatenate([yLabel3, yLabel8, yLabel6])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 1902392)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test, y_train, y_test
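# Minimal usage sketch: load the binary 3-vs-8 split defined above. It assumes
# the gzipped MNIST files sit next to this script.
if __name__ == '__main__':
    X_train, X_test, y_train, y_test = get_data("binary")
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)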
| 34.62963
| 104
| 0.618182
|
795099b5b68375bf83b1e291c61e0d797f596176
| 3,917
|
py
|
Python
|
macke/llvm_wrapper.py
|
aheroine/use-macke
|
2a8fafa79f80711f068f97d30f8ca83ff3b32ea0
|
[
"Apache-2.0"
] | 2
|
2019-07-21T15:19:48.000Z
|
2019-07-21T15:23:06.000Z
|
macke/llvm_wrapper.py
|
aheroine/use-macke
|
2a8fafa79f80711f068f97d30f8ca83ff3b32ea0
|
[
"Apache-2.0"
] | null | null | null |
macke/llvm_wrapper.py
|
aheroine/use-macke
|
2a8fafa79f80711f068f97d30f8ca83ff3b32ea0
|
[
"Apache-2.0"
] | null | null | null |
"""
Functions that wrap all LLVM actions and transformations into Python functions
"""
import json
import subprocess
from .config import LIBMACKEOPT, LLVMOPT
def __run_subprocess(popenargs):
"""
    Starts a subprocess with popenargs and returns its output
"""
return subprocess.check_output(popenargs)
def __run_subprocess_json_output(popenargs):
"""
Starts a subprocess with popenargs and returns the output as parsed json
"""
out = __run_subprocess(popenargs)
return json.loads(out.decode("utf-8"))
def list_all_funcs_topological(bitcodefile):
"""
    Wrapper around the list-all-functions pass. Any cycles or strongly
    connected components are listed alphabetically in nested lists
"""
return __run_subprocess_json_output([
LLVMOPT, "-load", LIBMACKEOPT,
"-listallfuncstopologic", bitcodefile,
"-disable-output"])
def extract_callgraph(bitcodefile):
"""
Wrapper around the extract callgraph pass
"""
return __run_subprocess_json_output([
LLVMOPT, "-load", LIBMACKEOPT,
"-extractcallgraph", bitcodefile,
"-disable-output"])
def encapsulate_symbolic(sourcefile, function, destfile=None):
"""
Wrapper around the encapsulate symbolic pass
"""
# If no destfile is given, just modify the source file
if destfile is None:
destfile = sourcefile
return __run_subprocess([
LLVMOPT, "-load", LIBMACKEOPT,
"-encapsulatesymbolic", sourcefile,
"-encapsulatedfunction", function, "-o", destfile])
def prepend_error_from_dir(sourcefile, function, errordirlist, destfile=None):
"""
Wrapper around the prepend error pass
"""
# Reject empty error lists
assert errordirlist
# If no destfile is given, just modify the source file
if destfile is None:
destfile = sourcefile
errordirflags = []
for errordir in errordirlist:
errordirflags.append("-previouskleerundirectory")
errordirflags.append(errordir)
return __run_subprocess([
LLVMOPT, "-load", LIBMACKEOPT, "-preprenderror", sourcefile,
"-prependtofunction", function] + errordirflags + ["-o", destfile])
def prepend_error_from_ktest(sourcefile, function, ktestlist, destfile=None):
"""
Wrapper around the prepend error pass
"""
# Reject empty ktest lists
assert ktestlist
print('ktestlist',ktestlist)
# If no destfile is given, just modify the source file
if destfile is None:
destfile = sourcefile
ktestflags = []
for ktest in ktestlist:
ktestflags.append("-errorfiletoprepend")
ktestflags.append(ktest)
return __run_subprocess([
LLVMOPT, "-load", LIBMACKEOPT, "-preprenderror", sourcefile,
"-prependtofunction", function] + ktestflags + ["-o", destfile])
def remove_unreachable_from(entrypoint, sourcefile, destfile=None):
"""
Internalize everything except entrypoint and remove unused code
"""
# If no destfile is given, just modify the source file
if destfile is None:
destfile = sourcefile
return __run_subprocess([
LLVMOPT, "-internalize-public-api-list=%s" % entrypoint, sourcefile,
"-internalize", "-globalopt", "-globaldce", "-o", destfile])
def optimize_redundant_globals(sourcefile, destfile=None):
"""
Runs an llvm opt pass, that merges all globals with identical content
"""
# If no destfile is given, just modify the source file
if destfile is None:
destfile = sourcefile
return __run_subprocess([
LLVMOPT, "-constmerge", sourcefile, "-o", destfile])
def extract_lines_of_code(bitcodefile):
"""
Extract all lines of code represented inside a bitcode file
"""
return __run_subprocess_json_output([
LLVMOPT, "-load", LIBMACKEOPT,
"-extractlinesofcode", bitcodefile, "-disable-output"])
| 29.014815
| 79
| 0.684708
|
79509abad52e3f2a05f8334edb6c99a531e66097
| 4,610
|
py
|
Python
|
Occlusion_Sensitivity/occlusion_sensitivity.py
|
rao208/explainable_ai
|
515343696b55f4fa466daca4991d68aae6e12fd2
|
[
"Unlicense",
"MIT"
] | 4
|
2020-11-14T00:17:14.000Z
|
2022-03-03T14:47:54.000Z
|
Occlusion_Sensitivity/occlusion_sensitivity.py
|
rao208/explainable_ai
|
515343696b55f4fa466daca4991d68aae6e12fd2
|
[
"Unlicense",
"MIT"
] | null | null | null |
Occlusion_Sensitivity/occlusion_sensitivity.py
|
rao208/explainable_ai
|
515343696b55f4fa466daca4991d68aae6e12fd2
|
[
"Unlicense",
"MIT"
] | 1
|
2022-02-17T09:42:58.000Z
|
2022-02-17T09:42:58.000Z
|
"""
Created on Fri Aug 7 03:14:52 2020
@author: Vanditha Rao
Highly inspired by https://github.com/sicara/tf-explain
This script allows the user to implement occlusion sensitivity. Image file can
be of any format.
This script requires that tensorflow and OpenCV be installed within the python
environment you are running this script in.
Here, tensorflow version 2.2, cv2 version 4.2.0 and python version 3.7.7 are
used. This file is imported as a module and contains functions which implement
occlusion sensitivity.
"""
import numpy as np
import cv2
import math
class OcclusionSensitivity:
"""
Perform Occlusion Sensitivity for a given input
"""
def __init__(self, batch_size = None):
self.batch_size = batch_size
def apply_grey_patch(self, img, h, w, patch_size,
occluding_pixel, occluding_stride):
"""
Replace a part of the image with a grey patch.
Args:
img: numpy.ndarray
Input image
h: int
Top Left X position of the applied box
w: int
Top Left Y position of the applied box
patch_size: int
Size of patch to apply
occluding_pixel: float
the pixel value of the patched area
occluding_stride: float
the amount of movement of the grey patch over the image
Returns:
patched_image: numpy.ndarray
image with grey patch
"""
width, height, _ = img.shape
h_start = h * occluding_stride
w_start = w * occluding_stride
h_end = min(height, h_start + patch_size)
w_end = min(width, w_start + patch_size)
# Getting the image copy, applying the occluding window and classifying it again:
patched_image = np.copy(img)
patched_image[h_start:h_end, w_start:w_end,:] = occluding_pixel
return patched_image
def explain(self,
original_img,
aug_img,
model,
class_index,
patch_size,
occluding_stride,
occluding_pixel,
colormap='viridis',):
"""
Compute sensitivity map on a given image for a specific class index.
        Args:
            original_img: numpy.ndarray
                Original input image, used to size the returned map
            aug_img: numpy.ndarray
                Preprocessed image that is fed to the model
            model: tf.keras.Model
                the model to inspect
            class_index: int
                Index of targeted class
            patch_size: int
                Size of patch to apply on the image
            occluding_pixel: float
                the pixel value of the patched area
            occluding_stride: float
                the amount of movement of the grey patch over the image
Returns:
sensitivity_map: np.ndarray
Sensitivity map with shape (H, W)
"""
width, height, _ = aug_img.shape
output_height = int(math.ceil((height-patch_size) / occluding_stride + 1))
output_width = int(math.ceil((width-patch_size) / occluding_stride + 1))
sensitivity_map = np.zeros((output_height, output_width))
patches = [self.apply_grey_patch(aug_img, h, w, patch_size, occluding_pixel,
occluding_stride)
for h in range(output_height)
for w in range(output_width)]
coordinates = [(index_y, index_x)
for index_x in range(sensitivity_map.shape[1])
for index_y in range(sensitivity_map.shape[0])]
out = model.predict(np.array(patches), self.batch_size)
target_class_predictions = [prediction[class_index]
for prediction in out]
for (index_x, index_y), confidence in zip(coordinates, target_class_predictions):
sensitivity_map[index_y, index_x] = confidence
sensitivity_map = cv2.resize(sensitivity_map, (original_img.shape[1], original_img.shape[0]))
return sensitivity_map
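# Minimal usage sketch (model, images and the hyper-parameter values below are
# placeholders; any tf.keras classifier works the same way):
#   explainer = OcclusionSensitivity(batch_size=32)
#   sensitivity = explainer.explain(original_img=img, aug_img=preprocessed_img,
#                                   model=model, class_index=0, patch_size=16,
#                                   occluding_stride=8, occluding_pixel=0.0)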
| 30.733333
| 102
| 0.529501
|
79509ae0de663c69b13b3aa40296a01c2a31c785
| 5,077
|
py
|
Python
|
chase/simulation.py
|
Motwg/WolfAndSheep-2019
|
d6c50660368661fddf88dc860caac7236a791beb
|
[
"MIT"
] | null | null | null |
chase/simulation.py
|
Motwg/WolfAndSheep-2019
|
d6c50660368661fddf88dc860caac7236a791beb
|
[
"MIT"
] | null | null | null |
chase/simulation.py
|
Motwg/WolfAndSheep-2019
|
d6c50660368661fddf88dc860caac7236a791beb
|
[
"MIT"
] | null | null | null |
import csv
import json
import logging
import math
import random as ran
def distance(point1, point2):
logging.debug("Args: {0}".format(locals()))
if type(point1) != type(point2):
logging.warning("Types of given arguments are different: {0} != {1}".format(point1, point2))
logging.debug("Returns: {0}".format(((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5))
return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5
class Animal:
def __init__(self, id, x, y, move_dist):
logging.info("{0}:[{1}, {2}]".format(id, x, y))
self.id = id
self.x = x
self.y = y
self.move_dist = move_dist
def __lt__(self, other):
return self.id < other.id
def move(self, x, y):
logging.info("{0}:[{1}, {2}] => [{3}, {4}]".format(self.id, self.x, self.y, self.x+x, self.y+y))
self.x += x
self.y += y
def move_in_direction(self, direction):
if direction == 0:
self.move(0, self.move_dist)
elif direction == 1:
self.move(0, -self.move_dist)
elif direction == 2:
self.move(self.move_dist, 0)
elif direction == 3:
self.move(-self.move_dist, 0)
elif type(direction) == Animal:
degrees = math.atan2(direction.y-self.y, direction.x-self.x)
self.move(
self.move_dist * math.cos(degrees),
self.move_dist * math.sin(degrees)
)
def move_in_random_direction(self):
self.move_in_direction(ran.randint(0, 3))
def distance(self, animal):
return distance([self.x, self.y], [animal.x, animal.y])
def find_the_closest_animal(self, animals):
dist = self.distance(animals[0])
closest = animals[0]
for animal in animals:
new_dist = distance([self.x, self.y], [animal.x, animal.y])
if dist > new_dist:
dist = new_dist
closest = animal
return closest
def eaten(self):
logging.info("Eaten: {0}:[{1}, {2}]".format(self.id, self.x, self.y))
self.x = None
self.y = None
def get_pos(self):
return [self.x, self.y]
@staticmethod
def generate_animals(animals_number,
move_range,
spawn_range=10.0):
logging.debug("Args: {0}".format(locals()))
new_animals = []
for s in range(animals_number):
new_animals.append(Animal(
s + 1,
ran.random() * spawn_range * 2 - spawn_range,
ran.random() * spawn_range * 2 - spawn_range,
move_range))
logging.debug("Returns: {0}".format(new_animals))
return new_animals
def save_json(json_data, filename='pos.json', save_dir='.'):
logging.debug("Args: {0}".format(locals()))
with open(save_dir+"/"+filename, 'w') as json_file:
json.dump(json_data, json_file)
def save_csv(csv_data=None, filename='alive.csv', opening_parameter='a', save_dir='.'):
logging.debug("Args: {0}".format(locals()))
with open(save_dir+"/"+filename, opening_parameter, newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
if csv_data is not None:
writer.writerow(csv_data)
def simulate(wolves_sim, sheep_sim, turns_number=50, save_dir='.', wait=False):
logging.debug("Args: {0}".format(locals()))
sheep_eaten = []
    save_csv(None, 'alive.csv', 'w', save_dir)  # overwrite the file
for t in range(turns_number):
for s in sheep_sim:
s.move_in_random_direction()
for w in wolves_sim:
closest = w.find_the_closest_animal(sheep_sim)
if w.distance(closest) <= w.move_dist:
w.x = closest.x
w.y = closest.y
closest.eaten()
sheep_index = closest.id
sheep_eaten.append(closest)
sheep_sim.remove(closest)
else:
w.move_in_direction(closest)
sheep_index = None
print("Turn: {0}\n"
"Wolf position: {1}\n"
"Sheep alive: {2}\n"
"Eaten sheep: {3}".format(t + 1, wolves_sim[0].get_pos(), len(sheep_sim), sheep_index))
        # save json and csv
pos = {
'round_no': t + 1,
'wolf_pos': wolves_sim[0].get_pos(),
'sheep_pos': list(map(Animal.get_pos, sorted(sheep_sim+sheep_eaten)))
}
save_json(pos, 'pos.json', save_dir)
save_csv([t+1, len(sheep_sim)], 'alive.csv', 'a', save_dir)
        # wait for a key press
if wait:
input("Press Enter to continue...")
        # if the sheep population drops to 0 => end of the simulation
if len(sheep_sim) == 0:
logging.info("Wolf ate every sheep. End of simulation.")
break
logging.debug("Returns: {0}".format(sheep_eaten))
return sheep_eaten
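# Minimal usage sketch: one wolf chasing ten sheep for 20 turns, writing
# pos.json and alive.csv into the current directory (values are illustrative).
if __name__ == '__main__':
    wolves = Animal.generate_animals(1, move_range=1.0)
    sheep = Animal.generate_animals(10, move_range=0.5)
    eaten = simulate(wolves, sheep, turns_number=20, save_dir='.')
    print("Sheep eaten:", len(eaten))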
| 32.33758
| 104
| 0.554855
|
79509c6f1909659ede6b699a6f1464b72b4bb78c
| 1,103
|
py
|
Python
|
openaerostruct/aerodynamics/tests/test_rotational_velocity.py
|
eytanadler/OpenAeroStruct
|
a4f482c2bd09a504d378f144da95cc91053b9f3b
|
[
"Apache-2.0"
] | 114
|
2017-04-06T15:24:19.000Z
|
2022-03-21T09:57:43.000Z
|
openaerostruct/aerodynamics/tests/test_rotational_velocity.py
|
eytanadler/OpenAeroStruct
|
a4f482c2bd09a504d378f144da95cc91053b9f3b
|
[
"Apache-2.0"
] | 322
|
2017-04-07T01:40:03.000Z
|
2022-03-17T21:50:52.000Z
|
openaerostruct/aerodynamics/tests/test_rotational_velocity.py
|
eytanadler/OpenAeroStruct
|
a4f482c2bd09a504d378f144da95cc91053b9f3b
|
[
"Apache-2.0"
] | 83
|
2017-04-06T16:53:26.000Z
|
2022-03-19T19:34:05.000Z
|
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_check_partials
from openaerostruct.aerodynamics.rotational_velocity import RotationalVelocity
from openaerostruct.utils.testing import run_test, get_default_surfaces
class Test(unittest.TestCase):
def test(self):
surfaces = get_default_surfaces()
comp = RotationalVelocity(surfaces=surfaces)
run_test(self, comp)
def test_rotation_option_derivatives(self):
surfaces = get_default_surfaces()
comp = RotationalVelocity(surfaces=surfaces)
prob = om.Problem()
prob.model.add_subsystem("comp", comp)
prob.setup(force_alloc_complex=True)
prob["comp.omega"] = np.array([0.3, 0.4, -0.1])
prob["comp.cg"] = np.array([0.1, 0.6, 0.4])
prob["comp.coll_pts"] = np.random.random(prob["comp.coll_pts"].shape)
prob.run_model()
check = prob.check_partials(compact_print=True, method="cs", step=1e-40)
assert_check_partials(check)
if __name__ == "__main__":
unittest.main()
| 26.902439
| 80
| 0.69447
|
79509d003de81579d1644f88d577920bfb76c9a7
| 21,777
|
py
|
Python
|
tefla/core/learning_ss.py
|
subex/Tefla
|
34f8fd0e2f2ee02aa73c6289753e08a95cc41880
|
[
"MIT"
] | 40
|
2017-09-10T17:11:17.000Z
|
2022-02-01T17:40:53.000Z
|
tefla/core/learning_ss.py
|
openAGI/tefla
|
7dc7648198ef7a6884a3d5081836152515c1aebc
|
[
"MIT"
] | 21
|
2018-06-21T09:58:04.000Z
|
2022-03-11T23:11:37.000Z
|
tefla/core/learning_ss.py
|
subex/Tefla
|
34f8fd0e2f2ee02aa73c6289753e08a95cc41880
|
[
"MIT"
] | 11
|
2017-10-13T13:10:02.000Z
|
2020-08-17T07:07:53.000Z
|
# -------------------------------------------------------------------#
# Written by Mrinal Haloi
# Contact: mrinal.haloi11@gmail.com
# Copyright 2017, Mrinal Haloi
# -------------------------------------------------------------------#
from __future__ import division, print_function, absolute_import
import os
import time
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from . import logger as log
from . import summary as summary
from .base import Base
from ..utils import util
TRAINING_BATCH_SUMMARIES = 'training_batch_summaries'
TRAINING_EPOCH_SUMMARIES = 'training_epoch_summaries'
VALIDATION_BATCH_SUMMARIES = 'validation_batch_summaries'
VALIDATION_EPOCH_SUMMARIES = 'validation_epoch_summaries'
class SemiSupervisedTrainer(Base):
"""Semi Supervised Trainer.
Args:
model: model definition
cnf: dict, training configs
training_iterator: iterator to use for training data access, processing and augmentations
validation_iterator: iterator to use for validation data access, processing and augmentations
start_epoch: int, training start epoch; for resuming training provide the last
          epoch number to resume training from, it's a required parameter for training data balancing
resume_lr: float, learning rate to use for new training
      classification: bool, classification or regression
clip_norm: bool, to clip gradient using gradient norm, stabilizes the training
      n_iters_per_epoch: int, number of iterations for each epoch;
e.g: total_training_samples/batch_size
gpu_memory_fraction: amount of gpu memory to use
is_summary: bool, to write summary or not
"""
def __init__(self, model, cnf, clip_by_global_norm=False, **kwargs):
self.clip_by_global_norm = clip_by_global_norm
super(SemiSupervisedTrainer, self).__init__(model, cnf, **kwargs)
def fit(self,
data_set,
num_classes=6,
weights_from=None,
start_epoch=1,
summary_every=199,
model_name='multiclass_ss',
weights_dir='weights'):
"""Train the model on the specified dataset.
Args:
data_set: dataset instance to use to access data for training/validation
        weights_from: str, if not None, initializes model from existing weights
start_epoch: int, epoch number to start training from
          e.g. for retraining set the epoch number you want to resume training from
summary_every: int, epoch interval to write summary; higher value means lower frequency
of summary writing
"""
with tf.Graph().as_default(), tf.device('/gpu:0'):
self._setup_model_loss(num_classes=num_classes)
if self.is_summary:
self._setup_summaries(self.capped_d_grads, self.capped_g_grads)
self._setup_misc()
self._print_info(data_set)
self._train_semi_supervised(data_set, start_epoch, weights_from, summary_every, model_name,
weights_dir)
def _train_semi_supervised(self, dataset, start_epoch, weights_from, summary_every, model_name,
weights_dir):
training_X, training_y, validation_X, validation_y = \
dataset.training_X, dataset.training_y, dataset.validation_X, dataset.validation_y
if not os.path.exists(weights_dir):
os.mkdir(weights_dir)
if not os.path.exists(weights_dir + '/best_models'):
os.mkdir(weights_dir + '/best_models')
# Create a saver.
saver = tf.train.Saver(max_to_keep=None)
if self.is_summary:
training_batch_summary_op = tf.merge_all_summaries(key=TRAINING_BATCH_SUMMARIES)
training_epoch_summary_op = tf.merge_all_summaries(key=TRAINING_EPOCH_SUMMARIES)
validation_batch_summary_op = tf.merge_all_summaries(key=VALIDATION_BATCH_SUMMARIES)
validation_epoch_summary_op = tf.merge_all_summaries(key=VALIDATION_EPOCH_SUMMARIES)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=self.cnf.get('gpu_memory_fraction', 0.9))
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
sess.run(init)
if start_epoch > 1:
weights_from = "weights/model-epoch-%d.ckpt" % (start_epoch - 1)
if weights_from:
self._load_weights(sess, saver, weights_from)
learning_rate_value = self.lr_policy.initial_lr
log.info("Initial learning rate: %f " % learning_rate_value)
if self.is_summary:
train_writer, validation_writer = summary.create_summary_writer(
self.cnf.get('summary_dir', '/tmp/tefla-summary'), sess)
# keep track of maximum accuracy and auroc and save corresponding
# weights
training_history = []
seed_delta = 100
batch_iter_idx = 1
n_iters_per_epoch = len(dataset.training_X) // self.training_iterator.batch_size
self.lr_policy.n_iters_per_epoch = n_iters_per_epoch
for epoch in range(start_epoch, self.cnf.get('mum_epochs', 550) + 1):
np.random.seed(epoch + seed_delta)
tf.set_random_seed(epoch + seed_delta)
tic = time.time()
d_train_losses = []
g_train_losses = []
batch_train_sizes = []
for batch_num, (Xb, yb) in enumerate(self.training_iterator(training_X, training_y)):
if Xb.shape[0] < self.cnf['batch_size_train']:
continue
feed_dict_train = {
self.inputs: Xb,
self.labels: yb,
self.learning_rate_d: learning_rate_value,
self.learning_rate_g: learning_rate_value
}
log.debug('1. Loading batch %d data done.' % batch_num)
if epoch % summary_every == 0 and self.is_summary:
log.debug('2. Running training steps with summary...')
_, _d_loss_real, _d_loss_fake, _d_loss_class, summary_str_train = sess.run(
[
self.train_op_d, self.d_loss_real, self.d_loss_fake, self.d_loss_class,
training_batch_summary_op
],
feed_dict=feed_dict_train)
_, _g_loss = sess.run([self.train_op_g, self.g_losses[0]], feed_dict=feed_dict_train)
train_writer.add_summary(summary_str_train, epoch)
train_writer.flush()
log.debug('2. Running training steps with summary done.')
log.info("Epoch %d, Batch %d D_loss_real: %s, D_loss_fake: %s,D_loss_class: %s, G_loss: %s"
% (epoch, batch_num, _d_loss_real, _d_loss_fake, _d_loss_class, _g_loss))
else:
log.debug('2. Running training steps without summary...')
_, _d_loss_real, _d_loss_fake, _d_loss_class = sess.run(
[self.train_op_d, self.d_loss_real, self.d_loss_fake, self.d_loss_class],
feed_dict=feed_dict_train)
_, _g_loss = sess.run([self.train_op_g, self.g_losses[0]], feed_dict=feed_dict_train)
log.debug('2. Running training steps without summary done.')
d_train_losses.append(_d_loss_real + _d_loss_fake + _d_loss_class)
g_train_losses.append(_g_loss)
batch_train_sizes.append(len(Xb))
learning_rate_value = self.lr_policy.batch_update(learning_rate_value, batch_iter_idx)
batch_iter_idx += 1
log.debug('4. Training batch %d done.' % batch_num)
d_avg_loss = np.average(d_train_losses, weights=batch_train_sizes)
g_avg_loss = np.average(g_train_losses, weights=batch_train_sizes)
log.info("Epoch %d, D_avg_loss: %s, G_avg_loss %s" % (epoch, d_avg_loss, g_avg_loss))
# Plot training loss every epoch
log.debug('5. Writing epoch summary...')
if self.is_summary:
summary_str_train = sess.run(
training_epoch_summary_op,
feed_dict={
self.epoch_loss: d_avg_loss,
self.epoch_loss_g: g_avg_loss,
self.learning_rate_d: learning_rate_value,
self.learning_rate_g: learning_rate_value
})
train_writer.add_summary(summary_str_train, epoch)
train_writer.flush()
log.debug('5. Writing epoch summary done.')
# Validation prediction and metrics
validation_losses = []
batch_validation_metrics = [[] for _, _ in self.validation_metrics_def]
epoch_validation_metrics = []
batch_validation_sizes = []
for batch_num, (validation_Xb, validation_y_true) in enumerate(
self.validation_iterator(validation_X, validation_y)):
feed_dict_val = {self.inputs: validation_Xb, self.labels: validation_y_true}
log.debug('6. Loading batch %d validation data done.' % batch_num)
if (epoch - 1) % summary_every == 0 and self.is_summary:
log.debug('7. Running validation steps with summary...')
validation_y_pred, _val_loss, summary_str_validation = sess.run(
[self.predictions, self.test_loss, validation_batch_summary_op],
feed_dict=feed_dict_val)
validation_writer.add_summary(summary_str_validation, epoch)
validation_writer.flush()
log.debug('7. Running validation steps with summary done.')
log.debug("Epoch %d, Batch %d validation loss: %s" % (epoch, batch_num, _val_loss))
log.debug("Epoch %d, Batch %d validation predictions: %s" % (epoch, batch_num,
validation_y_pred))
else:
log.debug('7. Running validation steps without summary...')
validation_y_pred, _val_loss = sess.run([self.predictions, self.test_loss],
feed_dict=feed_dict_val)
log.debug('7. Running validation steps without summary done.')
validation_losses.append(_val_loss)
batch_validation_sizes.append(len(validation_Xb))
for i, (_, metric_function) in enumerate(self.validation_metrics_def):
metric_score = metric_function(validation_y_true, validation_y_pred)
batch_validation_metrics[i].append(metric_score)
log.debug('8. Validation batch %d done' % batch_num)
epoch_validation_loss = np.average(validation_losses, weights=batch_validation_sizes)
for i, (_, _) in enumerate(self.validation_metrics_def):
epoch_validation_metrics.append(
np.average(batch_validation_metrics[i], weights=batch_validation_sizes))
log.debug('9. Writing epoch validation summary...')
if self.is_summary:
summary_str_validate = sess.run(
validation_epoch_summary_op,
feed_dict={
self.epoch_loss: epoch_validation_loss,
self.validation_metric_placeholders: epoch_validation_metrics
})
validation_writer.add_summary(summary_str_validate, epoch)
validation_writer.flush()
log.debug('9. Writing epoch validation summary done.')
custom_metrics_string = [
', %s: %.3f' % (name, epoch_validation_metrics[i])
for i, (name, _) in enumerate(self.validation_metrics_def)
]
custom_metrics_string = ''.join(custom_metrics_string)
log.info("Epoch %d [(%s, %s) images, %6.1fs]: t-loss: %.3f, v-loss: %.3f%s" %
(epoch, np.sum(batch_train_sizes), np.sum(batch_validation_sizes), time.time() - tic,
d_avg_loss, epoch_validation_loss, custom_metrics_string))
epoch_info = dict(epoch=epoch, training_loss=d_avg_loss, validation_loss=epoch_validation_loss)
training_history.append(epoch_info)
saver.save(sess, "%s/model-epoch-%d.ckpt" % (weights_dir, epoch))
learning_rate_value = self.lr_policy.epoch_update(learning_rate_value, training_history)
log.info("Current learning rate: %f " % learning_rate_value)
end_points_G_val = self.model.generator([self.cnf['batch_size_test'], 100],
False,
True,
batch_size=self.cnf['batch_size_test'])
util.save_images(
'generated_images.jpg', sess.run(end_points_G_val['softmax']), width=128, height=128)
G = sess.run(end_points_G_val['softmax'])
cv2.imwrite('generated_image.jpg', G[0, :, :, :] * 50 + 128)
if self.is_summary:
train_writer.close()
validation_writer.close()
def _feature_matching_loss(self, real_data_features, fake_data_features):
real_data_mean = tf.reduce_mean(real_data_features, axis=0)
fake_data_mean = tf.reduce_mean(fake_data_features, axis=0)
feature_loss = tf.reduce_mean(tf.abs(tf.subtract(real_data_mean, fake_data_mean)))
return feature_loss
def _tower_loss_semi_supervised(self, inputs, targets, gpu_idx=0, num_classes=11,
is_fm_loss=False):
with tf.variable_scope("train_specific"):
avg_error_rate = tf.get_variable(
'avg_error_rate', [], initializer=tf.constant_initializer(0.), trainable=False)
num_error_rate = tf.get_variable(
'num_error_rate', [], initializer=tf.constant_initializer(0.), trainable=False)
batch_size_train = self.cnf['batch_size_train']
batch_size_val = self.cnf['batch_size_test']
self.end_points_G = self.model.generator([batch_size_train, 100], True, None, batch_size_val)
if gpu_idx == 0:
G_means = tf.reduce_mean(self.end_points_G['softmax'], 0, keep_dims=True)
G_vars = tf.reduce_mean(tf.square(self.end_points_G['softmax'] - G_means), 0, keep_dims=True)
G = tf.Print(
self.end_points_G['softmax'],
[tf.reduce_mean(G_means), tf.reduce_mean(G_vars)],
"generator mean and average var",
first_n=1)
inputs_means = tf.reduce_mean(inputs, 0, keep_dims=True)
inputs_vars = tf.reduce_mean(tf.square(inputs - inputs_means), 0, keep_dims=True)
inputs = tf.Print(
inputs,
[tf.reduce_mean(inputs_means), tf.reduce_mean(inputs_vars)],
"image mean and average var",
first_n=1)
joint = tf.concat([inputs, G], 0)
log.info('Input size of unlabelled and generated %s' % (joint.get_shape()))
self.end_points_D = self.model.discriminator(
joint, True, None, num_classes=num_classes, batch_size=batch_size_train)
self.end_points_D_val = self.model.discriminator(
inputs, False, True, num_classes=num_classes, batch_size=batch_size_val)
# For printing layers shape
self.training_end_points = self.end_points_D
self.training_end_points.update(self.end_points_G)
tf.summary.histogram("d", self.end_points_D['D_on_data'])
tf.summary.histogram("d_", self.end_points_D['D_on_G'])
tf.summary.image("G", G)
d_label_smooth = self.cnf['d_label_smooth'] # 0.25
self.d_loss_real = self._sigmoid_kl_with_logits(self.end_points_D['D_on_data_logits'],
1. - d_label_smooth)
class_loss_weight = 1.
self.d_loss_class = class_loss_weight * tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.end_points_D['class_logits'], labels=tf.to_int64(targets))
self.test_loss = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D_val['logits'], targets, 1)))
self.error_rate = 1. - \
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(
self.end_points_D['class_logits'], targets, 1)))
if gpu_idx == 0:
update = tf.assign(num_error_rate, num_error_rate + 1.)
with tf.control_dependencies([update]):
tc = tf.maximum(.01, 1. / num_error_rate)
update = tf.assign(avg_error_rate, (1. - tc) * avg_error_rate + tc * self.error_rate)
with tf.control_dependencies([update]):
self.d_loss_class = tf.identity(self.d_loss_class)
self.d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.end_points_D['D_on_G_logits'],
labels=tf.zeros_like(self.end_points_D['D_on_G_logits']))
self.d_loss_class = tf.reduce_mean(self.d_loss_class)
self.d_loss_real = tf.reduce_mean(self.d_loss_real)
self.d_loss_fake = tf.reduce_mean(self.d_loss_fake)
if is_fm_loss:
global_pool_head = self.end_points_D['global_pool']
real_data_features = tf.slice(global_pool_head, [0, 0], [batch_size_train, num_classes])
fake_data_features = tf.slice(global_pool_head, [batch_size_train, 0],
[batch_size_train, num_classes])
self.g_loss = self._feature_matching_loss(real_data_features, fake_data_features)
else:
generator_target_prob = self.cnf['generator_target_prob'] # 0.75 / 2.0
self.g_loss = self._sigmoid_kl_with_logits(self.end_points_D['D_on_G_logits'],
generator_target_prob)
self.g_loss = tf.reduce_mean(self.g_loss)
if gpu_idx == 0:
self.g_losses = []
self.g_losses.append(self.g_loss)
self.d_loss = self.d_loss_real + self.d_loss_fake + self.d_loss_class
if gpu_idx == 0:
self.d_loss_reals = []
self.d_loss_fakes = []
self.d_loss_classes = []
self.d_losses = []
self.d_loss_reals.append(self.d_loss_real)
self.d_loss_fakes.append(self.d_loss_fake)
self.d_loss_classes.append(self.d_loss_class)
self.d_losses.append(self.d_loss)
self.predictions = self.end_points_D_val['predictions']
def _get_vars_semi_supervised(self):
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('d_')]
g_vars = [var for var in t_vars if var.name.startswith('g_')]
for x in d_vars:
assert x not in g_vars
for x in g_vars:
assert x not in d_vars
for x in t_vars:
assert x in g_vars or x in d_vars
return {'d_vars': d_vars, 'g_vars': g_vars}
def sigmoid_kl_with_logits(self, logits, targets):
""" Sigmoid cross entropy with smooth labels
Args:
logits: logits
targets: smooth targets
Returns:
cross entropy loss
"""
assert isinstance(targets, float)
if targets in [0., 1.]:
entropy = 0.
else:
entropy = - targets * np.log(targets) - \
(1. - targets) * np.log(1. - targets)
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(logits) * targets, logits=logits) - entropy
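  # Note on the subtraction above: sigmoid cross-entropy with a soft target t
  # reaches its minimum -t*log(t) - (1-t)*log(1-t) (the entropy of t) when
  # sigmoid(logits) == t, so subtracting that entropy shifts the minimum to
  # zero; the returned quantity is KL(Bernoulli(t) || Bernoulli(sigmoid(logits))).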
def _setup_model_loss(self, update_ops=None, num_classes=6):
self.learning_rate_d = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")
self.learning_rate_g = tf.placeholder(tf.float32, shape=[], name="learning_rate_placeholder")
d_optimizer = self._optimizer(
self.learning_rate_d,
optname=self.cnf.get('optname', 'momentum'),
**self.cnf.get('opt_kwargs', {'decay': 0.9}))
g_optimizer = self._optimizer(
self.learning_rate_g,
optname=self.cnf.get('optname', 'momentum'),
**self.cnf.get('opt_kwargs', {'decay': 0.9}))
# Get images and labels for ImageNet and split the batch across GPUs.
assert self.cnf['batch_size_train'] % self.cnf.get('num_gpus', 1) == 0, (
'Batch size must be divisible by number of GPUs')
self.inputs = tf.placeholder(
tf.float32,
shape=(None, self.model.image_size[0], self.model.image_size[0], 3),
name="input")
self.labels = tf.placeholder(tf.int32, shape=(None,))
self._tower_loss_semi_supervised(
self.inputs, self.labels, num_classes=num_classes, is_fm_loss=True)
global_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops is None:
update_ops = global_update_ops
else:
update_ops = set(update_ops)
# Make sure update_ops are computed before total_loss.
if update_ops:
with tf.control_dependencies(update_ops):
barrier = tf.no_op(name='update_barrier')
self.d_losses[-1] = control_flow_ops.with_dependencies([barrier], self.d_losses[-1])
self.g_losses[-1] = control_flow_ops.with_dependencies([barrier], self.g_losses[-1])
self.d_loss_real = control_flow_ops.with_dependencies([barrier], self.d_loss_real)
self.d_loss_fake = control_flow_ops.with_dependencies([barrier], self.d_loss_fake)
self.d_loss_class = control_flow_ops.with_dependencies([barrier], self.d_loss_class)
t_vars = self._get_vars_semi_supervised()
if self.clip_by_global_norm:
self.capped_d_grads = self._clip_grad_global_norms(
t_vars['d_vars'], self.d_losses[-1], d_optimizer, gradient_noise_scale=0.0)
self.capped_g_grads = self._clip_grad_global_norms(
t_vars['g_vars'], self.g_losses[-1], g_optimizer, gradient_noise_scale=0.0)
else:
self.capped_d_grads = self._clip_grad_norms(
d_optimizer.compute_gradients(self.d_losses[-1], t_vars['d_vars']))
self.capped_g_grads = self._clip_grad_norms(
g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars']))
global_step = tf.get_variable(
'global_step', [], initializer=tf.constant_initializer(0), trainable=False)
if self.gradient_multipliers is not None:
with tf.name_scope('multiply_grads'):
self.capped_d_grads = self._multiply_gradients(self.capped_d_grads,
self.gradient_multipliers)
apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step)
apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step)
self.train_op_d = control_flow_ops.with_dependencies([apply_d_gradient_op], self.d_losses[-1])
self.train_op_g = control_flow_ops.with_dependencies([apply_g_gradient_op], self.g_losses[-1])
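# Minimal usage sketch (the model definition, cnf dict and iterators are
# placeholders; cnf must at least provide the batch-size, label-smoothing and
# optimizer keys read above):
#   trainer = SemiSupervisedTrainer(model, cnf, training_iterator=train_it,
#                                   validation_iterator=val_it, is_summary=False)
#   trainer.fit(data_set, num_classes=6, weights_from=None, start_epoch=1)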
| 47.548035
| 101
| 0.675575
|
79509d54d7a58ca9fbded46d6b2e383a874989cd
| 2,521
|
py
|
Python
|
networkx/algorithms/tests/test_distance_regular.py
|
theaverageguy/networkx
|
b2b74b3ba028ef3788f796aa64b037c8ea446539
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/tests/test_distance_regular.py
|
theaverageguy/networkx
|
b2b74b3ba028ef3788f796aa64b037c8ea446539
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/tests/test_distance_regular.py
|
theaverageguy/networkx
|
b2b74b3ba028ef3788f796aa64b037c8ea446539
|
[
"BSD-3-Clause"
] | 2
|
2016-09-04T10:59:12.000Z
|
2020-02-17T07:43:04.000Z
|
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
import networkx as nx
from networkx import is_strongly_regular
class TestDistanceRegular(object):
def test_is_distance_regular(self):
assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
assert_true(nx.is_distance_regular(nx.petersen_graph()))
assert_true(nx.is_distance_regular(nx.cubical_graph()))
assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
assert_true(nx.is_distance_regular(nx.pappus_graph()))
assert_true(nx.is_distance_regular(nx.heawood_graph()))
assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
        # not distance regular
assert_false(nx.is_distance_regular(nx.path_graph(4)))
def test_not_connected(self):
G=nx.cycle_graph(4)
nx.add_cycle(G, [5, 6, 7])
assert_false(nx.is_distance_regular(G))
def test_global_parameters(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
b,c=nx.intersection_array(nx.cycle_graph(3))
g=nx.global_parameters(b,c)
assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])
def test_intersection_array(self):
b,c=nx.intersection_array(nx.cycle_graph(5))
assert_equal(b,[2, 1])
assert_equal(c,[1, 1])
b,c=nx.intersection_array(nx.dodecahedral_graph())
assert_equal(b,[3, 2, 1, 1, 1])
assert_equal(c,[1, 1, 1, 2, 3])
b,c=nx.intersection_array(nx.icosahedral_graph())
assert_equal(b,[5, 2, 1])
assert_equal(c,[1, 2, 5])
class TestStronglyRegular(object):
"""Unit tests for the :func:`~networkx.is_strongly_regular`
function.
"""
def test_cycle_graph(self):
"""Tests that the cycle graph on five vertices is strongly
regular.
"""
G = nx.cycle_graph(5)
assert_true(is_strongly_regular(G))
def test_petersen_graph(self):
"""Tests that the Petersen graph is strongly regular."""
G = nx.petersen_graph()
assert_true(is_strongly_regular(G))
def test_path_graph(self):
"""Tests that the path graph is not strongly regular."""
G = nx.path_graph(4)
assert_false(is_strongly_regular(G))
| 34.534247
| 77
| 0.666402
|
79509d6ecedc910ee2cd0dab7fca4ac3060c8027
| 43
|
py
|
Python
|
paintedlife/patterns.py
|
AbigailMcGovern/paintedlife
|
7d178f9311da738f5ab3c3cc789b212cd192526f
|
[
"BSD-3-Clause"
] | 7
|
2020-12-06T15:46:57.000Z
|
2022-01-10T07:29:32.000Z
|
paintedlife/patterns.py
|
AbigailMcGovern/paintedlife
|
7d178f9311da738f5ab3c3cc789b212cd192526f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-11T16:28:58.000Z
|
2020-12-15T23:23:33.000Z
|
paintedlife/patterns.py
|
AbigailMcGovern/paintedlife
|
7d178f9311da738f5ab3c3cc789b212cd192526f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-06T15:46:41.000Z
|
2020-12-06T15:46:41.000Z
|
#import numpy as np
# library of patterns
| 10.75
| 21
| 0.744186
|
79509e0da59087724c7ad32862f4a10871238e6b
| 4,518
|
py
|
Python
|
anchorgql/runlocal.py
|
vybenetwork/anchorgql
|
d8a8a3fa332e0076f20061689951645c0dae1642
|
[
"MIT"
] | 1
|
2022-02-20T22:05:26.000Z
|
2022-02-20T22:05:26.000Z
|
anchorgql/runlocal.py
|
vybenetwork/anchorgql
|
d8a8a3fa332e0076f20061689951645c0dae1642
|
[
"MIT"
] | null | null | null |
anchorgql/runlocal.py
|
vybenetwork/anchorgql
|
d8a8a3fa332e0076f20061689951645c0dae1642
|
[
"MIT"
] | null | null | null |
import json
import subprocess
import asyncio
from solana.rpc.async_api import AsyncClient
from solana.publickey import PublicKey
from anchorpy import Program, Provider, Wallet
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def build_and_start_server(project_name, prd_mode):
print(f'{bcolors.OKCYAN}INFO: Starting test for {project_name}')
completed_process_result = subprocess.run(
"npm run prod", shell=True)
if completed_process_result.returncode != 0:
print(
f'{bcolors.FAIL}ERROR: Failed to generate Apollo GraphQL project for project: {project_name}{bcolors.ENDC}')
return False
print(f'{bcolors.OKGREEN}DONE: Project creation successful for project: {project_name}{bcolors.ENDC}')
server_directory = "./src/server"
new_process = subprocess.run(
"npm start", cwd=server_directory, shell=True)
if new_process.returncode != 0:
print(
f'{bcolors.FAIL}ERROR: Failed to start newly generated Apollo GraphQL server for project: {project_name}{bcolors.ENDC}')
return False
print(f'{bcolors.OKGREEN}DONE: Project startup successful for project: {project_name}{bcolors.ENDC}')
return True
def create_project_config(path, content):
with open(path, 'w') as f:
f.write(json.dumps(content))
return
async def check_and_replace_with_new_idl(program_id, idl_path, anchor_provider_url):
try:
client = AsyncClient(anchor_provider_url)
provider = Provider(client, Wallet.local())
program_id = PublicKey(program_id)
idl = await Program.fetch_raw_idl(
program_id, provider
)
except:
await client.close()
return
if idl is not None:
with open(idl_path, 'w') as file:
json.dump(idl, file)
await client.close()
return
def main():
# On Windows, if an error happens where the channels file isn't found, you probably opened the project
# from the wrong directory. Either try reopening the project from the correct directory or play with the
# line below.
# os.chdir('./anchorgql')
config = json.load(open('channels.json'))
channels_config = config['channels']
results = []
for channel in channels_config:
project_name = channel['PROJECT_NAME']
program_id = channel['PROGRAM_ID']
anchor_provider_url = channel['ANCHOR_PROVIDER_URL']
idl_path = channel['IDL_PATH']
asyncio.run(check_and_replace_with_new_idl(
program_id, idl_path, anchor_provider_url))
content = {
"projectName": project_name,
"protocol": channel["PROTOCOL"],
"network": channel["NETWORK"],
"programID": program_id,
"anchorProviderURL": anchor_provider_url,
"idlPath": idl_path,
"anchorVersion": config['anchorVersion'],
"idl": config['idl'],
"port": config['port'],
"packageJsonTemplateFile": config['packageJsonTemplateFile'],
"indexTemplateFile": config['indexTemplateFile'],
"typeDefTemplateFile": config['typeDefTemplateFile'],
"configFile": config['configFile'],
"testMode": config["testMode"],
"prdMode": config["prdMode"]
}
create_project_config('./src/config.json', content)
passed = build_and_start_server(project_name, config["prdMode"])
results.append({
"projectName": project_name,
"passed": passed
})
print()
print("===================================================")
print("===================================================")
print("===================================================")
print()
print(f'{bcolors.OKBLUE}INFO: Test results:{bcolors.ENDC}')
for result in results:
if result['passed']:
print(
f'{bcolors.OKGREEN}{result["projectName"]}: Passed{bcolors.ENDC}')
else:
print(
f'{bcolors.FAIL}{result["projectName"]}: Failed{bcolors.ENDC}')
print()
print("===================================================")
print("=================== End of Run ====================")
print("===================================================")
if __name__ == '__main__':
main()
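# A minimal channels.json sketch matching the keys read in main() above (all
# values are placeholders):
#   {
#     "anchorVersion": "...", "idl": "...", "port": 8080,
#     "packageJsonTemplateFile": "...", "indexTemplateFile": "...",
#     "typeDefTemplateFile": "...", "configFile": "...",
#     "testMode": false, "prdMode": true,
#     "channels": [
#       {"PROJECT_NAME": "...", "PROGRAM_ID": "...", "PROTOCOL": "...",
#        "NETWORK": "...", "ANCHOR_PROVIDER_URL": "...", "IDL_PATH": "..."}
#     ]
#   }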
| 36.144
| 132
| 0.588092
|
79509f9a9888958dd7bdbfb28f7f58a5b83cb280
| 5,784
|
py
|
Python
|
cmaes/solver_cma.py
|
mcmips/osim-rl
|
610b95cf0c4484f1acecd31187736b0113dcfb73
|
[
"MIT"
] | 867
|
2017-01-21T20:53:36.000Z
|
2022-03-20T09:47:08.000Z
|
cmaes/solver_cma.py
|
mcmips/osim-rl
|
610b95cf0c4484f1acecd31187736b0113dcfb73
|
[
"MIT"
] | 197
|
2017-01-22T21:27:36.000Z
|
2022-01-10T16:18:35.000Z
|
cmaes/solver_cma.py
|
mcmips/osim-rl
|
610b95cf0c4484f1acecd31187736b0113dcfb73
|
[
"MIT"
] | 277
|
2017-02-01T18:42:18.000Z
|
2022-03-23T11:30:31.000Z
|
# Copyright (c) 2015, Disney Research
# All rights reserved.
#
# Author(s): Sehoon Ha <sehoon.ha@disneyresearch.com>
# Disney Research Robotics Group
#
# adapted by Seungmoon Song <seungmoon.song@gmail.com>
from __future__ import division # '/' always means non-truncating division
from cmaes.solver import Solver
import numpy as np
import cma
import scipy.optimize
import time
from datetime import datetime
import sys
class CMASolver(Solver):
def __init__(self, prob):
Solver.__init__(self, prob)
opts = cma.CMAOptions()
# for k, v in opts.iteritems():
# print k, v
# exit(0)
self.p_dir = 'optim_data/cma/'
opts.set('verb_disp', 1)
opts.set('popsize', 8)
opts.set('verb_filenameprefix', self.p_dir)
opts.set('maxiter', 2000)
self.options = opts
self.cen = None
self.rng = None
def set_verbose(self, verbose):
self.verbose = verbose
if verbose:
self.options['verb_disp'] = 1
else:
self.options['verb_disp'] = 0
def create_directory(self):
verbose = (self.options['verb_disp'] > 0)
import os
path = self.p_dir
if verbose:
print('cma path = ', path)
if not os.path.exists(path):
if verbose:
print('CMA-ES: create directory [%s]' % path)
os.makedirs(path)
def eval_f(self, y):
x = self.unnormalize(y)
ret = super(CMASolver, self).eval_f(x)
# for i in range(self.prob.num_eq_constraints()):
# ret_eq_i = self.prob.c_eq(x, i)
# # ret += 100.0 * (ret_eq_i ** 2)
# ret += 10.0 * (ret_eq_i) # Assume the quadratic form
# for i in range(self.prob.num_ineq_constraints()):
# ret_ineq_i = self.prob.c_ineq(x, i)
# if ret_ineq_i < 0:
# ret += 100.0 * (ret_ineq_i ** 2)
return ret
def clip(self, x):
if self.rng is None:
return x
return np.clip(x, self.cen-self.rng, self.cen+self.rng)
# normalize between [-1, 1]
def normalize(self, x):
if self.rng is None:
return x
return (x - self.cen) / self.rng
def unnormalize(self, y):
if self.rng is None:
return y
x = self.cen + y * self.rng
return x
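    # Worked example of the mapping above: with cen = [0, 0] and rng = [2, 2],
    # x = [1, -1] normalizes to y = [0.5, -0.5] and unnormalize(y) recovers x.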
def solve(self, x0=None, sigma=1.0):
verbose = (self.options['verb_disp'] > 0)
begin = time.time()
if verbose:
print('Optimization method = CMA-ES')
if x0 is None:
if verbose:
print('Optimization: set x0 as zeros')
if self.cen is not None:
x0 = self.cen
else:
x0 = np.zeros(self.prob.dim)
self.create_directory()
if verbose:
print('CMA-ES: cen = ', self.cen)
print('CMA-ES: rng = ', self.rng)
print('Optimization begins at ', str(datetime.now()))
#print('normalized_center = ', self.normalize(x0))
# for k, v in self.options.iteritems():
# print(k, '\t', v)
res = cma.fmin(self.eval_f,
self.normalize(x0),
sigma,
options=self.options)
if verbose:
print('Optimization ends at ', str(datetime.now()))
print('Total times = %.2fs' % (time.time() - begin))
ret = scipy.optimize.OptimizeResult()
ret['y'] = res[0]
ret['x'] = self.unnormalize(res[0])
ret['fun'] = res[1]
# assert(np.allclose(res[1], self.prob.f(ret['x'])))
ret['nfev'] = self.eval_counter
# ret['jac'] = self.eval_g(ret['x'])
ret['message'] = 'Optimization terminated successfully.'
ret['status'] = 0
ret['success'] = True
return ret
class CMASolverPar(CMASolver):
def solve(self, x0=None, sigma=1.0):
verbose = (self.options['verb_disp'] > 0)
begin = time.time()
if verbose:
print('Optimization method = CMA-ES')
if x0 is None:
if verbose:
print('Optimization: set x0 as zeros')
if self.cen is not None:
x0 = self.cen
else:
x0 = np.zeros(self.prob.dim)
self.create_directory()
if verbose:
print('CMA-ES: cen = ', self.cen)
print('CMA-ES: rng = ', self.rng)
print('Optimization begins at ', str(datetime.now()))
#print('normalized_center = ', self.normalize(x0))
# for k, v in self.options.iteritems():
# print(k, '\t', v)
res = cma.fmin(None,
self.normalize(x0),
sigma,
parallel_objective=self.eval_f,
options=self.options)
if verbose:
print('Optimization ends at ', str(datetime.now()))
print('Total times = %.2fs' % (time.time() - begin))
ret = scipy.optimize.OptimizeResult()
ret['y'] = res[0]
ret['x'] = self.unnormalize(res[0])
ret['fun'] = res[1]
# assert(np.allclose(res[1], self.prob.f(ret['x'])))
ret['nfev'] = self.eval_counter
# ret['jac'] = self.eval_g(ret['x'])
ret['message'] = 'Optimization terminated successfully.'
ret['status'] = 0
ret['success'] = True
return ret
if __name__ == '__main__':
import optimization.test_problems
import numpy as np
# prob = test_problems.QuadProb()
prob = optimization.test_problems.Rosen()
x0 = np.random.rand(prob.dim) - 0.5
solver = CMASolver(prob)
res = solver.solve(x0)
print(res)
| 31.955801
| 75
| 0.529737
|
7950a08760ea20c4d125ff75975becb9ed5efaa3
| 2,096
|
py
|
Python
|
plugins/hw_wallet/plugin.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | null | null | null |
plugins/hw_wallet/plugin.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | 1
|
2021-11-15T17:47:29.000Z
|
2021-11-15T17:47:29.000Z
|
plugins/hw_wallet/plugin.py
|
Durendal/electrum-rby
|
0dadd13467d44bcc7128f0dec0fa1aeff8d22576
|
[
"MIT"
] | 1
|
2017-11-13T23:19:46.000Z
|
2017-11-13T23:19:46.000Z
|
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from electrum_rby.plugins import BasePlugin, hook
from electrum_rby.i18n import _
class HW_PluginBase(BasePlugin):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.device = self.keystore_class.device
self.keystore_class.plugin = self
def is_enabled(self):
return self.libraries_available
def device_manager(self):
return self.parent.device_manager
@hook
def close_wallet(self, wallet):
for keystore in wallet.get_keystores():
if isinstance(keystore, self.keystore_class):
self.device_manager().unpair_xpub(keystore.xpub)
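# Illustrative sketch (comments only, added for clarity): a concrete hardware
# wallet plugin is expected to supply the class-static attributes listed in the
# note above. All names below are hypothetical placeholders, not part of
# Electrum's real API.
#
#   class Example_KeyStore:
#       device = 'ExampleDevice'          # consumed by HW_PluginBase.__init__
#
#   class ExamplePlugin(HW_PluginBase):
#       libraries_available = True
#       keystore_class = Example_KeyStore
#       firmware_URL = 'https://example.invalid/firmware'
#       minimum_firmware = (1, 0, 0)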
| 38.814815
| 73
| 0.739504
|
7950a101c93e726b033e2b08c35d6b12c017e302
| 239
|
py
|
Python
|
admin/admin_goods/serializers.py
|
Tomtao626/dailyfresh
|
337e29ded8f9be438d69c97ce86954e58672bd62
|
[
"Apache-2.0"
] | null | null | null |
admin/admin_goods/serializers.py
|
Tomtao626/dailyfresh
|
337e29ded8f9be438d69c97ce86954e58672bd62
|
[
"Apache-2.0"
] | null | null | null |
admin/admin_goods/serializers.py
|
Tomtao626/dailyfresh
|
337e29ded8f9be438d69c97ce86954e58672bd62
|
[
"Apache-2.0"
] | null | null | null |
from apps.goods.models import GoodsType
from rest_framework import serializers
class GoodsTypeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = GoodsType
fields = ['id', 'name', 'logo', 'image']
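# Usage sketch (hypothetical viewset, not part of this module): the serializer
# would typically be wired into a Django REST framework viewset, e.g.
#
#   from rest_framework import viewsets
#
#   class GoodsTypeViewSet(viewsets.ModelViewSet):
#       queryset = GoodsType.objects.all()
#       serializer_class = GoodsTypeSerializer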
| 26.555556
| 66
| 0.728033
|
7950a11b90561ea96e7e153e1e052747b81d84bf
| 1,210
|
py
|
Python
|
Cut.py
|
MingjunMa/PixEditor
|
1d32c06e1689e1ed7e8bd0f9d30e50bed4e24562
|
[
"MIT"
] | 7
|
2020-05-21T03:44:25.000Z
|
2021-11-19T04:44:13.000Z
|
Cut.py
|
MingjunMa/PixEditor
|
1d32c06e1689e1ed7e8bd0f9d30e50bed4e24562
|
[
"MIT"
] | 5
|
2021-02-02T22:44:40.000Z
|
2022-03-12T00:31:15.000Z
|
Cut.py
|
MingjunMa/PixEditor
|
1d32c06e1689e1ed7e8bd0f9d30e50bed4e24562
|
[
"MIT"
] | 1
|
2020-05-21T03:59:49.000Z
|
2020-05-21T03:59:49.000Z
|
import cv2
global img
global point1, point2
def on_mouse(event, x, y, flags, param):
global img, point1, point2, path
img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
point1 = (x, y)
cv2.circle(img2, point1, 10, (0, 255, 0), 5)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (255, 0, 0), 5)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 5)
cv2.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
cut_img = img[min_y:min_y + height, min_x:min_x + width]
cv2.imwrite(path, cut_img)
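# Flow of on_mouse above (descriptive note added for clarity): a left-button
# press records the first corner, moving with the button held previews the
# rectangle, and releasing the button crops that region and overwrites the
# file at `path` via cv2.imwrite.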
def main(get_path):
global img, path
path = get_path
img = cv2.imread(path)
cv2.namedWindow('image')
cv2.setMouseCallback('image', on_mouse)
cv2.imshow('image', img)
cv2.waitKey(0)
if __name__ == '__main__':
    import sys
    # main() requires an image path; read it from the command line.
    main(sys.argv[1])
| 30.25
| 86
| 0.572727
|
7950a17c304b9ac0035e17e045389155e5d1da27
| 2,269
|
py
|
Python
|
src/Filtering/AnisotropicSmoothing/ComputeGradientAnisotropicDiffusion/Code.py
|
kian-weimer/ITKSphinxExamples
|
ff614cbba28831d1bf2a0cfaa5a2f1949a627c1b
|
[
"Apache-2.0"
] | 34
|
2015-01-26T19:38:36.000Z
|
2021-02-04T02:15:41.000Z
|
src/Filtering/AnisotropicSmoothing/ComputeGradientAnisotropicDiffusion/Code.py
|
kian-weimer/ITKSphinxExamples
|
ff614cbba28831d1bf2a0cfaa5a2f1949a627c1b
|
[
"Apache-2.0"
] | 142
|
2016-01-22T15:59:25.000Z
|
2021-03-17T15:11:19.000Z
|
src/Filtering/AnisotropicSmoothing/ComputeGradientAnisotropicDiffusion/Code.py
|
kian-weimer/ITKSphinxExamples
|
ff614cbba28831d1bf2a0cfaa5a2f1949a627c1b
|
[
"Apache-2.0"
] | 32
|
2015-01-26T19:38:41.000Z
|
2021-03-17T15:28:14.000Z
|
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itk
import argparse
parser = argparse.ArgumentParser(description="Compute Gradient Anisotropic Diffusion.")
parser.add_argument("input_image")
parser.add_argument("output_image")
parser.add_argument("number_of_iterations", type=int)
parser.add_argument("time_step", type=float)
parser.add_argument("conductance", type=float)
args = parser.parse_args()
InputPixelType = itk.F
OutputPixelType = itk.UC
Dimension = 2
InputImageType = itk.Image[InputPixelType, Dimension]
OutputImageType = itk.Image[OutputPixelType, Dimension]
ReaderType = itk.ImageFileReader[InputImageType]
reader = ReaderType.New()
reader.SetFileName(args.input_image)
FilterType = itk.GradientAnisotropicDiffusionImageFilter[InputImageType, InputImageType]
gradientAnisotropicDiffusionFilter = FilterType.New()
gradientAnisotropicDiffusionFilter.SetInput(reader.GetOutput())
gradientAnisotropicDiffusionFilter.SetNumberOfIterations(args.number_of_iterations)
gradientAnisotropicDiffusionFilter.SetTimeStep(args.time_step)
gradientAnisotropicDiffusionFilter.SetConductanceParameter(args.conductance)
RescaleFilterType = itk.RescaleIntensityImageFilter[InputImageType, OutputImageType]
rescaler = RescaleFilterType.New()
rescaler.SetInput(gradientAnisotropicDiffusionFilter.GetOutput())
outputPixelTypeMinimum = itk.NumericTraits[OutputPixelType].min()
outputPixelTypeMaximum = itk.NumericTraits[OutputPixelType].max()
rescaler.SetOutputMinimum(outputPixelTypeMinimum)
rescaler.SetOutputMaximum(outputPixelTypeMaximum)
WriterType = itk.ImageFileWriter[OutputImageType]
writer = WriterType.New()
writer.SetFileName(args.output_image)
writer.SetInput(rescaler.GetOutput())
writer.Update()
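# Example invocation (file names and parameter values are illustrative only):
#
#   python Code.py input.png smoothed.png 5 0.125 3.0
#
# i.e. 5 iterations, a time step of 0.125 and a conductance parameter of 3.0.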
| 36.015873
| 88
| 0.827237
|
7950a32d1c5ead779c35f2687033c45650fe6a98
| 1,247
|
py
|
Python
|
pynws/backports/enum.py
|
lymanepp/pynws
|
92eff6484f680217d90acca3f5711d71dbf0ab3e
|
[
"MIT"
] | 1
|
2022-02-04T19:55:00.000Z
|
2022-02-04T19:55:00.000Z
|
pynws/backports/enum.py
|
lymanepp/pynws
|
92eff6484f680217d90acca3f5711d71dbf0ab3e
|
[
"MIT"
] | null | null | null |
pynws/backports/enum.py
|
lymanepp/pynws
|
92eff6484f680217d90acca3f5711d71dbf0ab3e
|
[
"MIT"
] | null | null | null |
"""Python 3.11 Enum backports from https://github.com/home-assistant/core/tree/dev/homeassistant/backports"""
from __future__ import annotations
from enum import Enum
from typing import Any, List, TypeVar
_StrEnumT = TypeVar("_StrEnumT", bound="StrEnum")
class StrEnum(str, Enum):
"""Partial backport of Python 3.11's StrEnum for our basic use cases."""
def __new__(
cls: type[_StrEnumT], value: str, *args: Any, **kwargs: Any
) -> _StrEnumT:
"""Create a new StrEnum instance."""
if not isinstance(value, str):
raise TypeError(f"{value!r} is not a string")
return super().__new__(cls, value, *args, **kwargs)
def __str__(self) -> str:
"""Return self.value."""
return str(self.value)
@staticmethod
def _generate_next_value_( # pylint: disable=arguments-differ # https://github.com/PyCQA/pylint/issues/5371
name: str, start: int, count: int, last_values: List[Any]
) -> Any:
"""
Make `auto()` explicitly unsupported.
We may revisit this when it's very clear that Python 3.11's
`StrEnum.auto()` behavior will no longer change.
"""
raise TypeError("auto() is not supported by this implementation")
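# The guarded demo below is an illustrative usage sketch added for clarity; it
# is not part of the original backport.
if __name__ == "__main__":
    class Color(StrEnum):
        RED = "red"
    assert Color.RED == "red"        # members compare equal to their string value
    assert str(Color.RED) == "red"   # __str__ returns the plain value
    print("StrEnum usage sketch passed")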
| 34.638889
| 112
| 0.647153
|
7950a3350f66ff4c97a3cb183018065649d9e327
| 2,834
|
py
|
Python
|
user_mgmt/cache.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | 1
|
2021-09-23T14:39:36.000Z
|
2021-09-23T14:39:36.000Z
|
user_mgmt/cache.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | 38
|
2020-08-31T22:53:09.000Z
|
2022-03-28T20:55:39.000Z
|
user_mgmt/cache.py
|
WIPACrepo/keycloak-rest-services
|
2661b0db2dd320bdb8eefc62c805188bec52ecc7
|
[
"MIT"
] | null | null | null |
import logging
from cachetools import TTLCache
from krs.groups import list_groups, group_info, group_info_by_id, get_group_membership_by_id
from krs.institutions import list_insts
logger = logging.getLogger('cache')
class KeycloakGroupCache:
"""
A TTL cache for Keycloak group requests.
Args:
ttl (int): number of seconds to keep items in cache (default: 3600)
krs_client (RestClient): rest client for talking to Keycloak
"""
def __init__(self, ttl=3600, krs_client=None):
self._ttl = ttl
self._krs_client = krs_client
self._group_ids = TTLCache(1000000, ttl*24) # group ids (shouldn't change)
self._group_info = TTLCache(10000000, ttl*24) # group info by id (shouldn't change)
self._group_list = TTLCache(10000000, ttl/60) # group_path list for all groups
self._group_members = TTLCache(10000000, ttl) # group memberships
async def list_groups(self):
if 'groups' not in self._group_list:
logger.info('list_groups() is not cached')
ret = await list_groups(rest_client=self._krs_client)
self._group_list['groups'] = ret
return self._group_list['groups']
async def list_institutions(self):
if 'inst' not in self._group_list:
logger.info('list_institutions() is not cached')
ret = await list_insts(rest_client=self._krs_client)
self._group_list['inst'] = ret
return self._group_list['inst']
async def get_group_id(self, group_path):
if group_path not in self._group_ids:
logger.info(f'get_group_id({group_path}) is not cached')
ret = await group_info(group_path, rest_client=self._krs_client)
self._group_ids[group_path] = ret['id']
return self._group_ids[group_path]
async def get_group_info_from_id(self, group_id):
if group_id not in self._group_info:
logger.info(f'get_group_info_from_id({group_id}) is not cached')
ret = await group_info_by_id(group_id, rest_client=self._krs_client)
self._group_info[group_id] = ret
return self._group_info[group_id]
async def get_members(self, group_path):
if group_path not in self._group_members:
logger.info(f'get_members({group_path}) is not cached')
group_id = await self.get_group_id(group_path)
ret = await get_group_membership_by_id(group_id, rest_client=self._krs_client)
self._group_members[group_path] = ret
return self._group_members[group_path]
def invalidate(self, path=None):
if not path:
self._group_members.clear()
else:
for k in list(self._group_members):
if k.startswith(path):
del self._group_members[k]
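# Illustrative usage sketch added for clarity (not part of the original
# module). `rest_client` stands in for a configured krs RestClient and the
# group path is hypothetical; error handling is omitted.
async def _example_usage(rest_client):
    cache = KeycloakGroupCache(ttl=3600, krs_client=rest_client)
    groups = await cache.list_groups()                  # cached for ttl/60 seconds
    members = await cache.get_members('/institutions/Example')
    cache.invalidate('/institutions')                   # drop cached memberships under this path
    return groups, members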
| 39.915493
| 92
| 0.664785
|
7950a3c74e3f614799b2f8aebbdb2781074127af
| 3,037
|
py
|
Python
|
with_redis.py
|
tistaharahap/oauth1-provider
|
c7059bce68734744d0aa3b83ecb218865a5c1341
|
[
"MIT"
] | 1
|
2017-06-26T07:36:03.000Z
|
2017-06-26T07:36:03.000Z
|
with_redis.py
|
tistaharahap/oauth1-provider
|
c7059bce68734744d0aa3b83ecb218865a5c1341
|
[
"MIT"
] | null | null | null |
with_redis.py
|
tistaharahap/oauth1-provider
|
c7059bce68734744d0aa3b83ecb218865a5c1341
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify
from oauth1.authorize import Oauth1
from oauth1.errors.oauth import Oauth1Errors
from oauth1.store.nosql import Oauth1StoreRedis
from oauth1.store.base import Oauth1StoreBase
BASE_URL = "http://localhost:5000/"
app = Flask(__name__)
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:@127.0.0.1:3306/oauth" # Change this to a valid URI
app.auth = None
app.config['REDIS_HOST'] = '127.0.0.1'
app.config['REDIS_PORT'] = 6379
app.config['REDIS_DB'] = 0
app.config['REDIS_NS'] = 'oauth1-provider-nosql'
class RedisProvider(Oauth1):
def __init__(self):
store = Oauth1StoreRedis(host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'],
db=app.config['REDIS_DB'], namespace=app.config['REDIS_NS'])
super(RedisProvider, self).__init__(base_url=BASE_URL, store=store)
def _verify_xauth_credentials(self, username, password):
return username == 'username' and password == 'password'
@app.before_first_request
def after_run():
global app
app.auth = RedisProvider()
oauth_app = app.auth.store.create_new_consumer_app(app_name='Test App %d' % Oauth1StoreBase.get_unix_time(),
app_desc='Just Testing', app_platform='CLI', app_url=BASE_URL)
    print("OAuth App: ", oauth_app)
    tokens = app.auth.store.create_new_consumer_tokens(app_id=oauth_app['app_id'])
    print("OAuth Tokens: ", tokens)
@app.route('/oauth/', methods=['GET', 'POST'])
@app.route('/oauth/<action>', methods=['POST'])
def oauth(action=None):
if app.auth is None:
return Oauth1Errors.server_error(msg='The auth object is not initialized properly')
if action == 'access_token':
cons_check = app.auth.authorize_consumer()
if isinstance(cons_check, str):
return Oauth1Errors.bad_request(cons_check)
authorized = app.auth.authorize_request(uri='oauth/access_token')
if isinstance(authorized, str):
return Oauth1Errors.unauthorized(authorized)
# Check username/password from XAuth
x_check = app.auth.authorize_xauth()
if isinstance(x_check, str):
return Oauth1Errors.bad_request(x_check)
return jsonify(status='ok')
else:
return Oauth1Errors.not_found('There is no valid resource here')
@app.route('/user/<user_uri>', methods=['GET', 'POST'])
def user(user_uri=None):
if not user_uri:
return Oauth1Errors.bad_request('You must supply a User URI')
else:
cons_check = app.auth.authorize_consumer()
if isinstance(cons_check, str):
return Oauth1Errors.forbidden(cons_check)
authorized = app.auth.authorize_request(uri='oauth/access_token')
if isinstance(authorized, str):
return Oauth1Errors.unauthorized(authorized)
return jsonify(uri=user_uri)
@app.errorhandler(404)
def not_found(error):
return Oauth1Errors.not_found()
if __name__ == "__main__":
app.run()
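# Summary of the routes above (descriptive note added for clarity): POST
# /oauth/access_token exchanges XAuth credentials for a token after the OAuth1
# consumer and request signatures are verified, and /user/<user_uri> is a
# sample protected resource. The consumer app and tokens the server expects are
# printed by after_run() when the first request arrives.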
| 34.123596
| 117
| 0.680606
|
7950a53d9d1421fee329f8e92a5976df13a6df80
| 1,543
|
py
|
Python
|
pyqtgraph/widgets/PathButton.py
|
cycomanic/pyqtgraph
|
108b115aa56c5fc8fd7a398f26e705b8f8717c0f
|
[
"MIT"
] | 5
|
2019-03-08T05:30:20.000Z
|
2021-05-15T07:33:50.000Z
|
pyqtgraph/widgets/PathButton.py
|
cycomanic/pyqtgraph
|
108b115aa56c5fc8fd7a398f26e705b8f8717c0f
|
[
"MIT"
] | 1
|
2019-01-14T09:00:21.000Z
|
2019-01-14T09:00:21.000Z
|
pyqtgraph/widgets/PathButton.py
|
cycomanic/pyqtgraph
|
108b115aa56c5fc8fd7a398f26e705b8f8717c0f
|
[
"MIT"
] | 1
|
2022-02-01T12:45:29.000Z
|
2022-02-01T12:45:29.000Z
|
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
__all__ = ['PathButton']
class PathButton(QtGui.QPushButton):
"""Simple PushButton extension which paints a QPainterPath on its face"""
def __init__(self, parent=None, path=None, pen='default', brush=None, size=(30,30)):
QtGui.QPushButton.__init__(self, parent)
self.path = None
if pen == 'default':
pen = 'k'
self.setPen(pen)
self.setBrush(brush)
if path is not None:
self.setPath(path)
if size is not None:
self.setFixedWidth(size[0])
self.setFixedHeight(size[1])
def setBrush(self, brush):
self.brush = pg.mkBrush(brush)
def setPen(self, pen):
self.pen = pg.mkPen(pen)
def setPath(self, path):
self.path = path
self.update()
def paintEvent(self, ev):
QtGui.QPushButton.paintEvent(self, ev)
margin = 7
geom = QtCore.QRectF(0, 0, self.width(), self.height()).adjusted(margin, margin, -margin, -margin)
rect = self.path.boundingRect()
scale = min(geom.width() / float(rect.width()), geom.height() / float(rect.height()))
p = QtGui.QPainter(self)
p.setRenderHint(p.Antialiasing)
p.translate(geom.center())
p.scale(scale, scale)
p.translate(-rect.center())
p.setPen(self.pen)
p.setBrush(self.brush)
p.drawPath(self.path)
p.end()
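# Guarded demo added for illustration only (not part of the original module).
# It assumes the Qt wrapper used above exposes QApplication via QtGui, builds a
# simple triangular QPainterPath and shows it on a PathButton.
if __name__ == '__main__':
    app = QtGui.QApplication([])
    demo_path = QtGui.QPainterPath()
    demo_path.moveTo(0.0, 0.0)
    demo_path.lineTo(1.0, 0.0)
    demo_path.lineTo(0.5, 1.0)
    demo_path.closeSubpath()
    btn = PathButton(path=demo_path, pen='k', brush='g', size=(60, 60))
    btn.show()
    app.exec_()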
| 30.86
| 106
| 0.567725
|
7950a63a417d930e02f64bdebf0c972cc30a67c2
| 732
|
py
|
Python
|
Server/Tornado/TornadoBoilerplate/app.py
|
dltech-xyz/PythonSkillTree
|
88fad516b22811205a49b6438d48a6535e8a5441
|
[
"Apache-2.0"
] | 26
|
2016-07-11T00:54:39.000Z
|
2022-01-11T13:41:47.000Z
|
Server/Tornado/TornadoBoilerplate/app.py
|
w4n9H/PythonSkillTree
|
88fad516b22811205a49b6438d48a6535e8a5441
|
[
"Apache-2.0"
] | null | null | null |
Server/Tornado/TornadoBoilerplate/app.py
|
w4n9H/PythonSkillTree
|
88fad516b22811205a49b6438d48a6535e8a5441
|
[
"Apache-2.0"
] | 7
|
2016-07-14T08:02:37.000Z
|
2020-06-28T15:27:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: w4n9@sina.com
@create: 16/6/30
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import options
from settings import settings
from urls import url_patterns
# noinspection PyAbstractClass
class TornadoBoilerplate(tornado.web.Application):
def __init__(self):
tornado.web.Application.__init__(self, url_patterns, **settings)
def main():
app = TornadoBoilerplate()
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
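# Example run (added note; the port value is illustrative and assumes the
# project's settings module defines the `port` option for tornado.options):
#
#   python app.py --port=8888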
| 19.263158
| 72
| 0.728142
|
7950a6b1fa492222496f13336452232a22ff54cc
| 28,975
|
py
|
Python
|
tests/python/pants_test/backend/jvm/tasks/test_classpath_products.py
|
AllClearID/pants
|
c4fdf00a3bdf9f26f876e85c46909d0729f7132c
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/tasks/test_classpath_products.py
|
AllClearID/pants
|
c4fdf00a3bdf9f26f876e85c46909d0729f7132c
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/tasks/test_classpath_products.py
|
AllClearID/pants
|
c4fdf00a3bdf9f26f876e85c46909d0729f7132c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.classpath_products import (ArtifactClasspathEntry, ClasspathEntry,
ClasspathProducts,
MissingClasspathEntryError)
from pants.base.exceptions import TaskError
from pants.build_graph.target import Target
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import relativize_paths
from pants_test.subsystem.subsystem_util import init_subsystem
from pants_test.test_base import TestBase
from pants_test.testutils.file_test_util import check_file_content, contains_exact_files
def resolved_example_jar_at(path, org='com.example', name='lib'):
return ResolvedJar(M2Coordinate(org=org, name=name),
cache_path=os.path.join('resolver-cache-dir', path),
pants_path=path)
class ClasspathProductsTest(TestBase):
def setUp(self):
super(ClasspathProductsTest, self).setUp()
init_subsystem(Target.Arguments)
def test_single_classpath_element_no_excludes(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts(self.pants_workdir)
path = self.path('jar/path')
self.add_jar_classpath_element_for_path(classpath_product, a, path)
self.assertEqual([('default', path)], classpath_product.get_for_target(a))
def test_copy(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product,
a,
self._example_jar_path())
classpath_product.add_for_target(a, [('default', self.path('a/path'))])
copied = classpath_product.copy()
a_closure = a.closure(bfs=True)
self.assertEqual([('default', resolved_jar.pants_path), ('default', self.path('a/path'))],
classpath_product.get_for_targets(a_closure))
self.assertEqual([('default', resolved_jar.pants_path), ('default', self.path('a/path'))],
copied.get_for_targets(a_closure))
self.add_excludes_for_targets(copied, b, a)
self.assertEqual([('default', resolved_jar.pants_path), ('default', self.path('a/path'))],
classpath_product.get_for_targets(a_closure))
self.assertEqual([('default', self.path('a/path'))],
copied.get_for_targets(a_closure))
copied.add_for_target(b, [('default', self.path('b/path'))])
self.assertEqual([('default', resolved_jar.pants_path), ('default', self.path('a/path'))],
classpath_product.get_for_targets(a_closure))
self.assertEqual([('default', self.path('a/path')), ('default', self.path('b/path'))],
copied.get_for_targets(a_closure))
def test_fails_if_paths_outside_buildroot(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts(self.pants_workdir)
with self.assertRaises(TaskError) as cm:
classpath_product.add_for_target(a, [('default', '/dev/null')])
self.assertEqual(
'Classpath entry /dev/null for target a:a is located outside the working directory "{}".'.format(self.pants_workdir),
str(cm.exception))
def test_fails_if_jar_paths_outside_buildroot(self):
a = self.make_target('a', JvmTarget)
classpath_product = ClasspathProducts(self.pants_workdir)
with self.assertRaises(TaskError) as cm:
classpath_product.add_jars_for_targets([a], 'default', [(resolved_example_jar_at('/dev/null'))])
self.assertEqual(
'Classpath entry /dev/null for target a:a is located outside the working directory "{}".'.format(self.pants_workdir),
str(cm.exception))
def test_excluded_classpath_element(self):
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_transitive_dependencies_excluded_classpath_element(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_jar_classpath_element_for_path(classpath_product, a, self._example_jar_path())
self.add_excludes_for_targets(classpath_product, b, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_intransitive_dependencies_excluded_classpath_element(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
classpath_product.add_for_target(a, [('default', example_jar_path)])
classpath_product.add_excludes_for_targets([a, b])
intransitive_classpath = classpath_product.get_for_target(a)
self.assertEqual([('default', example_jar_path)], intransitive_classpath)
def test_parent_exclude_excludes_dependency_jar(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b], excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, b, example_jar_path)
self.add_excludes_for_targets(classpath_product, b, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_exclude_leaves_other_jars_unaffected(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
com_example_jar_path = self._example_jar_path()
org_example_jar_path = self.path('ivy/jars/org.example/lib/123.4.jar')
classpath_product.add_jars_for_targets([a], 'default',
[resolved_example_jar_at(com_example_jar_path),
resolved_example_jar_at(org_example_jar_path,
org='org.example')])
self.add_excludes_for_targets(classpath_product, b)
classpath = classpath_product.get_for_target(a)
self.assertEqual([('default', org_example_jar_path)], classpath)
def test_parent_excludes_ignored_for_resolving_child_target(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b], excludes=[Exclude('com.example', 'lib')])
example_jar_path = self._example_jar_path()
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_jar_classpath_element_for_path(classpath_product, b, example_jar_path)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(b)
self.assertEqual([('default', example_jar_path)], classpath)
def test_excludes_used_across_targets(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_excludes_similar_org_name(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.exam')], dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_targets(a.closure(bfs=True))
self.assertEqual([('default', self._example_jar_path())], classpath)
def test_excludes_org_name(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example')], dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, b)
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_target(a)
self.assertEqual([], classpath)
def test_jar_provided_by_transitive_target_excluded(self):
provider = self.make_target('provider', ExportableJvmLibrary,
provides=Artifact('com.example', 'lib', Repository()))
consumer = self.make_target('consumer', JvmTarget)
root = self.make_target('root', JvmTarget, dependencies=[provider, consumer])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, consumer)
self.add_excludes_for_targets(classpath_product, consumer, provider, root)
classpath = classpath_product.get_for_target(root)
self.assertEqual([], classpath)
def test_jar_provided_exclude_with_similar_name(self):
# note exclude 'jars/com.example/l' should not match jars/com.example/lib/jars/123.4.jar
provider = self.make_target('provider', ExportableJvmLibrary,
provides=Artifact('com.example', 'li', Repository()))
root = self.make_target('root', JvmTarget, dependencies=[provider])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, root)
self.add_excludes_for_targets(classpath_product, provider, root)
classpath = classpath_product.get_for_target(root)
self.assertEqual([('default', self._example_jar_path())], classpath)
def test_jar_provided_exclude_with_similar_org(self):
provider = self.make_target('provider', ExportableJvmLibrary,
provides=Artifact('com.example.lib', '', Repository()))
root = self.make_target('root', JvmTarget, dependencies=[provider])
classpath_product = ClasspathProducts(self.pants_workdir)
self.add_example_jar_classpath_element_for(classpath_product, root)
self.add_excludes_for_targets(classpath_product, provider, root)
classpath = classpath_product.get_for_target(root)
self.assertEqual([('default', self._example_jar_path())], classpath)
def test_jar_in_classpath_not_a_resolved_jar_ignored_by_excludes(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example')], dependencies=[b])
example_jar_path = self._example_jar_path()
classpath_product = ClasspathProducts(self.pants_workdir)
classpath_product.add_for_target(b, [('default', example_jar_path)])
self.add_excludes_for_targets(classpath_product, a)
classpath = classpath_product.get_for_targets(a.closure(bfs=True))
self.assertEqual([('default', example_jar_path)], classpath)
def test_jar_missing_pants_path_fails_adding(self):
b = self.make_target('b', JvmTarget)
classpath_products = ClasspathProducts(self.pants_workdir)
with self.assertRaises(TaskError) as cm:
classpath_products.add_jars_for_targets([b], 'default',
[ResolvedJar(M2Coordinate(org='org', name='name'),
cache_path='somewhere',
pants_path=None)])
self.assertEqual(
'Jar: org:name: has no specified path.',
str(cm.exception))
def test_get_product_target_mappings_for_targets_respect_excludes(self):
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
self.add_excludes_for_targets(classpath_product, a)
classpath_by_product = classpath_product.get_product_target_mappings_for_targets([a])
self.assertEqual([], classpath_by_product)
def test_get_product_target_mappings_for_targets_ignore_excludes(self):
a = self.make_target('a', JvmTarget, excludes=[Exclude('com.example', 'lib')])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path,
conf='fred-conf')
self.add_excludes_for_targets(classpath_product, a)
classpath_target_tuples = classpath_product.get_product_target_mappings_for_targets([a], respect_excludes=False)
expected_entry = ArtifactClasspathEntry(example_jar_path,
resolved_jar.coordinate,
resolved_jar.cache_path)
self.assertEqual([(('fred-conf', expected_entry), a)], classpath_target_tuples)
def test_get_product_target_mappings_for_targets_transitive(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
classpath_product.add_for_target(b, [('default', self.path('b/loose/classes/dir'))])
classpath_product.add_for_target(a, [('default', self.path('a/loose/classes/dir')),
('default', self.path('an/internally/generated.jar'))])
classpath_target_tuples = classpath_product.get_product_target_mappings_for_targets(a.closure(bfs=True))
self.assertEqual([
(('default', ArtifactClasspathEntry(example_jar_path,
resolved_jar.coordinate,
resolved_jar.cache_path)), a),
(('default', ClasspathEntry(self.path('a/loose/classes/dir'))), a),
(('default', ClasspathEntry(self.path('an/internally/generated.jar'))), a),
(('default', ClasspathEntry(self.path('b/loose/classes/dir'))), b)],
classpath_target_tuples)
def test_get_product_target_mappings_for_targets_intransitive(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
classpath_product.add_for_target(b, [('default', self.path('b/loose/classes/dir'))])
classpath_product.add_for_target(a, [('default', self.path('a/loose/classes/dir')),
('default', self.path('an/internally/generated.jar'))])
classpath_target_tuples = classpath_product.get_product_target_mappings_for_targets([a])
self.assertEqual([
(('default', ArtifactClasspathEntry(example_jar_path,
resolved_jar.coordinate,
resolved_jar.cache_path)), a),
(('default', ClasspathEntry(self.path('a/loose/classes/dir'))), a),
(('default', ClasspathEntry(self.path('an/internally/generated.jar'))), a)],
classpath_target_tuples)
def test_get_classpath_entries_for_targets_dedup(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
    # resolved_jar is added for both a and b but should be returned as only one classpath entry
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path,
conf='fred-conf')
self.add_jar_classpath_element_for_path(classpath_product, b, example_jar_path,
conf='fred-conf')
classpath_target_tuples = classpath_product.get_classpath_entries_for_targets([a], respect_excludes=False)
expected_entry = ArtifactClasspathEntry(example_jar_path,
resolved_jar.coordinate,
resolved_jar.cache_path)
self.assertEqual([('fred-conf', expected_entry)], classpath_target_tuples)
def test_get_artifact_classpath_entries_for_targets(self):
b = self.make_target('b', JvmTarget, excludes=[Exclude('com.example', 'lib')])
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
example_jar_path = self._example_jar_path()
resolved_jar = self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
# These non-artifact classpath entries should be ignored.
classpath_product.add_for_target(b, [('default', self.path('b/loose/classes/dir'))])
classpath_product.add_for_target(a, [('default', self.path('a/loose/classes/dir')),
('default', self.path('an/internally/generated.jar'))])
classpath = classpath_product.get_artifact_classpath_entries_for_targets([a])
self.assertEqual([('default', ArtifactClasspathEntry(example_jar_path,
resolved_jar.coordinate,
resolved_jar.cache_path))],
classpath)
def test_get_internal_classpath_entries_for_targets(self):
b = self.make_target('b', JvmTarget)
a = self.make_target('a', JvmTarget, dependencies=[b])
classpath_product = ClasspathProducts(self.pants_workdir)
# This artifact classpath entry should be ignored.
example_jar_path = self._example_jar_path()
self.add_jar_classpath_element_for_path(classpath_product, a, example_jar_path)
classpath_product.add_for_target(b, [('default', self.path('b/loose/classes/dir'))])
classpath_product.add_for_target(a, [('default', self.path('a/loose/classes/dir')),
('default', self.path('an/internally/generated.jar'))])
classpath = classpath_product.get_internal_classpath_entries_for_targets(a.closure(bfs=True))
self.assertEqual([('default', ClasspathEntry(self.path('a/loose/classes/dir'))),
('default', ClasspathEntry(self.path('an/internally/generated.jar'))),
('default', ClasspathEntry(self.path('b/loose/classes/dir')))],
classpath)
def test_create_canonical_classpath(self):
a = self.make_target('a/b', JvmTarget)
jar_path = 'ivy/jars/org.x/lib/x-1.0.jar'
classpath_products = ClasspathProducts(self.pants_workdir)
resolved_jar = ResolvedJar(M2Coordinate(org='org', name='x', rev='1.0'),
cache_path='somewhere',
pants_path=self._create_file(jar_path))
classpath_products.add_for_target(a, [('default', self._create_file('a.jar')),
('default', self._create_file('resources'))])
classpath_products.add_jars_for_targets([a], 'default', [resolved_jar])
with temporary_dir() as base_dir:
self._test_canonical_classpath_helper(classpath_products,
[a],
base_dir,
[
'a.b.b-0.jar',
'a.b.b-1',
'a.b.b-2.jar',
],
{
'a.b.b-classpath.txt':
'{}/a.jar:{}/resources:{}/{}\n'
.format(self.pants_workdir, self.pants_workdir,
self.pants_workdir, jar_path)
},
excludes={Exclude(org='org', name='y')})
      # incrementally delete the resource dependency
classpath_products = ClasspathProducts(self.pants_workdir)
classpath_products.add_for_target(a, [('default', self._create_file('a.jar'))])
self._test_canonical_classpath_helper(classpath_products,
[a],
base_dir,
[
'a.b.b-0.jar',
],
{
'a.b.b-classpath.txt':
'{}/a.jar\n'.format(self.pants_workdir)
})
# incrementally add another jar dependency
classpath_products = ClasspathProducts(self.pants_workdir)
classpath_products.add_for_target(a, [('default', self._create_file('a.jar')),
('default', self._create_file('b.jar'))])
self._test_canonical_classpath_helper(classpath_products,
[a],
base_dir,
[
'a.b.b-0.jar',
'a.b.b-1.jar'
],
{
'a.b.b-classpath.txt':
'{}/a.jar:{}/b.jar\n'.format(self.pants_workdir,
self.pants_workdir)
})
def test_create_canonical_classpath_with_broken_classpath(self):
"""Test exception is thrown when the jar file is missing."""
a = self.make_target('a/b', JvmTarget)
classpath_products = ClasspathProducts(self.pants_workdir)
jar_path = 'ivy/jars/org.x/lib/x-1.0.jar'
# this sets the path for the artifact without actually materializing it.
resolved_jar = ResolvedJar(M2Coordinate(org='org', name='x', rev='1.0'),
cache_path='somewhere',
pants_path=os.path.join(self.pants_workdir, jar_path))
classpath_products.add_jars_for_targets([a], 'default', [resolved_jar])
with temporary_dir() as base_dir:
with self.assertRaises(MissingClasspathEntryError):
self._test_canonical_classpath_helper(classpath_products,
[a],
base_dir,
[],
{})
  def test_create_canonical_classpath_no_duplicate_entry(self):
    """Test that no more than one symlink is created for the same classpath entry."""
jar_path = 'ivy/jars/org.x/lib/x-1.0.jar'
resolved_jar = ResolvedJar(M2Coordinate(org='org', name='x', rev='1.0'),
cache_path='somewhere',
pants_path=self._create_file(jar_path))
target_a = self.make_target('a', JvmTarget)
target_b = self.make_target('b', JvmTarget)
classpath_products = ClasspathProducts(self.pants_workdir)
# Both target a and target b depend on the same jar library
classpath_products.add_jars_for_targets([target_a], 'default', [resolved_jar])
classpath_products.add_jars_for_targets([target_b], 'default', [resolved_jar])
with temporary_dir() as base_dir:
      # Only target a generates a symlink to the jar library; target b skips
      # creating a symlink for the same jar. Both targets' classpath.txt files
      # should still contain the jar library.
self._test_canonical_classpath_helper(classpath_products,
[target_a, target_b],
base_dir,
['a.a-0.jar'],
{
'a.a-classpath.txt':
'{}/{}\n'.format(self.pants_workdir, jar_path),
'b.b-classpath.txt':
'{}/{}\n'.format(self.pants_workdir, jar_path),
})
def _test_canonical_classpath_helper(self,
classpath_products,
targets,
libs_dir,
expected_canonical_classpath,
expected_classspath_files,
excludes=None):
"""
Helper method to call `create_canonical_classpath` and verify generated canonical classpath.
:param ClasspathProducts classpath_products: Classpath products.
:param list targets: List of targets to generate canonical classpath from.
:param string libs_dir: Directory where canonical classpath are to be generated.
:param list expected_canonical_classpath: List of canonical classpath relative to a base directory.
:param dict expected_classspath_files: A dict of classpath.txt path to its expected content.
"""
canonical_classpath = ClasspathProducts.create_canonical_classpath(
classpath_products, targets, libs_dir, save_classpath_file=True,
internal_classpath_only=False, excludes=excludes)
# check canonical path returned
self.assertEquals(expected_canonical_classpath,
relativize_paths(canonical_classpath, libs_dir))
    # check that the canonical paths created contain the exact set of files, no more, no less
self.assertTrue(contains_exact_files(libs_dir,
expected_canonical_classpath +
expected_classspath_files.keys()))
# check the content of classpath.txt
for classpath_file in expected_classspath_files:
self.assertTrue(check_file_content(os.path.join(libs_dir, classpath_file),
expected_classspath_files[classpath_file]))
def _example_jar_path(self):
return self.path('ivy/jars/com.example/lib/jars/123.4.jar')
def path(self, p):
return os.path.join(self.pants_workdir, p)
def _create_file(self, p):
return self.create_workdir_file(p)
@staticmethod
def add_jar_classpath_element_for_path(classpath_product,
target,
example_jar_path,
conf=None):
resolved_jar = resolved_example_jar_at(example_jar_path)
classpath_product.add_jars_for_targets(targets=[target],
conf=conf or 'default',
resolved_jars=[resolved_jar])
return resolved_jar
@staticmethod
def add_excludes_for_targets(classpath_product, *targets):
classpath_product.add_excludes_for_targets(targets)
def add_example_jar_classpath_element_for(self, classpath_product, target):
self.add_jar_classpath_element_for_path(classpath_product, target, self._example_jar_path())
| 49.785223
| 123
| 0.640483
|
7950a6ee7e394183068e5dc2c19ec6a1a8d8971d
| 6,134
|
py
|
Python
|
tests/utils/test_amazon_s3.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 50
|
2019-09-13T10:32:29.000Z
|
2022-02-14T16:52:53.000Z
|
tests/utils/test_amazon_s3.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 31
|
2019-09-03T11:06:03.000Z
|
2021-08-20T14:57:09.000Z
|
tests/utils/test_amazon_s3.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 9
|
2019-09-16T03:25:15.000Z
|
2021-05-11T10:28:25.000Z
|
import pytest
from sciwing.utils.amazon_s3 import S3Util
import sciwing.constants as constants
import os
import json
import pathlib
import shutil
PATHS = constants.PATHS
AWS_CRED_DIR = PATHS["AWS_CRED_DIR"]
TESTS_DIR = PATHS["TESTS_DIR"]
OUTPUT_DIR = PATHS["OUTPUT_DIR"]
WING_NUS_AWS_CREDENTIALS = os.path.join(AWS_CRED_DIR, "aws_s3_credentials.json")
@pytest.fixture
def setup_s3_util():
util = S3Util(aws_cred_config_json_filename=WING_NUS_AWS_CREDENTIALS)
return util
@pytest.fixture
def create_dummy_folder_in_test_folder():
dummy_folder_path = pathlib.Path(TESTS_DIR, "utils", "dummy_folder")
dummy_folder_path.mkdir(exist_ok=True)
dummy_file_path = dummy_folder_path.joinpath("dummy_file.txt")
with open(dummy_file_path, "w") as fp:
fp.write("dummy line")
return dummy_folder_path
@pytest.fixture
def create_dummy_file_in_test_folder():
dummy_file_path = pathlib.Path(TESTS_DIR, "utils", "dummy_file.txt")
with open(dummy_file_path, "w") as fp:
fp.write("dummy line \n")
return dummy_file_path
@pytest.mark.skipif(
not pathlib.Path(WING_NUS_AWS_CREDENTIALS).is_file(),
reason="No aws credentials file",
)
class TestS3Util:
def test_credentials_file_not_empty(self, setup_s3_util):
aws_util = setup_s3_util
config_filename = aws_util.aws_cred_config_json_filename
with open(config_filename, "r") as fp:
cred = json.load(fp)
assert cred["aws_access_key_id"] is not None
assert cred["aws_access_secret"] is not None
assert cred["region"] is not None
def test_credentials_not_empty(self, setup_s3_util):
aws_util = setup_s3_util
aws_credentials = aws_util.credentials
assert aws_credentials.access_key is not None
assert aws_credentials.access_secret is not None
assert aws_credentials.region is not None
assert aws_credentials.bucket_name is not None
def test_s3_connects_succesfully(self, setup_s3_util):
aws_util = setup_s3_util
try:
aws_util.get_client()
except:
pytest.fail("Failed to get s3 client")
def test_s3_resource_gets_successfully(self, setup_s3_util):
aws_util = setup_s3_util
try:
aws_util.get_resource()
except:
pytest.fail("Failed to get s3 resource")
def test_s3_bucket_names(self, setup_s3_util):
"""
Test whether s3 has expected buckets
"""
aws_util = setup_s3_util
client = aws_util.s3_client
bucket_names = []
for bucket_dict in client.list_buckets().get("Buckets"):
bucket_name = bucket_dict["Name"]
bucket_names.append(bucket_name)
assert "parsect-models" in bucket_names
def test_upload_file_doesnot_raise_error(
self, setup_s3_util, create_dummy_file_in_test_folder
):
aws_util = setup_s3_util
dummy_path = create_dummy_file_in_test_folder
aws_util.upload_file(filename=str(dummy_path), obj_name=dummy_path.name)
def test_upload_with_directory(
self, setup_s3_util, create_dummy_file_in_test_folder
):
aws_util = setup_s3_util
dummy_file_path = create_dummy_file_in_test_folder
aws_util.upload_file(str(dummy_file_path), f"dummy_folder/dummy_file.txt")
def test_upload_folder(self, setup_s3_util, create_dummy_folder_in_test_folder):
aws_util = setup_s3_util
dummy_folder = create_dummy_folder_in_test_folder
aws_util.upload_folder(dummy_folder, base_folder_name=dummy_folder)
def test_download_file(self, setup_s3_util):
util = setup_s3_util
output_dir_path = pathlib.Path(OUTPUT_DIR)
try:
util.download_file(
"dummy_file.txt", f"{str(output_dir_path)}/dummy_file.txt"
)
except:
pytest.fail(f"Failed to download file dummy_file.txt")
def test_download_folder(self, setup_s3_util):
util = setup_s3_util
try:
util.download_folder("dummy_folder")
except:
            pytest.fail(f"Could not download dummy_folder from s3")
@pytest.mark.skip
def test_download_debug_random(self, setup_s3_util):
"""Test whether a dummy model folder can be downloaded"""
util = setup_s3_util
output_dir_path = pathlib.Path(OUTPUT_DIR)
parsect_bow_random_debug_folder = output_dir_path.joinpath(
"debug_parsect_bow_random_emb_lc"
)
if parsect_bow_random_debug_folder.is_dir():
shutil.rmtree(parsect_bow_random_debug_folder)
util.download_folder("debug_parsect_bow_random_emb_lc")
assert parsect_bow_random_debug_folder.is_dir()
assert parsect_bow_random_debug_folder.joinpath("config.json").is_file()
assert parsect_bow_random_debug_folder.joinpath("vocab.json").is_file()
def test_download_folder_if_not_exists_raises_error(self, setup_s3_util):
util = setup_s3_util
with pytest.raises(FileNotFoundError):
util.download_folder("debug_not_exists")
@pytest.mark.skip
def test_download_only_best_model(self, setup_s3_util):
"""Test whether a dummy model folder can be downloaded"""
util = setup_s3_util
output_dir_path = pathlib.Path(OUTPUT_DIR)
parsect_bow_random_debug_folder = output_dir_path.joinpath(
"debug_parsect_bow_random_emb_lc"
)
if parsect_bow_random_debug_folder.is_dir():
shutil.rmtree(parsect_bow_random_debug_folder)
util.download_folder(
"debug_parsect_bow_random_emb_lc", download_only_best_checkpoint=True
)
files = [
file
for file in parsect_bow_random_debug_folder.joinpath(
"checkpoints"
).iterdir()
]
assert len(files) == 1
assert files[0].name == "best_model.pt"
assert parsect_bow_random_debug_folder.joinpath("vocab.json").is_file()
assert parsect_bow_random_debug_folder.joinpath("config.json").is_file()
| 34.460674
| 84
| 0.696446
|
7950a7beb6c5fb6d22193b4238d4a4d4fd0767d3
| 1,866
|
py
|
Python
|
scripts/create_access_key.py
|
sinofseven/fukuoka-de-longi-bot-prepare
|
daf1e43e73ac033b45081401570c9f73ef66d953
|
[
"MIT"
] | null | null | null |
scripts/create_access_key.py
|
sinofseven/fukuoka-de-longi-bot-prepare
|
daf1e43e73ac033b45081401570c9f73ef66d953
|
[
"MIT"
] | null | null | null |
scripts/create_access_key.py
|
sinofseven/fukuoka-de-longi-bot-prepare
|
daf1e43e73ac033b45081401570c9f73ef66d953
|
[
"MIT"
] | null | null | null |
import sys
from typing import List, Tuple
from configparser import ConfigParser
import boto3
from botocore.client import BaseClient
from prompt_toolkit.shortcuts import confirm
def main():
iam = boto3.client("iam")
cfn = boto3.client("cloudformation")
username = get_username(cfn)
print(f"\nusername: {username}")
key_names = get_access_key_names(username, iam)
if len(key_names) > 0:
if not confirm_delete():
print("\nfinish\n")
return
delete_access_keys(username, key_names, iam)
access_key, secret_key = create_access_key(username, iam)
print(f"\nAccessKeyId: {access_key}")
print(f"SecretAccessKey: {secret_key}")
def get_username(cfn) -> str:
args = sys.argv
stack_name = args[1]
logical_id = args[2]
option = {
'StackName': stack_name,
'LogicalResourceId': logical_id
}
resp = cfn.describe_stack_resource(**option)
return resp['StackResourceDetail']['PhysicalResourceId']
def get_access_key_names(username: str, iam: BaseClient) -> List[str]:
option = {"UserName": username}
resp = iam.list_access_keys(**option)
return [x["AccessKeyId"] for x in resp.get("AccessKeyMetadata", [])]
def confirm_delete() -> bool:
print("\nThe user has access keys.")
return confirm("Do you want to delete the access keys?")
def delete_access_keys(username: str, access_key_names: List[str], iam: BaseClient):
for key_id in access_key_names:
option = {"UserName": username, "AccessKeyId": key_id}
iam.delete_access_key(**option)
def create_access_key(username: str, iam: BaseClient) -> Tuple[str, str]:
option = {"UserName": username}
resp = iam.create_access_key(**option)
return resp["AccessKey"]["AccessKeyId"], resp["AccessKey"]["SecretAccessKey"]
if __name__ == "__main__":
main()
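# Example invocation (stack and logical-resource names are hypothetical):
#
#   python create_access_key.py my-bot-stack BotUser
#
# The script resolves the IAM user behind that CloudFormation resource, offers
# to delete any existing access keys, then prints a newly created key pair.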
| 28.707692
| 84
| 0.683816
|
7950a7fa49d0ba65c17b8498a4ea38d25ebe8a10
| 44,644
|
py
|
Python
|
modin/engines/ray/generic/io.py
|
xrmx/modin
|
7f19fa2200993a0b8f009b6b603afb4a4022cec8
|
[
"Apache-2.0"
] | null | null | null |
modin/engines/ray/generic/io.py
|
xrmx/modin
|
7f19fa2200993a0b8f009b6b603afb4a4022cec8
|
[
"Apache-2.0"
] | null | null | null |
modin/engines/ray/generic/io.py
|
xrmx/modin
|
7f19fa2200993a0b8f009b6b603afb4a4022cec8
|
[
"Apache-2.0"
] | null | null | null |
import pandas
from pandas.io.common import _infer_compression
from pandas.io.parsers import _validate_usecols_arg
from pandas.core.dtypes.cast import find_common_type
import inspect
import os
import py
import re
import sys
import numpy as np
import math
import warnings
from modin.error_message import ErrorMessage
from modin.engines.base.io import BaseIO
from modin.data_management.utils import compute_chunksize
from modin import __execution_engine__
if __execution_engine__ == "Ray":
import ray
PQ_INDEX_REGEX = re.compile("__index_level_\d+__") # noqa W605
S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
def file_exists(file_path):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
exists = False
try:
exists = s3fs.exists(file_path) or exists
except NoCredentialsError:
pass
s3fs = S3FS.S3FileSystem(anon=True)
return exists or s3fs.exists(file_path)
return os.path.exists(file_path)
def file_open(file_path, mode="rb", compression="infer"):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match:
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
elif compression == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
elif compression == "bz2":
import bz2
return bz2.BZ2File(file_path, mode=mode)
elif compression == "xz":
import lzma
return lzma.LZMAFile(file_path, mode=mode)
elif compression == "zip":
import zipfile
zf = zipfile.ZipFile(file_path, mode=mode.replace("b", ""))
if zf.mode == "w":
return zf
elif zf.mode == "r":
zip_names = zf.namelist()
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
return f
elif len(zip_names) == 0:
raise ValueError(
"Zero files found in ZIP file {}".format(file_path)
)
else:
raise ValueError(
"Multiple files found in ZIP file."
" Only one file per ZIP: {}".format(zip_names)
)
return open(file_path, mode=mode)
def file_size(f):
cur_pos = f.tell()
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(cur_pos, os.SEEK_SET)
return size
class RayIO(BaseIO):
frame_partition_cls = None
query_compiler_cls = None
frame_cls = None
# IMPORTANT NOTE
#
# Specify these in the child classes to extend the functionality from this class.
# The tasks must return a very specific set of objects in the correct order to be
# correct. The following must be returned from these remote tasks:
# 1.) A number of partitions equal to the `num_partitions` value. If there is not
# enough data to fill the number of partitions, returning empty partitions is
# okay as well.
# 2.) The index object if the index is anything but the default type (`RangeIndex`),
# otherwise return the length of the object in the remote task and the logic
    #      will build the `RangeIndex` correctly. Many of these methods have an `index_col`
# parameter that will tell you whether or not to use the default index.
read_parquet_remote_task = None
# For reading parquet files in parallel, this task should read based on the `cols`
# value in the task signature. Each task will read a subset of the columns.
#
# Signature: (path, cols, num_splits, kwargs)
read_csv_remote_task = None
# For reading CSV files and other text files in parallel, this task should read
# based on the offsets in the signature (`start` and `stop` are byte offsets).
# `prefix_id` is the `b""` prefix for reading with a `BytesIO` object and it will
# also contain encoding information in the string.
#
# Signature: (filepath, num_splits, start, stop, kwargs, prefix_id)
read_json_remote_task = None
# For reading JSON files and other text files in parallel, this task should read
# based on the offsets in the signature (`start` and `stop` are byte offsets).
#
# Signature: (filepath, num_splits, start, stop, kwargs)
read_hdf_remote_task = None
# For reading HDF5 files in parallel, this task should read based on the `columns`
# parameter in the task signature. Each task will read a subset of the columns.
#
# Signature: (path_or_buf, columns, num_splits, kwargs)
read_feather_remote_task = None
# For reading Feather file format in parallel, this task should read based on the
# `columns` parameter in the task signature. Each task will read a subset of the
# columns.
#
# Signature: (path, columns, num_splits)
read_sql_remote_task = None
# For reading SQL tables in parallel, this task should read a number of rows based
# on the `sql` string passed to the task. Each task will be given a different LIMIT
# and OFFSET as a part of the `sql` query string, so the tasks should perform only
# the logic required to read the SQL query and determine the Index (information
# above).
#
# Signature: (num_splits, sql, con, index_col, kwargs)
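    # Illustrative sketch (comments only, not Modin's actual code): a concrete
    # engine binds the attributes above to remote tasks honouring the listed
    # signatures, e.g.
    #
    #   class MyEngineIO(RayIO):
    #       frame_partition_cls = MyFramePartition
    #       query_compiler_cls = MyQueryCompiler
    #       frame_cls = MyFrame
    #       read_csv_remote_task = _read_csv_remote  # (filepath, num_splits,
    #                                                #  start, stop, kwargs, prefix_id)
    #
    # where the task returns `num_splits` partitions followed by the index (or
    # the row count when the default RangeIndex is used), as described above.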
@classmethod
def read_parquet(cls, path, engine, columns, **kwargs):
"""Load a parquet object from the file path, returning a DataFrame.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the parquet file.
We only support local files for now.
engine: Ray only support pyarrow reader.
This argument doesn't do anything for now.
kwargs: Pass into parquet's read_pandas function.
Notes:
ParquetFile API is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/parquet.html
"""
from pyarrow.parquet import ParquetFile, ParquetDataset
if cls.read_parquet_remote_task is None:
return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs)
file_path = path
if os.path.isdir(path):
directory = True
partitioned_columns = set()
# We do a tree walk of the path directory because partitioned
# parquet directories have a unique column at each directory level.
# Thus, we can use os.walk(), which does a dfs search, to walk
# through the different columns that the data is partitioned on
for (root, dir_names, files) in os.walk(path):
if dir_names:
partitioned_columns.add(dir_names[0].split("=")[0])
if files:
# Metadata files, git files, .DSStore
if files[0][0] == ".":
continue
file_path = os.path.join(root, files[0])
break
partitioned_columns = list(partitioned_columns)
else:
directory = False
if not columns:
if directory:
# Path of the sample file that we will read to get the remaining
# columns.
from pyarrow import ArrowIOError
try:
pd = ParquetDataset(file_path)
except ArrowIOError:
pd = ParquetDataset(path)
column_names = pd.schema.names
else:
pf = ParquetFile(path)
column_names = pf.metadata.schema.names
columns = [name for name in column_names if not PQ_INDEX_REGEX.match(name)]
# Cannot read in parquet file by only reading in the partitioned column.
# Thus, we have to remove the partition columns from the columns to
# ensure that when we do the math for the blocks, the partition column
# will be read in along with a non partition column.
if columns and directory and any(col in partitioned_columns for col in columns):
columns = [col for col in columns if col not in partitioned_columns]
# If all of the columns wanted are partition columns, return an
# empty dataframe with the desired columns.
if len(columns) == 0:
return cls.from_pandas(pandas.DataFrame(columns=partitioned_columns))
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
column_widths = [len(c) for c in col_partitions]
# Each item in this list will be a list of columns of original df
# partitioned to smaller pieces along rows.
# We need to transpose the oids array to fit our schema.
# TODO (williamma12): This part can be parallelized even more if we
# separate the partitioned parquet file code path from the default one.
# The workers return multiple objects for each part of the file read:
# - The first n - 2 objects are partitions of data
# - The n - 1 object is the length of the partition.
# - The nth object is the dtypes of the partition. We combine these to
# form the final dtypes below.
blk_partitions = np.array(
[
cls.read_parquet_remote_task._remote(
args=(path, cols + partitioned_columns, num_splits, kwargs),
num_return_vals=num_splits + 2,
)
if directory and cols == col_partitions[len(col_partitions) - 1]
else cls.read_parquet_remote_task._remote(
args=(path, cols, num_splits, kwargs),
num_return_vals=num_splits + 2,
)
for cols in col_partitions
]
).T
# Metadata
index_len = ray.get(blk_partitions[-2][0])
index = pandas.RangeIndex(index_len)
index_chunksize = compute_chunksize(
pandas.DataFrame(index=index), num_splits, axis=0
)
if index_chunksize > index_len:
row_lengths = [index_len] + [0 for _ in range(num_splits - 1)]
else:
row_lengths = [
index_chunksize
if i != num_splits - 1
else index_len - (index_chunksize * (num_splits - 1))
for i in range(num_splits)
]
remote_partitions = np.array(
[
[
cls.frame_partition_cls(
blk_partitions[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(blk_partitions[i]))
]
for i in range(len(blk_partitions[:-2]))
]
)
# Compute dtypes concatenating the results from each of the columns splits
# determined above. This creates a pandas Series that contains a dtype for every
# column.
dtypes_ids = list(blk_partitions[-1])
dtypes = pandas.concat(ray.get(dtypes_ids), axis=0)
if directory:
columns += partitioned_columns
dtypes.index = columns
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(
remote_partitions,
index,
columns,
row_lengths,
column_widths,
dtypes=dtypes,
)
)
return new_query_compiler
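# Illustration: a minimal, standalone sketch of the column-splitting scheme
# used above. `num_partitions` stands in for DEFAULT_NPARTITIONS; the names
# below are illustrative and not part of the module's API.
def _split_columns_sketch(columns, num_partitions):
    """Split a list of column names into at most `num_partitions` groups."""
    chunk = (
        len(columns) // num_partitions
        if len(columns) % num_partitions == 0
        else len(columns) // num_partitions + 1
    )
    return [columns[i : i + chunk] for i in range(0, len(columns), chunk)]
# Example: 10 columns over 4 partitions -> groups of sizes 3, 3, 3 and 1.
# _split_columns_sketch(["c%d" % i for i in range(10)], 4)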
# CSV
@classmethod
def _skip_header(cls, f, kwargs={}):
lines_read = 0
comment = kwargs.get("comment", None)
skiprows = kwargs.get("skiprows", None)
encoding = kwargs.get("encoding", None)
header = kwargs.get("header", "infer")
names = kwargs.get("names", None)
if header is None:
return lines_read
elif header == "infer":
if names is not None:
return lines_read
else:
header = 0
# Skip lines before the header
if isinstance(skiprows, int):
lines_read += skiprows
for _ in range(skiprows):
f.readline()
skiprows = None
header_lines = header + 1 if isinstance(header, int) else max(header) + 1
header_lines_skipped = 0
# Python 2 files use a read-ahead buffer which breaks our use of tell()
for line in iter(f.readline, ""):
lines_read += 1
skip = False
if not skip and comment is not None:
if encoding is not None:
skip |= line.decode(encoding)[0] == comment
else:
skip |= line.decode()[0] == comment
if not skip and callable(skiprows):
skip |= skiprows(lines_read)
elif not skip and hasattr(skiprows, "__contains__"):
skip |= lines_read in skiprows
if not skip:
header_lines_skipped += 1
if header_lines_skipped == header_lines:
return lines_read
return lines_read
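# Illustration: a simplified, standalone sketch of the header-skipping idea,
# assuming an integer `header` and an optional comment character. It returns
# how many physical lines a worker should skip before the data rows start.
# The helper name is illustrative and not part of the module's API.
def _count_header_lines_sketch(path, header=0, comment=None, encoding="utf-8"):
    lines_to_skip = 0
    header_lines_seen = 0
    with open(path, "rb") as f:
        for raw in iter(f.readline, b""):
            lines_to_skip += 1
            if comment is not None and raw.decode(encoding).startswith(comment):
                continue  # comment lines do not count toward the header
            header_lines_seen += 1
            if header_lines_seen == header + 1:
                break
    return lines_to_skip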
@classmethod
def _read_csv_from_file_ray(cls, filepath, kwargs={}):
"""Constructs a DataFrame from a CSV file.
Args:
filepath (str): path to the CSV file.
npartitions (int): number of partitions for the DataFrame.
kwargs (dict): args excluding filepath provided to read_csv.
Returns:
DataFrame or Series constructed from CSV file.
"""
names = kwargs.get("names", None)
index_col = kwargs.get("index_col", None)
if names is None:
# For the sake of the empty df, we assume no `index_col` to get the correct
# column names before we build the index. Because we pass `names` in, this
# step has to happen without removing the `index_col` otherwise it will not
# be assigned correctly
kwargs["index_col"] = None
names = pandas.read_csv(
filepath, **dict(kwargs, nrows=0, skipfooter=0)
).columns
kwargs["index_col"] = index_col
empty_pd_df = pandas.read_csv(filepath, **dict(kwargs, nrows=0, skipfooter=0))
column_names = empty_pd_df.columns
skipfooter = kwargs.get("skipfooter", None)
skiprows = kwargs.pop("skiprows", None)
usecols = kwargs.get("usecols", None)
usecols_md = _validate_usecols_arg(kwargs.get("usecols", None))
if usecols is not None and usecols_md[1] != "integer":
del kwargs["usecols"]
all_cols = pandas.read_csv(
file_open(filepath, "rb"), **dict(kwargs, nrows=0, skipfooter=0)
).columns
usecols = all_cols.get_indexer_for(list(usecols_md[0]))
parse_dates = kwargs.pop("parse_dates", False)
partition_kwargs = dict(
kwargs,
header=None,
names=names,
skipfooter=0,
skiprows=None,
parse_dates=parse_dates,
usecols=usecols,
)
with file_open(filepath, "rb", kwargs.get("compression", "infer")) as f:
# Get the BOM if necessary
prefix = b""
if kwargs.get("encoding", None) is not None:
prefix = f.readline()
partition_kwargs["skiprows"] = 1
f.seek(0, os.SEEK_SET) # Return to beginning of file
prefix_id = ray.put(prefix)
partition_kwargs_id = ray.put(partition_kwargs)
# Skip the header since we already have the header information and skip the
# rows we are told to skip.
kwargs["skiprows"] = skiprows
cls._skip_header(f, kwargs)
# Launch tasks to read partitions
partition_ids = []
index_ids = []
dtypes_ids = []
total_bytes = file_size(f)
# Max number of partitions available
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
# This is the number of splits for the columns
num_splits = min(len(column_names), num_partitions)
# This is the chunksize each partition will read
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
# Metadata
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(column_names):
column_widths = [len(column_names)]
# This prevents us from unnecessarily serializing a bunch of empty
# objects.
num_splits = 1
else:
column_widths = [
column_chunksize
if len(column_names) > (column_chunksize * (i + 1))
else 0
if len(column_names) < (column_chunksize * i)
else len(column_names) - (column_chunksize * i)
for i in range(num_splits)
]
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline() # Read a whole number of lines
# The workers return multiple objects for each part of the file read:
# - The first n - 2 objects are partitions of data
# - The n - 1 object is the length of the partition or the index if
# `index_col` is specified. We compute the index below.
# - The nth object is the dtypes of the partition. We combine these to
# form the final dtypes below.
partition_id = cls.read_csv_remote_task._remote(
args=(
filepath,
num_splits,
start,
f.tell(),
partition_kwargs_id,
prefix_id,
),
num_return_vals=num_splits + 2,
)
partition_ids.append(partition_id[:-2])
index_ids.append(partition_id[-2])
dtypes_ids.append(partition_id[-1])
# Compute the index based on a sum of the lengths of each partition (by default)
# or based on the column(s) that were requested.
if index_col is None:
row_lengths = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
else:
index_objs = ray.get(index_ids)
row_lengths = [len(o) for o in index_objs]
new_index = index_objs[0].append(index_objs[1:])
new_index.name = empty_pd_df.index.name
# Compute dtypes by getting collecting and combining all of the partitions. The
# reported dtypes from differing rows can be different based on the inference in
# the limited data seen by each worker. We use pandas to compute the exact dtype
# over the whole column for each column. The index is set below.
dtypes = (
pandas.concat(ray.get(dtypes_ids), axis=1)
.apply(lambda row: find_common_type(row.values), axis=1)
.squeeze(axis=0)
)
partition_ids = [
[
cls.frame_partition_cls(
partition_ids[i][j], length=row_lengths[i], width=column_widths[j]
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
# If parse_dates is present, the column names that we have might not be
# the same length as the returned column names. If we do need to modify
# the column names, we remove the old names from the column names and
# insert the new one at the front of the Index.
if parse_dates is not None:
# Check if is list of lists
if isinstance(parse_dates, list) and isinstance(parse_dates[0], list):
for group in parse_dates:
new_col_name = "_".join(group)
column_names = column_names.drop(group).insert(0, new_col_name)
# Check if it is a dictionary
elif isinstance(parse_dates, dict):
for new_col_name, group in parse_dates.items():
column_names = column_names.drop(group).insert(0, new_col_name)
# Set the index for the dtypes to the column names
if isinstance(dtypes, pandas.Series):
dtypes.index = column_names
else:
dtypes = pandas.Series(dtypes, index=column_names)
new_frame = cls.frame_cls(
partition_ids,
new_index,
column_names,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_query_compiler = cls.query_compiler_cls(new_frame)
if skipfooter:
new_query_compiler = new_query_compiler.drop(
new_query_compiler.index[-skipfooter:]
)
if kwargs.get("squeeze", False) and len(new_query_compiler.columns) == 1:
return new_query_compiler[new_query_compiler.columns[0]]
if index_col is None:
new_query_compiler._modin_frame._apply_index_objs(axis=0)
return new_query_compiler
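# Illustration: a standalone sketch of the byte-range splitting used above.
# Each chunk boundary is pushed forward to the next newline with readline(),
# so no row is cut in half; workers then read the half-open ranges
# [start, end) independently. The helper name is illustrative only.
import os
def _split_file_into_byte_ranges_sketch(path, num_chunks):
    ranges = []
    total_bytes = os.path.getsize(path)
    chunk_size = max(1, total_bytes // num_chunks)
    with open(path, "rb") as f:
        while f.tell() < total_bytes:
            start = f.tell()
            f.seek(chunk_size, os.SEEK_CUR)
            f.readline()  # advance to a whole-line boundary
            ranges.append((start, min(f.tell(), total_bytes)))
    return ranges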
@classmethod
def _read_csv_from_pandas(cls, filepath_or_buffer, kwargs):
# TODO: Should we try to be smart about how we load files here, or naively default to pandas?
pd_obj = pandas.read_csv(filepath_or_buffer, **kwargs)
if isinstance(pd_obj, pandas.DataFrame):
return cls.from_pandas(pd_obj)
elif isinstance(pd_obj, pandas.io.parsers.TextFileReader):
# Overwriting the read method should return a ray DataFrame for calls
# to __next__ and get_chunk
pd_read = pd_obj.read
pd_obj.read = lambda *args, **kwargs: cls.from_pandas(
pd_read(*args, **kwargs)
)
return pd_obj
@classmethod
def read_csv(
cls,
filepath_or_buffer,
sep=",",
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal=b".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
kwargs = {
"filepath_or_buffer": filepath_or_buffer,
"sep": sep,
"delimiter": delimiter,
"header": header,
"names": names,
"index_col": index_col,
"usecols": usecols,
"squeeze": squeeze,
"prefix": prefix,
"mangle_dupe_cols": mangle_dupe_cols,
"dtype": dtype,
"engine": engine,
"converters": converters,
"true_values": true_values,
"false_values": false_values,
"skipinitialspace": skipinitialspace,
"skiprows": skiprows,
"nrows": nrows,
"na_values": na_values,
"keep_default_na": keep_default_na,
"na_filter": na_filter,
"verbose": verbose,
"skip_blank_lines": skip_blank_lines,
"parse_dates": parse_dates,
"infer_datetime_format": infer_datetime_format,
"keep_date_col": keep_date_col,
"date_parser": date_parser,
"dayfirst": dayfirst,
"cache_dates": cache_dates,
"iterator": iterator,
"chunksize": chunksize,
"compression": compression,
"thousands": thousands,
"decimal": decimal,
"lineterminator": lineterminator,
"quotechar": quotechar,
"quoting": quoting,
"escapechar": escapechar,
"comment": comment,
"encoding": encoding,
"dialect": dialect,
"error_bad_lines": error_bad_lines,
"warn_bad_lines": warn_bad_lines,
"skipfooter": skipfooter,
"doublequote": doublequote,
"delim_whitespace": delim_whitespace,
"low_memory": low_memory,
"memory_map": memory_map,
"float_precision": float_precision,
}
if cls.read_csv_remote_task is None:
return super(RayIO, cls).read_csv(**kwargs)
return cls._read(**kwargs)
@classmethod
def _read(cls, filepath_or_buffer, **kwargs):
"""Read csv file from local disk.
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv
"""
# The intention of the inspection code is to reduce the amount of
# communication we have to do between processes and nodes. We take a quick
# pass over the arguments and remove those that are default values so we
# don't have to serialize and send them to the workers. Because the
# arguments list is so long, this does end up saving time based on the
# number of nodes in the cluster.
try:
args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv)
defaults = dict(zip(args[2:], defaults))
filtered_kwargs = {
kw: kwargs[kw]
for kw in kwargs
if kw in defaults
and not isinstance(kwargs[kw], type(defaults[kw]))
or kwargs[kw] != defaults[kw]
}
# This happens on Python2, we will just default to serializing the entire dictionary
except AttributeError:
filtered_kwargs = kwargs
if isinstance(filepath_or_buffer, str):
if not file_exists(filepath_or_buffer):
ErrorMessage.default_to_pandas("File path could not be resolved")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
elif not isinstance(filepath_or_buffer, py.path.local):
read_from_pandas = True
# Pandas read_csv supports pathlib.Path
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
read_from_pandas = False
except ImportError: # pragma: no cover
pass
if read_from_pandas:
ErrorMessage.default_to_pandas("Reading from buffer.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
if (
_infer_compression(filepath_or_buffer, kwargs.get("compression"))
is not None
):
compression_type = _infer_compression(
filepath_or_buffer, kwargs.get("compression")
)
if (
compression_type == "gzip"
or compression_type == "bz2"
or compression_type == "xz"
):
filtered_kwargs["compression"] = compression_type
elif (
compression_type == "zip"
and sys.version_info[0] == 3
and sys.version_info[1] >= 7
):
# Python 3.7+ is needed to .seek and .tell a ZipExtFile
filtered_kwargs["compression"] = compression_type
else:
ErrorMessage.default_to_pandas("Unsupported compression detected.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
ErrorMessage.default_to_pandas("Reading chunks from a file.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
# TODO: replace this by reading lines from file.
if kwargs.get("nrows") is not None:
ErrorMessage.default_to_pandas("`read_csv` with `nrows`")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
else:
return cls._read_csv_from_file_ray(filepath_or_buffer, filtered_kwargs)
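# Illustration: a standalone sketch of the "drop default-valued kwargs before
# shipping them to workers" idea described above, written with
# inspect.signature instead of getfullargspec. Only values that differ from
# the declared defaults survive, which keeps the serialized payload small.
# Names are illustrative only.
import inspect
def _filter_non_default_kwargs_sketch(func, kwargs):
    defaults = {
        name: param.default
        for name, param in inspect.signature(func).parameters.items()
        if param.default is not inspect.Parameter.empty
    }
    return {k: v for k, v in kwargs.items() if k in defaults and v != defaults[k]}
# Example:
#   def read(sep=",", header="infer", nrows=None): pass
#   _filter_non_default_kwargs_sketch(read, {"sep": "\t", "header": "infer"})
#   -> {"sep": "\t"}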
@classmethod
def read_json(
cls,
path_or_buf=None,
orient=None,
typ="frame",
dtype=True,
convert_axes=True,
convert_dates=True,
keep_default_dates=True,
numpy=False,
precise_float=False,
date_unit=None,
encoding=None,
lines=False,
chunksize=None,
compression="infer",
):
kwargs = {
"path_or_buf": path_or_buf,
"orient": orient,
"typ": typ,
"dtype": dtype,
"convert_axes": convert_axes,
"convert_dates": convert_dates,
"keep_default_dates": keep_default_dates,
"numpy": numpy,
"precise_float": precise_float,
"date_unit": date_unit,
"encoding": encoding,
"lines": lines,
"chunksize": chunksize,
"compression": compression,
}
if cls.read_json_remote_task is None:
return super(RayIO, cls).read_json(**kwargs)
if not lines:
ErrorMessage.default_to_pandas(
"`read_json` only optimized with `lines=True`"
)
return super(RayIO, cls).read_json(**kwargs)
else:
# TODO: Pick up the columns in an optimized way from all data
# All rows must be read because some rows may have missing data
# Currently assumes all rows have the same columns
from io import BytesIO
columns = pandas.read_json(
BytesIO(b"" + open(path_or_buf, "rb").readline()), lines=True
).columns
kwargs["columns"] = columns
empty_pd_df = pandas.DataFrame(columns=columns)
path_or_buf = kwargs.pop("path_or_buf")
with file_open(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
total_bytes = file_size(f)
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
chunk_size = max(1, (total_bytes - f.tell()) // num_partitions)
partition_ids = []
index_ids = []
dtypes_ids = []
column_chunksize = compute_chunksize(empty_pd_df, num_splits, axis=1)
if column_chunksize > len(columns):
column_widths = [len(columns)]
num_splits = 1
else:
column_widths = [
column_chunksize
if i != num_splits - 1
else len(columns) - (column_chunksize * (num_splits - 1))
for i in range(num_splits)
]
while f.tell() < total_bytes:
start = f.tell()
f.seek(chunk_size, os.SEEK_CUR)
f.readline()
partition_id = cls.read_json_remote_task._remote(
args=(path_or_buf, num_splits, start, f.tell(), kwargs),
num_return_vals=num_splits + 3,
)
partition_ids.append(partition_id[:-3])
index_ids.append(partition_id[-3])
dtypes_ids.append(partition_id[-2])
row_lengths = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(row_lengths))
dtypes = (
pandas.concat(ray.get(dtypes_ids), axis=1)
.apply(lambda row: find_common_type(row.values), axis=1)
.squeeze(axis=0)
)
partition_ids = [
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
if isinstance(dtypes, pandas.Series):
dtypes.index = columns
else:
dtypes = pandas.Series(dtypes, index=columns)
new_frame = cls.frame_cls(
np.array(partition_ids),
new_index,
columns,
row_lengths,
column_widths,
dtypes=dtypes,
)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
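# Illustration: a standalone sketch of how the column names of a JSON-lines
# file are inferred from its first record above, assuming every row carries
# the same keys. The helper name is illustrative only.
import pandas
from io import BytesIO
def _json_lines_columns_sketch(path):
    with open(path, "rb") as f:
        first_line = f.readline()
    return pandas.read_json(BytesIO(first_line), lines=True).columns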
@classmethod
def _validate_hdf_format(cls, path_or_buf):
s = pandas.HDFStore(path_or_buf)
groups = s.groups()
if len(groups) == 0:
raise ValueError("No dataset in HDF5 file.")
candidate_only_group = groups[0]
format = getattr(candidate_only_group._v_attrs, "table_type", None)
s.close()
return format
@classmethod
def read_hdf(cls, path_or_buf, **kwargs):
"""Load a h5 file from the file path or buffer, returning a DataFrame.
Args:
path_or_buf: string, buffer or path object
Path to the file to open, or an open :class:`pandas.HDFStore` object.
kwargs: Pass into pandas.read_hdf function.
Returns:
DataFrame constructed from the h5 file.
"""
if cls.read_hdf_remote_task is None:
return super(RayIO, cls).read_hdf(path_or_buf, **kwargs)
format = cls._validate_hdf_format(path_or_buf=path_or_buf)
if format is None:
ErrorMessage.default_to_pandas(
"File format seems to be `fixed`. For better distribution consider saving the file in `table` format. "
"df.to_hdf(format=`table`)."
)
return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs))
columns = kwargs.get("columns", None)
if not columns:
start = kwargs.pop("start", None)
stop = kwargs.pop("stop", None)
empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0, **kwargs)
kwargs["start"] = start
kwargs["stop"] = stop
columns = empty_pd_df.columns
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
blk_partitions = np.array(
[
cls.read_hdf_remote_task._remote(
args=(path_or_buf, cols, num_splits, kwargs),
num_return_vals=num_splits + 1,
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(remote_partitions, index, columns)
)
return new_query_compiler
@classmethod
def read_feather(cls, path, columns=None, use_threads=True):
"""Read a pandas.DataFrame from Feather format.
Ray DataFrame only supports pyarrow engine for now.
Args:
path: The filepath of the feather file.
We only support local files for now.
columns: not supported by the pandas API, but can be passed here to read
only specific columns.
use_threads: Whether or not to use threads when reading.
Multi threading is set to True by default.
Notes:
pyarrow feather is used. Please refer to the documentation here
https://arrow.apache.org/docs/python/api.html#feather-format
"""
if cls.read_feather_remote_task is None:
return super(RayIO, cls).read_feather(
path, columns=columns, use_threads=use_threads
)
if columns is None:
from pyarrow.feather import FeatherReader
fr = FeatherReader(path)
columns = [fr.get_column_name(i) for i in range(fr.num_columns)]
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
num_splits = min(len(columns), num_partitions)
# Each item in this list will be a list of column names of the original df
column_splits = (
len(columns) // num_partitions
if len(columns) % num_partitions == 0
else len(columns) // num_partitions + 1
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, len(columns), column_splits)
]
blk_partitions = np.array(
[
cls.read_feather_remote_task._remote(
args=(path, cols, num_splits), num_return_vals=num_splits + 1
)
for cols in col_partitions
]
).T
remote_partitions = np.array(
[
[cls.frame_partition_cls(obj) for obj in row]
for row in blk_partitions[:-1]
]
)
index_len = ray.get(blk_partitions[-1][0])
index = pandas.RangeIndex(index_len)
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(remote_partitions, index, columns)
)
return new_query_compiler
@classmethod
def to_sql(cls, qc, **kwargs):
"""Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
"""
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.head(1).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
def func(df):
df.columns = columns
df.to_sql(**kwargs)
return pandas.DataFrame()
result = qc._modin_frame._fold_reduce(1, func)
# blocking operation
result.to_pandas()
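# Illustration: the "create the table once, then append per partition" pattern
# used above, sketched with an in-memory SQLite database and plain pandas
# slices standing in for Modin partitions. Names are illustrative only.
import sqlite3
import pandas
def _write_in_chunks_sketch(df, table, chunk_rows=1000):
    con = sqlite3.connect(":memory:")
    # Create the (empty) table first so the schema is validated up front.
    df.head(0).to_sql(table, con, index=False)
    # Each chunk then appends its own rows, mirroring the per-partition map.
    for start in range(0, len(df), chunk_rows):
        df.iloc[start : start + chunk_rows].to_sql(
            table, con, index=False, if_exists="append"
        )
    return pandas.read_sql("SELECT COUNT(*) AS n FROM {}".format(table), con)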
@classmethod
def read_sql(cls, sql, con, index_col=None, **kwargs):
"""Reads a SQL query or database table into a DataFrame.
Args:
sql: string or SQLAlchemy Selectable (select or text object) SQL query to be
executed or a table name.
con: SQLAlchemy connectable (engine/connection) or database string URI or
DBAPI2 connection (fallback mode)
index_col: Column(s) to set as index(MultiIndex).
kwargs: Pass into pandas.read_sql function.
"""
if cls.read_sql_remote_task is None:
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
import sqlalchemy as sa
# In the case that we are given a SQLAlchemy Connection or Engine, the objects
# are not pickleable. We have to convert it to the URL string and connect from
# each of the workers.
if isinstance(con, (sa.engine.Engine, sa.engine.Connection)):
warnings.warn(
"To use parallel implementation of `read_sql`, pass the "
"connection string instead of {}.".format(type(con))
)
return super(RayIO, cls).read_sql(sql, con, index_col=index_col, **kwargs)
row_cnt_query = "SELECT COUNT(*) FROM ({}) as foo".format(sql)
row_cnt = pandas.read_sql(row_cnt_query, con).squeeze()
cols_names_df = pandas.read_sql(
"SELECT * FROM ({}) as foo LIMIT 0".format(sql), con, index_col=index_col
)
cols_names = cols_names_df.columns
from modin.pandas import DEFAULT_NPARTITIONS
num_partitions = DEFAULT_NPARTITIONS
partition_ids = []
index_ids = []
limit = math.ceil(row_cnt / num_partitions)
for part in range(num_partitions):
offset = part * limit
query = "SELECT * FROM ({}) as foo LIMIT {} OFFSET {}".format(
sql, limit, offset
)
partition_id = cls.read_sql_remote_task._remote(
args=(num_partitions, query, con, index_col, kwargs),
num_return_vals=num_partitions + 1,
)
partition_ids.append(
[cls.frame_partition_cls(obj) for obj in partition_id[:-1]]
)
index_ids.append(partition_id[-1])
if index_col is None: # sum all lens returned from partitions
index_lens = ray.get(index_ids)
new_index = pandas.RangeIndex(sum(index_lens))
else: # concat index returned from partitions
index_lst = [x for part_index in ray.get(index_ids) for x in part_index]
new_index = pandas.Index(index_lst).set_names(index_col)
new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names)
new_frame._apply_index_objs(axis=0)
return cls.query_compiler_cls(new_frame)
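# Illustration: a standalone sketch of how the table is carved into
# per-partition queries above; `sql` is wrapped in a subquery and every worker
# gets its own LIMIT/OFFSET window. The helper name is illustrative only.
import math
def _partition_queries_sketch(sql, row_cnt, num_partitions):
    limit = math.ceil(row_cnt / num_partitions)
    return [
        "SELECT * FROM ({}) as foo LIMIT {} OFFSET {}".format(
            sql, limit, part * limit
        )
        for part in range(num_partitions)
    ]
# Example: _partition_queries_sketch("SELECT * FROM users", 10, 4) yields four
# queries with LIMIT 3 and OFFSET 0, 3, 6, 9.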
| 39.75423
| 119
| 0.575419
|
7950a8741b8bef3d39272a917e918066f9f79f05
| 1,694
|
py
|
Python
|
chrome/content/papermachines/processors/geoparser_heatmap.py
|
papermachines/papermachines
|
6afcde40621bbe6a0554647a27101af83e5f61cf
|
[
"BSD-2-Clause"
] | 134
|
2015-01-04T11:29:04.000Z
|
2022-03-30T22:39:51.000Z
|
chrome/content/papermachines/processors/geoparser_heatmap.py
|
wisliyao/papermachines
|
6afcde40621bbe6a0554647a27101af83e5f61cf
|
[
"BSD-2-Clause"
] | 20
|
2015-02-06T18:42:31.000Z
|
2021-11-07T05:19:03.000Z
|
chrome/content/papermachines/processors/geoparser_heatmap.py
|
wisliyao/papermachines
|
6afcde40621bbe6a0554647a27101af83e5f61cf
|
[
"BSD-2-Clause"
] | 16
|
2015-02-14T18:46:58.000Z
|
2020-07-24T02:38:11.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import json
import csv
import re
import shutil
import logging
import traceback
import base64
import time
import codecs
import cPickle as pickle
import geoparser
class GeoparserHeatmap(geoparser.Geoparser):
"""
Heatmap of geoparsing results
"""
def _basic_params(self):
self.name = 'geoparser_heatmap'
self.dry_run = False
self.require_stopwords = False
def process(self):
"""
Heatmap using Google Maps and heatmaps.js
"""
data = []
counts = {}
max_count = 0
csv_input = os.path.join(self.out_dir, 'geoparser_export'
+ self.collection + '.csv')
if not os.path.exists(csv_input):
import geoparser_export
subprocessor = geoparser_export.GeoparserExport()
subprocessor.process()
for rowdict in self.parse_csv(csv_input):
coords = ','.join([rowdict['lat'], rowdict['lng']])
if coords not in counts:
counts[coords] = 0
counts[coords] += 1
if counts[coords] > max_count:
max_count = counts[coords]
for (coords, count) in counts.iteritems():
(lat, lng) = coords.split(',')
data.append({'lat': lat, 'lng': lng, 'count': count})
intensity = {'max': max_count, 'data': data}
params = {'INTENSITY': intensity}
self.write_html(params)
if __name__ == '__main__':
try:
processor = GeoparserHeatmap(track_progress=True)
processor.process()
except:
logging.error(traceback.format_exc())
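# Illustration: the coordinate-counting step above rewritten as a standalone
# Python 3 sketch (the processor itself targets Python 2, hence iteritems and
# cPickle). Rows are assumed to be dicts with 'lat' and 'lng' keys; the
# function name is illustrative only.
from collections import Counter
def heatmap_intensity_sketch(rows):
    counts = Counter(','.join([row['lat'], row['lng']]) for row in rows)
    data = [
        {'lat': coords.split(',')[0], 'lng': coords.split(',')[1], 'count': n}
        for coords, n in counts.items()
    ]
    return {'max': max(counts.values()) if counts else 0, 'data': data}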
| 24.911765
| 65
| 0.584416
|
7950a8ac76721ef327db2ad2c89e1b7064f6e458
| 192
|
py
|
Python
|
thirvu_soft/thirvu_soft/report/script_salary_slip/script_salary_slip.py
|
carefulkarthik/Query-report
|
0677a063fc8a89bba5836ee72e9b95b24ca4c8ae
|
[
"MIT"
] | null | null | null |
thirvu_soft/thirvu_soft/report/script_salary_slip/script_salary_slip.py
|
carefulkarthik/Query-report
|
0677a063fc8a89bba5836ee72e9b95b24ca4c8ae
|
[
"MIT"
] | null | null | null |
thirvu_soft/thirvu_soft/report/script_salary_slip/script_salary_slip.py
|
carefulkarthik/Query-report
|
0677a063fc8a89bba5836ee72e9b95b24ca4c8ae
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013, Navin S R and contributors
# For license information, please see license.txt
# import frappe
def execute(filters=None):
columns, data = [], []
return columns, data
| 17.454545
| 49
| 0.713542
|
7950a906fdf79d3732fc7102dec4659068aadf12
| 7,820
|
py
|
Python
|
vitrage-4.3.1/vitrage/tests/unit/datasources/zabbix/test_zabbix_transformer.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
vitrage-4.3.1/vitrage/tests/unit/datasources/zabbix/test_zabbix_transformer.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
vitrage-4.3.1/vitrage/tests/unit/datasources/zabbix/test_zabbix_transformer.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from testtools import matchers
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceOpts as DSOpts
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import GraphAction
from vitrage.common.constants import UpdateMethod
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_properties import AlarmProperties as AlarmProps
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.host.transformer import HostTransformer
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import TransformerBase
from vitrage.datasources.zabbix.properties import ZabbixProperties\
as ZabbixProps
from vitrage.datasources.zabbix.properties import ZabbixTriggerSeverity
from vitrage.datasources.zabbix.properties import ZabbixTriggerValue
from vitrage.datasources.zabbix.transformer import ZabbixTransformer
from vitrage.datasources.zabbix import ZABBIX_DATASOURCE
from vitrage.tests import base
from vitrage.tests.mocks import mock_driver as mock_sync
from vitrage.utils.datetime import format_unix_timestamp
LOG = logging.getLogger(__name__)
# noinspection PyProtectedMember
class ZabbixTransformerTest(base.BaseTest):
OPTS = [
cfg.StrOpt(DSOpts.UPDATE_METHOD,
default=UpdateMethod.PULL),
]
# noinspection PyAttributeOutsideInit,PyPep8Naming
@classmethod
def setUpClass(cls):
super(ZabbixTransformerTest, cls).setUpClass()
cls.transformers = {}
cls.conf = cfg.ConfigOpts()
cls.conf.register_opts(cls.OPTS, group=ZABBIX_DATASOURCE)
cls.transformers[NOVA_HOST_DATASOURCE] = \
HostTransformer(cls.transformers, cls.conf)
def test_create_entity_key(self):
LOG.debug('Test get key from nova instance transformer')
# Test setup
spec_list = mock_sync.simple_zabbix_alarm_generators(host_num=1,
events_num=1)
zabbix_alarms = mock_sync.generate_sequential_events_list(spec_list)
transformer = ZabbixTransformer(self.transformers, self.conf)
event = zabbix_alarms[0]
self.enrich_event(event)
# Test action
observed_key = transformer._create_entity_key(event)
# Test assertions
observed_key_fields = observed_key.split(
TransformerBase.KEY_SEPARATOR)
self.assertEqual(EntityCategory.ALARM, observed_key_fields[0])
self.assertEqual(event[DSProps.ENTITY_TYPE], observed_key_fields[1])
self.assertEqual(event[ZabbixProps.RESOURCE_NAME],
observed_key_fields[2])
self.assertEqual(event[ZabbixProps.TRIGGER_ID],
observed_key_fields[3])
def test_zabbix_alarm_transform(self):
LOG.debug('Zabbix alarm transformer test: transform entity event')
# Test setup
spec_list = mock_sync.simple_zabbix_alarm_generators(host_num=4,
events_num=10)
zabbix_alarms = mock_sync.generate_sequential_events_list(spec_list)
for alarm in zabbix_alarms:
# Test action
self.enrich_event(alarm, format_timestamp=False)
wrapper = ZabbixTransformer(self.transformers, self.conf)\
.transform(alarm)
self._validate_vertex(wrapper.vertex, alarm)
neighbors = wrapper.neighbors
self.assertThat(neighbors, matchers.HasLength(1))
neighbor = neighbors[0]
# Right now we only support host as a resource
if neighbor.vertex[VProps.VITRAGE_TYPE] == NOVA_HOST_DATASOURCE:
self._validate_host_neighbor(neighbors[0], alarm)
self._validate_action(alarm, wrapper)
def _validate_action(self, alarm, wrapper):
ds_action = alarm[DSProps.DATASOURCE_ACTION]
if ds_action in (DatasourceAction.SNAPSHOT, DatasourceAction.UPDATE):
if alarm[ZabbixProps.VALUE] == ZabbixTriggerValue.OK:
self.assertEqual(GraphAction.DELETE_ENTITY, wrapper.action)
else:
self.assertEqual(GraphAction.UPDATE_ENTITY, wrapper.action)
else:
self.assertEqual(GraphAction.CREATE_ENTITY, wrapper.action)
def _validate_vertex(self, vertex, event):
self.assertEqual(EntityCategory.ALARM, vertex[VProps.VITRAGE_CATEGORY])
self.assertEqual(event[DSProps.ENTITY_TYPE],
vertex[VProps.VITRAGE_TYPE])
self.assertEqual(event[ZabbixProps.DESCRIPTION],
vertex[VProps.NAME])
event_status = event[ZabbixProps.VALUE]
if event_status == ZabbixTriggerValue.OK:
self.assertEqual(AlarmProps.INACTIVE_STATE,
vertex[VProps.STATE])
else:
self.assertEqual(AlarmProps.ACTIVE_STATE,
vertex[VProps.STATE])
event_severity = ZabbixTriggerSeverity.str(
event[ZabbixProps.PRIORITY])
self.assertEqual(event_severity, vertex[VProps.SEVERITY])
self.assertFalse(vertex[VProps.VITRAGE_IS_DELETED])
self.assertFalse(vertex[VProps.VITRAGE_IS_PLACEHOLDER])
def _validate_host_neighbor(self, neighbor, event):
host_vertex = neighbor.vertex
observed_key = host_vertex.vertex_id
expected_key = tbase.build_key((EntityCategory.RESOURCE,
NOVA_HOST_DATASOURCE,
event[ZabbixProps.RESOURCE_NAME]))
expected_uuid = \
TransformerBase.uuid_from_deprecated_vitrage_id(expected_key)
self.assertEqual(expected_uuid, observed_key)
self.assertEqual(expected_uuid,
host_vertex.properties.get(VProps.VITRAGE_ID))
self.assertFalse(host_vertex[VProps.VITRAGE_IS_DELETED])
self.assertTrue(host_vertex[VProps.VITRAGE_IS_PLACEHOLDER])
self.assertEqual(EntityCategory.RESOURCE,
host_vertex[VProps.VITRAGE_CATEGORY])
self.assertEqual(event[ZabbixProps.RESOURCE_NAME],
host_vertex[VProps.ID])
self.assertEqual(NOVA_HOST_DATASOURCE,
host_vertex[VProps.VITRAGE_TYPE])
edge = neighbor.edge
self.assertEqual(EdgeLabel.ON, edge.label)
alarm_key = ZabbixTransformer(self.transformers, self.conf).\
_create_entity_key(event)
alarm_uuid = TransformerBase.uuid_from_deprecated_vitrage_id(alarm_key)
self.assertEqual(alarm_uuid, edge.source_id)
self.assertEqual(host_vertex.vertex_id, edge.target_id)
@staticmethod
def enrich_event(event, format_timestamp=True):
if format_timestamp:
event[ZabbixProps.TIMESTAMP] = format_unix_timestamp(
event[ZabbixProps.LAST_CHANGE], tbase.TIMESTAMP_FORMAT)
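# Illustration: format_unix_timestamp (imported from vitrage.utils.datetime)
# is assumed to behave roughly like this standalone sketch, turning an epoch
# value into a formatted UTC string; the exact TIMESTAMP_FORMAT lives in
# transformer_base, so the format below is only an example.
from datetime import datetime, timezone
def format_unix_timestamp_sketch(epoch_seconds, fmt="%Y-%m-%dT%H:%M:%SZ"):
    return datetime.fromtimestamp(int(epoch_seconds), tz=timezone.utc).strftime(fmt)
# format_unix_timestamp_sketch(0) -> '1970-01-01T00:00:00Z'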
| 41.818182
| 79
| 0.696803
|
7950a94a92238ba8e71ccf2fb285a833d8661854
| 3,120
|
py
|
Python
|
util/chplenv/chpl_llvm.py
|
e-kayrakli/chapel
|
41cc7d7897f56d5de7684fb98effc17fd12af399
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
util/chplenv/chpl_llvm.py
|
e-kayrakli/chapel
|
41cc7d7897f56d5de7684fb98effc17fd12af399
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
util/chplenv/chpl_llvm.py
|
e-kayrakli/chapel
|
41cc7d7897f56d5de7684fb98effc17fd12af399
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-03-05T19:48:04.000Z
|
2020-03-05T19:48:04.000Z
|
#!/usr/bin/env python3
import optparse
import os
import sys
import chpl_bin_subdir, chpl_arch, chpl_compiler, chpl_platform, overrides
from chpl_home_utils import get_chpl_third_party
from utils import memoize, run_command
@memoize
def get_uniq_cfg_path_for(llvm_val):
if llvm_val == "llvm":
# put platform-arch-compiler for included llvm
host_bin_subdir = chpl_bin_subdir.get('host')
host_compiler = chpl_compiler.get('host')
llvm_target_dir = '{0}-{1}'.format(host_bin_subdir, host_compiler)
else:
# just put 'system' for system llvm
llvm_target_dir = llvm_val
return llvm_target_dir
@memoize
def get_uniq_cfg_path():
llvm_val = get()
return get_uniq_cfg_path_for(llvm_val)
def is_included_llvm_built():
chpl_third_party = get_chpl_third_party()
llvm_target_dir = get_uniq_cfg_path_for('llvm')
llvm_subdir = os.path.join(chpl_third_party, 'llvm', 'install',
llvm_target_dir)
llvm_header = os.path.join(llvm_subdir, 'include', 'llvm',
'PassSupport.h')
if os.path.exists(llvm_header):
return True
else:
return False
def compatible_platform_for_llvm_default():
target_arch = chpl_arch.get('target')
return (target_arch != "i386")
def has_compatible_installed_llvm():
preferred_vers_file = os.path.join(get_chpl_third_party(),
'llvm', 'LLVM_VERSION')
preferred_vers = ""
with open(preferred_vers_file, 'r') as file:
preferred_vers = file.read().strip()
find_llvm_config = os.path.join(get_chpl_third_party(),
'llvm', 'find-llvm-config.sh')
got = run_command([find_llvm_config, preferred_vers])
got = got.strip()
if got and got != "missing-llvm-config":
return True
else:
return False
@memoize
def get():
llvm_val = overrides.get('CHPL_LLVM')
if not llvm_val:
llvm_val = 'none'
if is_included_llvm_built():
llvm_val = 'llvm'
elif ("CHPL_LLVM_BY_DEFAULT" in os.environ and
os.environ["CHPL_LLVM_BY_DEFAULT"] != "0" and
# CHPL_LLVM_BY_DEFAULT is an enviro var to help us transition
compatible_platform_for_llvm_default()):
if has_compatible_installed_llvm():
llvm_val = 'system'
else:
llvm_val = 'llvm'
return llvm_val
def _main():
llvm_val = get()
parser = optparse.OptionParser(usage='usage: %prog [--needs-llvm-runtime]')
parser.add_option('--needs-llvm-runtime', dest='needsllvm',
action='store_const',
const='needsllvm', default='')
(options, args) = parser.parse_args()
#if --needs-llvm-runtime is set, print out llvm if runtime is needed,
# and print out nothing if it is not.
if options.needsllvm:
if llvm_val == 'system' or llvm_val == 'llvm':
sys.stdout.write("llvm\n");
else:
sys.stdout.write("{0}\n".format(llvm_val))
if __name__ == '__main__':
_main()
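# Illustration: the @memoize decorator imported from utils is assumed to cache
# a function's result per argument tuple, roughly like this standalone sketch
# (functools.lru_cache(maxsize=None) would behave similarly).
import functools
def memoize_sketch(func):
    cache = {}
    @functools.wraps(func)
    def wrapper(*args):
        if args not in cache:
            cache[args] = func(*args)
        return cache[args]
    return wrapper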
| 30.891089
| 79
| 0.633654
|
7950aa8187be140078faf4cde91f12725541459b
| 14,468
|
py
|
Python
|
house-rocket.py
|
rsoliveirac/projeto-insights-houses
|
ca76a6a76bf280ca1e0c3e3777f24cf6a9dae302
|
[
"MIT"
] | null | null | null |
house-rocket.py
|
rsoliveirac/projeto-insights-houses
|
ca76a6a76bf280ca1e0c3e3777f24cf6a9dae302
|
[
"MIT"
] | null | null | null |
house-rocket.py
|
rsoliveirac/projeto-insights-houses
|
ca76a6a76bf280ca1e0c3e3777f24cf6a9dae302
|
[
"MIT"
] | 7
|
2021-06-19T17:52:16.000Z
|
2022-02-11T01:00:09.000Z
|
import pandas as pd
import streamlit as st
import folium
import numpy as np
from streamlit_folium import folium_static
import plotly.express as px
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def get_data(path):
data = pd.read_csv(path)
return data
def remove_duplicates (df):
df = df.drop_duplicates(subset = ['id'], keep = 'last')
return df
def remove_value (df):
df = df.drop(15870)
return df
def data_overview(df):
if st.checkbox('Mostre o dataset'):
st.write(df.head(50))
st.sidebar.title('Projeto House Hocket')
st.sidebar.write('House Rocket é uma empresa que trabalha com a compra e venda de imóveis. '
'O Cientista de dados da empresa deverá ajudar a encontrar as melhores '
'oportunidades de negócio.')
st.sidebar.write("Para mais informações sobre o projeto, acesse: "
"[GitHub](https://github.com/rsoliveirac/house-rocket-project)")
return None
def metricas (df):
st.markdown("<h1 style='text-align: center; color: black;'> Análise Descritiva</h1>", unsafe_allow_html=True)
atri_num = df.select_dtypes(include = ['int64', 'float64'])
# dropping the 'id' column
atri_num = atri_num.iloc[:, 1: ]
# measures of central tendency
df_mean = pd.DataFrame(atri_num.apply(np.mean)).T
df_median = pd.DataFrame(atri_num.apply(np.median)).T
# measures of dispersion
df_std = pd.DataFrame(atri_num.apply(np.std)).T
df_min = pd.DataFrame(atri_num.apply(np.min)).T
df_max = pd.DataFrame(atri_num.apply(np.max)).T
# concatenating the statistics
est = pd.concat( [df_mean, df_median, df_std, df_min, df_max ] ).T.reset_index()
# renaming the columns
est.columns = [ 'atributos','media', 'mediana', 'std', 'min', 'max']
st.dataframe(est, width = 1000)
return df
def hipoteses (df):
st.markdown("<h1 style='text-align: center; color: black;'>Testando Hipóteses de Negócio</h1>", unsafe_allow_html=True)
c1,c2 = st.beta_columns(2)
c1.subheader('Hipótese 1: Imóveis com vista para a água são em média 30% mais caros')
h1 = df[['price', 'waterfront', 'sqft_lot']].groupby('waterfront').mean().reset_index()
h1['waterfront'] = h1['waterfront'].astype(str)
fig = px.bar(h1, x='waterfront', y = 'price', color = 'waterfront', labels={"waterfront": "Visão para água",
"price": "Preço"},
template= 'simple_white')
fig.update_layout(showlegend = False)
c1.plotly_chart(fig, use_container_width= True)
#=========================================
# ========== H2 ==========
#==========================================
c2.subheader('Hipótese 2: Imóveis com data de construção menor que 1955 são em média 50% mais baratos')
df['construcao'] = df['yr_built'].apply(lambda x: '> 1955' if x > 1955
else '< 1955')
h2 = df[['construcao', 'price', 'sqft_lot']].groupby('construcao').mean().reset_index()
fig2 = px.bar(h2, x='construcao', y = 'price', color = 'construcao', labels = {"construcao":"Ano da Construção",
'price': 'Preço'},
template='simple_white')
fig2.update_layout(showlegend = False)
c2.plotly_chart(fig2, use_container_width= True)
#=========================================
# ========== H3 ==========
#==========================================
c3,c4 = st.beta_columns(2)
c3.subheader('Hipótese 3: Imóveis sem porão com maior área total são 40% mais caros do que imóveis com porão')
df['porao'] = df['sqft_basement'].apply(lambda x: 'nao' if x == 0
else 'sim')
h3 = df[['porao', 'sqft_lot', 'price']].groupby('porao').sum().reset_index()
fig3 = px.bar(h3, x='porao', y = 'price', color = 'porao', labels = {'price': 'Preço',
'sqft_lot': 'Área Total'},
template= 'simple_white')
fig3.update_layout(showlegend = False)
c3.plotly_chart(fig3, use_container_width= True)
#=========================================
# ========== H4 ==========
#==========================================
c4.subheader('Hipótese 4: Imóveis que nunca foram reformadas são em média 20% mais baratos')
df['renovacao'] = df['yr_renovated'].apply(lambda x: 'sim' if x > 0 else
'nao' )
h6 = df[['price', 'renovacao', 'sqft_lot']].groupby('renovacao').mean().reset_index()
fig4 = px.bar(h6, x='renovacao', y = 'price', color = 'renovacao', labels = {'renovacao':'Renovação',
'price': 'Preço'}, template = 'simple_white')
fig4.update_layout(showlegend = False)
c4.plotly_chart(fig4, use_container_width= True)
#=========================================
# ========== H5 ==========
#==========================================
c5, c6 = st.beta_columns(2)
c5.subheader('Hipótese 5: Imóveis em más condições mas com boa vista são 10% mais caros')
h71 = df[df['condition'] == 1]
h7 = h71[['price', 'view', 'sqft_lot']].groupby('view').sum().reset_index()
fig5 = px.bar(h7, x= 'view', y = 'price', color = 'view', labels = {'price':'Preço','view': 'Qualidade da Vista'},
template = 'simple_white')
fig5.update_layout(coloraxis_showscale=False)
c5.plotly_chart(fig5, use_container_width= True)
#=========================================
# ========== H4 ==========
#==========================================
c6.subheader('Hipótese 6: Imóveis antigos e não renovados são 40% mais baratos')
df['renovacao'] = df['yr_renovated'].apply(lambda x: 'sim' if x > 0 else
'nao')
df['contrucao'] = df['yr_built'].apply(lambda x: 'antigo' if (x < 1951) else
'atual')
h8 = df[df['renovacao'] == 1]
h8 = df[['contrucao', 'price', 'sqft_lot']].groupby('contrucao').sum().reset_index()
fig6 = px.bar(h8, x ='contrucao', y = 'price', color = 'contrucao', labels = {'price':'Preço','contrucao': 'Tempo de Imóveis não renovados'} ,
template= 'simple_white')
fig6.update_layout(showlegend = False)
c6.plotly_chart(fig6, use_container_width= True)
#=========================================
# ========== H7 ==========
#==========================================
c7, c8 = st.beta_columns(2)
c7.subheader('Hipótese 7: Imóveis com mais banheiros são em média 5% mais caros')
df['banheiro'] = df['bathrooms'].apply(lambda x: '0-3' if (x > 0 ) & (x < 3) else
'3-5' if (x > 3) & (x < 5) else
'5-8')
h9 = df[['banheiro', 'price', 'sqft_lot']].groupby('banheiro').mean().reset_index()
fig7 = px.bar(h9, x = 'banheiro', y = 'price', color = 'banheiro', labels = {'price':'Preço','banheiro':
'Quantidade de banheiros'},
template= 'simple_white')
fig7.update_layout(showlegend = False)
c7.plotly_chart(fig7, use_container_width= True)
#=========================================
# ========== H8 ==========
#==========================================
c8.subheader('Hipótese 8: Imóveis renovados recentemente são 35% mais caros')
df['contrucao'] = df['yr_built'].apply(lambda x: 'antigo' if (x < 1951) else
'atual')
h10 = df[['contrucao', 'price', 'sqft_lot']].groupby('contrucao').mean().reset_index()
fig8 = px.bar(h10, x = 'contrucao', y = 'price', color = 'contrucao', labels = {'price':'Preço', 'contrucao': 'Tempo de renovação'},
template = 'simple_white')
fig8.update_layout(showlegend = False)
c8.plotly_chart(fig8, use_container_width= True)
#=========================================
# ========== H9 ==========
#==========================================
st.subheader('Hipótese 9: O crescimento do preço dos imóveis mês após mês no ano de 2014 é de 10% ')
df['date'] = pd.to_datetime(df['date'])
df['mes'] = df['date'].dt.month
df['ano'] = df['date'].dt.year
year_df = df[df['ano'] == 2014]
h41 = year_df[['mes', 'price', 'sqft_lot']].groupby('mes').sum().reset_index()
fig41 = px.line(h41, x='mes', y = 'price', color_discrete_sequence= ['teal'], template = 'simple_white',
labels={'mes':'Mês', 'price': 'Preço'})
st.plotly_chart(fig41, use_container_width= True)
#=========================================
# ========== 10==========
#==========================================
st.subheader('Hipótese 10: Imóveis com 3 banheiros tem um crescimento mês após mês de 15 %')
h5 = df[(df['bathrooms'] == 3)]
h5 = h5[['mes', 'price', 'sqft_lot']].groupby('mes').sum().reset_index()
fig5 = px.line(h5, x = 'mes', y = 'price', color_discrete_sequence= ['teal'], template = 'simple_white',
labels= {'mes':'Mês', 'price': 'Preço'})
st.plotly_chart(fig5, x='mes', y='price', use_container_width= True)
def questoes_negocio (df):
st.markdown("<h1 style='text-align: center; color: black;'> Questões de Negócio</h1>", unsafe_allow_html=True)
st.subheader('1. Quais são os imóveis que a House Rocket deveria comprar e por qual preço?')
# Answering the question
a = df[['zipcode', 'price']].groupby('zipcode').median().reset_index()
df2 = pd.merge(a, df, on='zipcode', how = 'inner')
df2 = df2.rename(columns = {'price_y' : 'price', 'price_x' : 'price_median'} ) # renaming the columns
# creating the column
for i, row in df2.iterrows():
if (row['price_median'] >= row['price']) & (row['condition'] < 3):
df2.loc[i,'pay'] = 'sim'
else:
df2.loc[i, 'pay'] = 'nao'
# create the marker color column
for i, row in df2.iterrows():
if (row['pay'] == 'sim'):
df2.loc[i,'marker_color'] = 'green'
else:
df2.loc[i, 'marker_color'] = 'red'
############################################
st.markdown('Mapa - Quais imóveis devem ser comprados?')
st.markdown("""
<style>
.big-font {
font-size:14px !important;
}
</style>
""", unsafe_allow_html=True)
st.markdown('<p class="big-font"> Em verde os imóveis indicados '
'para compra <br> Em vermelho os imóveis não indicados para compra </p>', unsafe_allow_html=True)
mapa = folium.Map(width = 600, height = 300,
location = [df['lat'].mean(),df[ 'long'].mean()],
default_zoom_start=30)
features = {}
for row in pd.unique(df2['marker_color']):
features[row] = folium.FeatureGroup(name=row)
for index, row in df2.head(10000).iterrows():
circ = folium.Circle([row['lat'], row['long']],
radius=150, color=row['marker_color'], fill_color=row['marker_color'],
fill_opacity = 1, popup= 'Compra: {0}, Preço: {1}'.format(row['pay'],
row['price']))
circ.add_to(features[row['marker_color']])
for row in pd.unique(df2["marker_color"]):
features[row].add_to(mapa)
folium.LayerControl().add_to(mapa)
folium_static(mapa)
############
# QUESTION 2 #
############
st.subheader('2. Uma vez comprado, qual é o melhor momento para vendê-lo e por qual preço?')
df3 = df2.copy()
df3['season'] = df3['mes'].apply(lambda x: 'summer' if (x > 5) & (x < 8) else
'spring' if (x > 2) & (x < 5) else
'fall' if (x > 8) & (x < 12) else
'winter')
df3 = df3[df3['pay'] == 'sim']
df4 = df3[['season', 'zipcode', 'price']].groupby(['zipcode', 'season']).median().reset_index()
df4 = df4.rename(columns = {'price' : 'price_medi_season', 'season': 'season_median'} )
df5 = pd.merge(df3, df4, on='zipcode', how = 'inner')
for i, row in df5.iterrows():
if (row['price_medi_season'] > row['price']):
df5.loc[i, 'sale'] = row['price'] * 0.1
else:
df5.loc[i, 'sale'] = row['price'] * 0.3
df5= df5[['price_medi_season', 'price', 'sale', 'price_median', 'season', 'zipcode']]
fig11 = px.bar(df5, x = 'season', y = 'sale', color = 'season', labels={'season':'Estação do Ano', 'sale': 'Preço de Venda'},
template = 'simple_white')
fig11.update_layout(showlegend = False)
st.plotly_chart(fig11, x='season', y='sale', use_container_width= True)
return None
def tabela (df):
st.markdown("<h1 style='text-align: center; color: black;'> Resumo sobre as Hipóteses </h1>", unsafe_allow_html=True)
hipoteses = pd.DataFrame({
'.': ['Verdadeira', 'Falsa', 'Verdadeira', 'Verdadeira', 'Falsa', 'Verdadeira', 'Falsa', 'Falsa',
'Falsa', 'Falsa']}, index=['H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10'])
hipoteses = hipoteses.style.set_table_styles([dict(selector='th', props=[('text-align', 'center')])])
hipoteses.set_properties(**{'text-align': 'center'}).hide_index()
st.table(hipoteses)
return None
if __name__ == "__main__":
path = 'kc_house_data.csv'
df = get_data(path)
data_overview(df)
metricas(df)
hipoteses(df)
questoes_negocio(df)
tabela(df)
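# Illustration: the descriptive-statistics table built in metricas(), reduced
# to a standalone pandas sketch on the numeric columns of any DataFrame. The
# helper name is illustrative only; pandas' sample std is used here instead of
# numpy's population std, so values may differ slightly.
import pandas as pd
def describe_numeric_sketch(df):
    num = df.select_dtypes(include=['int64', 'float64'])
    est = pd.concat(
        [num.mean(), num.median(), num.std(), num.min(), num.max()], axis=1
    ).reset_index()
    est.columns = ['atributos', 'media', 'mediana', 'std', 'min', 'max']
    return est
# Example: describe_numeric_sketch(pd.DataFrame({'price': [100.0, 200.0, 400.0],
#                                                'bedrooms': [2, 3, 4]}))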
| 43.70997
| 146
| 0.501659
|
7950aadb09524bb838ee684f97bfc15550c83952
| 13,972
|
py
|
Python
|
Lmap.py
|
retr0-13/LMap
|
c0832882b48abc33283e598f374df48f0c28b63e
|
[
"Apache-2.0"
] | 55
|
2021-11-05T06:25:36.000Z
|
2022-03-11T02:59:25.000Z
|
Lmap.py
|
retr0-13/LMap
|
c0832882b48abc33283e598f374df48f0c28b63e
|
[
"Apache-2.0"
] | 3
|
2021-11-08T02:44:00.000Z
|
2022-01-18T12:01:18.000Z
|
Lmap.py
|
retr0-13/LMap
|
c0832882b48abc33283e598f374df48f0c28b63e
|
[
"Apache-2.0"
] | 11
|
2021-11-05T06:25:42.000Z
|
2022-01-18T07:03:07.000Z
|
from ipaddress import ip_address,ip_network
import asyncio
import re
from lxml import etree
import argparse
import json
from configparser import ConfigParser
from pathlib import Path
import sys
from rich.progress import Progress
from rich.console import Console
from httpx import AsyncClient
from prettytable import PrettyTable
console = Console()
def banner():
console.print('Lmap V2.0 By Lion', style='bold cyan', justify='center')
console.print('A tool combined with the advantages of masscan and nmap', style='bold cyan', justify='center')
console.print('Enjoy~', style='bold cyan', justify='center')
def create_ini(masscan_path, nmap_path):
config = ConfigParser()
config['Masscan'] = {'path': masscan_path, 'rate': '500', 'ConcurrentLimit': '3', 'PortGap': '11000', 'IpGap': '10',
'waf-threshold': '50'}
config['Nmap'] = {'path': nmap_path, 'ConcurrentLimit': '10'}
config['Httpx'] = {'ConcurrentLimit': '100'}
configfile = (Path(sys.argv[0]).parent / 'config.ini')
config.write(configfile.open('w+', encoding='utf-8'))
def split_ip(ips):
ip_list = []
if (',' in ips):
ip_list = ips.split(',')
elif ('/' in ips):
net = ip_network(ips)
for ip in zip(net):
ip_list.append(str(ip[0]))
elif ('-' in ips):
start_ip,end_ip = ips.split('-')
start_ip = ip_address(start_ip)
end_ip = ip_address(end_ip)
while start_ip <= end_ip:
ip_list.append(str(start_ip))
start_ip += 1
else:
ip_list.append(ips)
return ip_list
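# Illustration: what split_ip is expected to produce for the three supported
# notations, plus a standalone CIDR expansion for comparison (strict=False is
# used here so host bits in the prefix do not raise).
from ipaddress import ip_network
def expand_cidr_sketch(cidr):
    return [str(ip) for ip in ip_network(cidr, strict=False)]
# expand_cidr_sketch('192.168.1.0/30') -> ['192.168.1.0', '192.168.1.1',
#                                          '192.168.1.2', '192.168.1.3']
# split_ip('10.0.0.1-10.0.0.3')        -> ['10.0.0.1', '10.0.0.2', '10.0.0.3']
# split_ip('10.0.0.1,10.0.0.5')        -> ['10.0.0.1', '10.0.0.5']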
async def async_exec_cmd(cmd, sem=None):
if (sem != None):
async with sem:
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
info = stdout.decode() if stdout else stderr.decode()
return info
else:
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
info = stdout.decode() if stdout else stderr.decode()
return info
async def masscan(path, ips, ports, nmap_queue, sem, waf_threshold,httpx_queue):
cmd = f'{path} {ips} -p {ports} --rate {masscan_rate}'
info = await async_exec_cmd(cmd, sem)
re_obj1 = re.compile('Discovered open port \d+/')
re_obj2 = re.compile('on \d*.\d*.\d*.\d*')
port_list = [element[21:-1] for element in re_obj1.findall(info)]
ip_list = [element[3:] for element in re_obj2.findall(info)]
# Temporary dict used to detect WAF-protected hosts
tmp_dict = {}
if (len(ip_list) >= 1):
for i in range(len(ip_list)):
ip = ip_list[i]
port = port_list[i]
print(f'\033[32m{port} on {ip} is open\033[0m')
# Record into the temporary dict
if (ip not in tmp_dict.keys()):
tmp_dict[ip] = {}
if ('count' not in tmp_dict[ip].keys()):
tmp_dict[ip]['count'] = 0
tmp_dict[ip]['count'] += 1
if (tmp_dict[ip]['count'] > int(waf_threshold)):
waf_ip.append(ip)
if (len(ip_list) >= 1):
for i in range(len(ip_list)):
ip = ip_list[i]
port = port_list[i]
# Ignore IPs already flagged as behind a WAF
if (ip in waf_ip):
continue
# Record into the global result dict
if (ip not in result_dic.keys()):
result_dic[ip] = {}
if ('count' not in result_dic[ip].keys()):
result_dic[ip]['count'] = 0
result_dic[ip]['portlist'] = []
result_dic[ip]['count'] += 1
result_dic[ip]['portlist'].append({'port': port, 'service': '-', 'product': '-','title':'-'})
await nmap_queue.put({'ip': ip, 'port': port})
if (httpx_queue != None):
await httpx_queue.put({'ip': ip, 'port': port})
progress_bar.update(masscan_progress, advance=1)
# Producer-consumer model: as soon as an open port is found, nmap probes its service version
async def nmap(path, nmap_queue, nmap_args='-sS -Pn -n'):
while True:
data = await nmap_queue.get()
ip = data['ip']
port = data['port']
xml_file = f'temp/{ip}:{port}.xml'
cmd = f'{path} {nmap_args} {ip} -p {port} -oX {xml_file}'
nmap_progress = progress_bar.add_task(f'[cyan]nmap service on {ip}:{port}')
try:
await asyncio.wait_for(async_exec_cmd(cmd), timeout=60)
root = etree.parse(xml_file)
state = root.xpath("//state/@state")[0]
service = root.xpath("//service/@name")
product = root.xpath("//service/@product")
if (state == 'open'):
if (service != []):
for port_data in result_dic[ip]['portlist']:
if (port_data['port'] == port):
port_data['service'] = service[0]
print(f'\033[32mservice on {ip}:{port} is {service[0]}\033[0m')
if (product != []):
port_data['product'] = product[0]
print(f'\033[32mproduct on {ip}:{port} is {product[0]}\033[0m')
except Exception:
pass
finally:
progress_bar.update(nmap_progress, completed=True, visible=False)
nmap_queue.task_done()
# Producer-consumer model: as soon as an open port is found, try to fetch its web title
async def async_request_get(headers, httpx_queue,sem):
while True:
data = await httpx_queue.get()
ip = data['ip']
port = data['port']
title = '-'
async with AsyncClient(verify=False) as async_client:
# Limit concurrency
async with sem:
try:
url = f'http://{ip}:{port}'
res = await async_client.get(url, headers=headers, timeout=5, follow_redirects=True)
if (res.status_code == 200):
html = etree.HTML(res.text, etree.HTMLParser())
if (len(html.xpath('//head/title/text()')) > 0):
title = html.xpath('//head/title/text()')[0]
except Exception:
pass
# If the HTTP request failed, retry over HTTPS
if (title == '-'):
try:
url = f'https://{ip}:{port}'
res = await async_client.get(url, headers=headers, timeout=5,follow_redirects=True)
if (res.status_code == 200):
html = etree.HTML(res.text, etree.HTMLParser())
if (len(html.xpath('//head/title/text()')) > 0):
title = html.xpath('//head/title/text()')[0]
except Exception:
pass
if (title != '-'):
print(f'\033[33mtitle on {url} is {title}\033[0m')
portlist = result_dic[ip]['portlist']
for port_data in portlist:
if (int(port_data['port']) == int(port)):
port_data['title'] = title
httpx_queue.task_done()
async def main():
# Read the input targets
ip_list = []
if (file):
for line in open(file, 'r', encoding='utf-8'):
ip_list.append(line.strip('\n'))
else:
ip_list = split_ip(target)
start_port, end_port = [int(i) for i in port_range.split('-')]
# Initialize the global result dict
global result_dic
result_dic = {}
global waf_ip
waf_ip = []
ports_list = []
headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Mobile Safari/537.36 Edg/96.0.1054.43'
}
# Split the port range into groups
if (end_port - start_port >= masscan_port_gap):
for i in range((end_port - start_port) // masscan_port_gap):
ports_list.append(f'{start_port + i * masscan_port_gap}-{start_port + (i + 1) * masscan_port_gap - 1}')
ports_list.append(
(f'{start_port + ((end_port - start_port) // masscan_port_gap) * masscan_port_gap}-{end_port}'))
else:
ports_list.append(f'{start_port}-{end_port}')
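    # For instance, with port range 1-65535 and masscan_port_gap = 10000 this
    # yields ['1-10000', '10001-20000', ..., '50001-60000', '60001-65535'].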
    # split the IP list into chunks
ip_part_list = [ip_list[i:i + masscan_ip_gap] for i in range(0, len(ip_list), masscan_ip_gap)]
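    # For instance, with masscan_ip_gap = 64 a list of 150 targets is split
    # into chunks of 64, 64 and 22 addresses, one masscan task per chunk.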
    # create the nmap consumers
nmap_queue = asyncio.Queue()
nmap_tasklist = []
for _ in range(nmap_concurrent_limit):
if (scan_version == True):
nmap_tasklist.append(
asyncio.create_task(nmap(path=nmap_path, nmap_queue=nmap_queue, nmap_args='-sV -Pn -n')))
if (scan_version == False):
nmap_tasklist.append(
asyncio.create_task(nmap(path=nmap_path, nmap_queue=nmap_queue, nmap_args='-sS -Pn -n')))
if (scan_title):
        # create the httpx consumers
httpx_queue = asyncio.Queue()
httpx_sem = asyncio.Semaphore(int(httpx_concurrent_limit))
httpx_tasklist = []
httpx_tasklist.append(asyncio.create_task(async_request_get(headers=headers,httpx_queue=httpx_queue,sem=httpx_sem)))
    # create the masscan producers
global masscan_progress
masscan_progress = progress_bar.add_task('[blue]masscan progressing...',
total=(len(ip_part_list) * len(ports_list)))
masscan_sem = asyncio.Semaphore(int(masscan_concurrent_limit))
masscan_tasklist = []
if (scan_title):
for ip_part in ip_part_list:
for ports in ports_list:
ips = ','.join(ip_part)
masscan_tasklist.append(
asyncio.create_task(masscan(path=masscan_path, ips=ips, ports=ports, nmap_queue=nmap_queue, sem=masscan_sem,
waf_threshold=waf_threshold,httpx_queue=httpx_queue)))
else:
for ip_part in ip_part_list:
for ports in ports_list:
ips = ','.join(ip_part)
masscan_tasklist.append(
asyncio.create_task(masscan(path=masscan_path, ips=ips, ports=ports, nmap_queue=nmap_queue, sem=masscan_sem,
waf_threshold=waf_threshold,httpx_queue=None)))
    # wait for all queues to drain
await asyncio.gather(*masscan_tasklist)
await nmap_queue.join()
if (scan_title):
await httpx_queue.join()
    # cancel the nmap consumers
for nmap_task in nmap_tasklist:
nmap_task.cancel()
    # cancel the httpx consumers
if (scan_title):
for httpx_task in httpx_tasklist:
httpx_task.cancel()
progress_bar.update(masscan_progress, completed=True, visible=False)
    # write the output files
if (output_url):
with open(output_url, 'a+', encoding='utf-8') as f:
for ip, data in result_dic.items():
for port_data in data['portlist']:
f.write(f"http://{ip}:{port_data['port']}\n")
if (output_json):
with open(output_json, 'w+', encoding='utf-8') as f:
json.dump(result_dic, f, sort_keys=True, indent=4, separators=(',', ':'))
    # build the summary table
table = PrettyTable(['IP', 'Port', 'Service', 'Product','Title'])
for ip, data in result_dic.items():
for port_data in data['portlist']:
table.add_row([ip, port_data['port'], port_data['service'], port_data['product'],port_data['title']])
print(table)
if __name__ == '__main__':
banner()
    # initialise the config file and the temp folder
configfile = Path(sys.argv[0]).parent / 'config.ini'
if (configfile.exists() == False):
masscan_path = input('please input masscan path\n')
nmap_path = input('please input nmap path\n')
create_ini(masscan_path, nmap_path)
temp_file = Path(sys.argv[0]).parent / 'temp'
if (temp_file.exists() == False):
temp_file.mkdir()
config = ConfigParser()
config.read_file(configfile.open('r', encoding='utf-8'))
masscan_path = config['Masscan']['path']
nmap_path = config['Nmap']['path']
waf_threshold = config['Masscan']['waf-threshold']
masscan_rate = config['Masscan']['rate']
masscan_concurrent_limit = int(config['Masscan']['ConcurrentLimit'])
masscan_port_gap = int(config['Masscan']['PortGap'])
masscan_ip_gap = int(config['Masscan']['IpGap'])
nmap_concurrent_limit = int(config['Nmap']['ConcurrentLimit'])
httpx_concurrent_limit = int(config['Httpx']['ConcurrentLimit'])
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--target', help='please input ips like 192.168.0.1,192.168.0.2 or 192.168.0.1/24 or 1.1.1.1-2.2.2.2')
parser.add_argument('-p', '--port', help='please input ports like 1-65535', required=True)
    parser.add_argument('-f', '--file', help='please input a file with one target per line')
parser.add_argument('-oj', '--output-json', help='please input output json file', default=None)
parser.add_argument('-ou', '--output-url', help='please input output url file', default=None)
    # argparse's type=bool treats any non-empty string (including 'False') as True,
    # so parse the flag values explicitly
    str2bool = lambda s: str(s).lower() in ('true', '1', 'yes')
    parser.add_argument('-sv', '--scan-version', help='whether to run nmap service/version detection (-sV), True or False',
                        type=str2bool, default=False)
    parser.add_argument('-st', '--scan-title', help='whether to fetch web page titles, True or False',
                        type=str2bool, default=True)
args = parser.parse_args()
target = args.target
file = args.file
port_range = args.port
output_json = args.output_json
output_url = args.output_url
scan_version = args.scan_version
scan_title = args.scan_title
with Progress() as progress_bar:
asyncio.run(main())
| 43.256966
| 176
| 0.559619
|
7950ab2423979c854385909718e1f4a09cee0749
| 25,962
|
py
|
Python
|
src/vocola2/_vocola_main.py
|
dougransom/Vocola2
|
a2b986031fd4c6c306a47f6e1fdf252d5643e16c
|
[
"MIT"
] | 1
|
2021-01-10T21:56:49.000Z
|
2021-01-10T21:56:49.000Z
|
src/vocola2/_vocola_main.py
|
dougransom/Vocola2
|
a2b986031fd4c6c306a47f6e1fdf252d5643e16c
|
[
"MIT"
] | null | null | null |
src/vocola2/_vocola_main.py
|
dougransom/Vocola2
|
a2b986031fd4c6c306a47f6e1fdf252d5643e16c
|
[
"MIT"
] | null | null | null |
# -*- coding: latin-1 -*-
"""_vocola_main.py - Natlink support for Vocola
Contains:
- "Built-in" voice commands
- Autoloading of changed command files
Copyright (c) 2002-2012 by Rick Mohr.
Portions Copyright (c) 2012-2015 by Hewlett-Packard Development Company, L.P.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import traceback
import os # access to file information
import os.path # to parse filenames
import time # print time in messages
from stat import * # file statistics
import re
import natlink
import natlinkmain
from natlinkutils import *
thisDir = os.path.split(__file__)[0]
##########################################################################
# #
# Configuration #
# #
##########################################################################
try:
import natlinkstatus
Quintijn_installer = True
status = natlinkstatus.NatlinkStatus()
# print('status: %s'% status)
VocolaEnabled = not not status.getVocolaUserDirectory()
print(f'VocolaEnabled: {VocolaEnabled}')
if VocolaEnabled:
VocolaGrammarsDirecory = status.getVocolaGrammarsDirectory()
VocolaUserDirectory = status.getVocolaUserDirectory()
VocolaDirectory = status.getVocolaDirectory()
else:
VocolaGrammarsDirecory = ""
VocolaUserDirectory = ""
VocolaDirectory = ""
# print('VocolaEnabled: %s'% VocolaEnabled)
language = status.getLanguage()
if language != 'enx':
print(' language: %s'% language)
except ImportError:
Quintijn_installer = False
VocolaEnabled = True
language = 'enx'
traceback.print_exc()
if thisDir and os.path.isdir(thisDir):
pass
if VocolaEnabled:
    if True: #VocolaDirectory == thisDir:
        print(f"thisDir of _vocola_main: {thisDir}, VocolaDirectory from natlinkstatus: {VocolaDirectory}")
    else:
        raise IOError(f"no valid directory found for _vocola_main.py: {thisDir}")
# get location of MacroSystem folder:
CoreDirectory = status.getCoreDirectory()
NatlinkFolder = os.path.normpath(os.path.join(CoreDirectory, ".."))
VocolaFolder = thisDir
ExecFolder = os.path.normpath(os.path.join(thisDir, 'exec'))
ExtensionsFolder = os.path.normpath(os.path.join(thisDir, 'extensions'))
# NatlinkFolder = os.path.abspath(NatlinkFolder)
if VocolaEnabled:
print('_vocola_main, Vocola is Enabled, check sys.path for ExecFolder and ExtensionsFolder')
for f in ExecFolder, ExtensionsFolder:
if f not in sys.path:
print(f'_vocola_main, add to sys.path: {f}')
sys.path.append(f)
else:
print('_vocola_main, Vocola is NOT Enabled')
def get_command_folder():
commandFolder = get_top_command_folder()
if commandFolder:
uDir = os.path.join(commandFolder, language)
if os.path.isdir(uDir):
commandFolder = uDir
return commandFolder
def get_top_command_folder():
return VocolaUserDirectory
# configured = None
# try:
# import natlinkstatus
# # Quintijn's's installer:
# configured = status.NatlinkStatus().getVocolaUserDirectory()
# # print('Vocola configured: %s'% configured)
# except ImportError:
# try:
# import RegistryDict
# import win32con
# # Scott's installer:
# r = RegistryDict.RegistryDict(win32con.HKEY_CURRENT_USER,r"Software\Natlink")
# if r:
# configured = r["VocolaUserDirectory"]
# except ImportError:
# pass
# if os.path.isdir(configured):
# return configured
#
# systemCommandFolder = os.path.join(VocolaFolder, 'Commands')
# if os.path.isdir(systemCommandFolder):
# return systemCommandFolder
#
# return None
commandFolder = get_command_folder()
if VocolaEnabled and not commandFolder:
print("Warning: no Vocola command folder found!", file=sys.stderr)
import VocolaUtils
VocolaUtils.Language = language
###########################################################################
# #
# The built-in commands #
# #
###########################################################################
class ThisGrammar(GrammarBase):
gramSpec = """
<NatLinkWindow> exported = [Show] (Natlink|Vocola) Window;
<edit> exported = Edit [Voice] Commands;
<editGlobal> exported = Edit Global [Voice] Commands;
<editMachine> exported = Edit Machine [Voice] Commands;
<editGlobalMachine> exported = Edit Global Machine [Voice] Commands;
<loadAll> exported = Load All [Voice] Commands;
<loadCurrent> exported = Load [Voice] Commands;
<loadGlobal> exported = Load Global [Voice] Commands;
<loadExtensions> exported = Load [Voice] Extensions;
<discardOld> exported = Discard Old [Voice] Commands;
"""
if language == 'nld':
gramSpec = """
<NatLinkWindow> exported = Toon (Natlink|Vocola) venster;
<edit> exported = (Eddit|Bewerk|Sjoo|Toon) [stem|vojs] (Commandoos|Commands);
<editGlobal> exported = (Eddit|Bewerk|Sjoo|Toon) (Global|globale) [stem|vojs] (Commandoos|Commands);
<editMachine> exported = (Eddit|Bewerk|Sjoo|Toon) Machine [stem|vojs] (Commandoos|Commands);
<editGlobalMachine> exported = (Eddit|Bewerk|Sjoo|Toon) (Global|globale) Machine [stem|vojs] (Commandoos|Commands);
<loadAll> exported = (Laad|Lood) alle [stem|vojs] (Commandoos|Commands);
<loadCurrent> exported = (Laad|Lood) [stem|vojs] (Commandoos|Commands);
<loadGlobal> exported = (Laad|Lood) globale [stem|vojs] (Commandoos|Commands);
<loadExtensions> exported = Laad [stem] extensies;
<discardOld> exported = (Discard|Verwijder) (oude|oold) [stem|vojs] (Commandoos|Commands);
"""
elif language == 'fra':
gramSpec = """
<NatLinkWindow> exported = [Afficher] Fenetre (Natlink|Vocola);
<edit> exported = Editer Commandes [Vocales];
<editGlobal> exported = Editer Commandes [Vocales] Globales;
<editMachine> exported = Editer Commandes [Vocales] Machine;
<editGlobalMachine> exported = Editer Commandes [Vocales] Globales Machine;
<loadAll> exported = Charger Toutes Les Commandes [Vocales];
<loadCurrent> exported = Charger Commandes [Vocales];
<loadGlobal> exported = Charger Commandes [Vocales] Globales;
<loadExtensions> exported = Charger Extensions [Vocales];
<discardOld> exported = Effacer Commandes [Vocales] Precedentes;
"""
elif language == 'deu':
gramSpec = """
<NatLinkWindow> exported = [Zeige] (Natlink|Vocola) Fenster;
<edit> exported = Bearbeite [Sprach] Befehle;
<editGlobal> exported = Bearbeite globale [Sprach] Befehle;
<editMachine> exported = Bearbeite Maschinen [Sprach] Befehle;
<editGlobalMachine> exported = Bearbeite globale Maschinen [Sprach] Befehle;
<loadAll> exported = Lade alle [Sprach] Befehle;
<loadCurrent> exported = Lade [Sprach] Befehle;
<loadGlobal> exported = Lade globale [Sprach] Befehle;
<loadExtensions> exported = Lade [Sprach] Extensions;
<discardOld> exported = Verwerfe alte [Sprach] Befehle;
"""
elif language == 'ita':
gramSpec = """
<NatLinkWindow> exported = [Mostra] Finestra Di (Natlink|Vocola);
<edit> exported = Modifica Comandi [Vocali];
<editGlobal> exported = Modifica Comandi [Vocali] Globali;
<editMachine> exported = Modifica Comandi [Vocali] [del] Computer;
<editGlobalMachine> exported = Modifica Comandi [Vocali] Globali [del] Computer;
<loadAll> exported = Carica Tutti I Comandi [Vocali];
<loadCurrent> exported = Carica I Comandi [Vocali];
        <loadGlobal> exported = Carica Comandi [Vocali] Globali;
<loadExtensions> exported = Carica Estensioni [Vocali];
<discardOld> exported = Annulla Vecchi Comandi [Vocali];
"""
elif language == 'esp':
gramSpec = """
<NatLinkWindow> exported = [Mostrar] Ventana de (Natlink|Vocola) ;
<edit> exported = (Modificar|Editar) Comandos [de voz];
<editGlobal> exported = (Modificar|Editar) Comandos [de voz] Globales ;
<editMachine> exported = (Modificar|Editar) Comandos [de voz] de (este ordenador|la Computadora);
<editGlobalMachine> exported = (Modificar|Editar) Comandos [de voz] Globales de (este ordenador|la Computadora);
<loadAll> exported = (Recargar|Cargar) Todos Los Comandos [de voz];
<loadCurrent> exported = (Recargar|Cargar) Comandos [de voz];
<loadGlobal> exported = (Recargar|Cargar) Comandos [de voz] Globales;
<loadExtensions> exported = (Recargar|Cargar) Extensiones [de voz];
<discardOld> exported = Descartar Comandos [de voz] Viejos;
"""
elif language != 'enx':
        print("""\n\n
Vocola Warning: no "%s" translations of the built-in Vocola commands
(e.g., the commands to load voice commands) are currently available;
consider helping to translate them -- inquire at
https://www.knowbrainer.com/forums/forum/categories.cfm?catid=25. For
now the English versions, such as "Edit Commands" and "Edit Global
Commands", are active.
""" % language, file=sys.stderr)
def initialize(self):
if 'COMPUTERNAME' in os.environ:
self.machine = os.environ['COMPUTERNAME'].lower()
else: self.machine = 'local'
self.load_extensions()
self.loadAllFiles(False)
self.load(self.gramSpec)
self.activateAll()
def gotBegin(self, moduleInfo):
self.currentModule = moduleInfo
# delay enabling until now to avoid Natlink clobbering our callback:
enable_callback()
# Get app name by stripping folder and extension from currentModule name
def getCurrentApplicationName(self):
"""get the current application name of the foreground window
The same named function in natlinkmain returns the lowercase executable of the running
program, but if "ApplicationFrameHost" is running (Calc, Photos), that name is returned.
"""
appName = natlinkmain.getCurrentApplicationName(self.currentModule)
return appName
### Miscellaneous commands
# "Show Natlink Window" -- print to output window so it appears
def gotResults_NatLinkWindow(self, words, fullResults):
print("This is the Natlink/Vocola output window")
# "Load Extensions" -- scan for new/changed extensions:
def gotResults_loadExtensions(self, words, fullResults):
self.load_extensions(True)
for module in list(sys.modules.keys()):
if module.startswith("vocola_ext_"):
del sys.modules[module]
def load_extensions(self, verbose=False):
#if sys.modules.has_key("scan_extensions"):
# del sys.modules["scan_extensions"]
import scan_extensions
arguments = ["scan_extensions", ExtensionsFolder]
if verbose:
arguments.insert(1, "-v")
scan_extensions.main(arguments)
### Loading Vocola Commands
# "Load All Commands" -- translate all Vocola files
def gotResults_loadAll(self, words, fullResults):
self.loadAllFiles(True)
# "Load Commands" -- translate Vocola files for current application
def gotResults_loadCurrent(self, words, fullResults):
self.loadSpecificFiles(self.getCurrentApplicationName())
# "Load Global Commands" -- translate global Vocola files
def gotResults_loadGlobal(self, words, fullResults):
self.loadSpecificFiles('')
# "Discard Old [Voice] Commands" -- purge output then translate all files
def gotResults_discardOld(self, words, fullResults):
purgeOutput()
self.loadAllFiles(True)
# Load all command files
def loadAllFiles(self, force):
if commandFolder:
# print('loadAllFiles: %s'% commandFolder)
compile_Vocola(commandFolder, force)
# Load command files for specific application
def loadSpecificFiles(self, module):
special = re.compile(r'([][()^$.+*?{\\])')
pattern = "^" + special.sub(r'\\\1', module)
pattern += "(_[^@]*)?(@" + special.sub(r'\\\1', self.machine)
        pattern += r")?\.vcl$"
p = re.compile(pattern, re.IGNORECASE)
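        # Example: for module "winword" on machine "mypc" the pattern becomes
        #   ^winword(_[^@]*)?(@mypc)?\.vcl$
        # so it matches winword.vcl, winword_edit.vcl and winword@mypc.vcl,
        # but not excel.vcl.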
targets = []
if commandFolder:
targets += [os.path.join(commandFolder,f)
for f in os.listdir(commandFolder) if p.search(f)]
if len(targets) > 0:
for target in targets:
self.loadFile(target)
else:
print(file=sys.stderr)
if module == "":
print("Found no Vocola global command files [for machine '" + \
self.machine + "']", file=sys.stderr)
else:
print("Found no Vocola command files for application '" + module + "' [for machine '" + self.machine + "']", file=sys.stderr)
# Load a specific command file, returning false if not present
def loadFile(self, file):
try:
print(f"try to compile Vocola file: {file}")
os.stat(file)
compile_Vocola(file, False)
return True
except OSError:
return False # file not found
### Editing Vocola Command Files
# "Edit Commands" -- open command file for current application
def gotResults_edit(self, words, fullResults):
app = self.getCurrentApplicationName()
file = app + '.vcl'
comment = 'Voice commands for ' + app
self.openCommandFile(file, comment)
# "Edit Machine Commands" -- open command file for current app & machine
def gotResults_editMachine(self, words, fullResults):
app = self.getCurrentApplicationName()
file = app + '@' + self.machine + '.vcl'
comment = 'Voice commands for ' + app + ' on ' + self.machine
self.openCommandFile(file, comment)
# "Edit Global Commands" -- open global command file
def gotResults_editGlobal(self, words, fullResults):
file = '_vocola.vcl'
comment = 'Global voice commands'
self.openCommandFile(file, comment)
# "Edit Global Machine Commands" -- open global command file for machine
def gotResults_editGlobalMachine(self, words, fullResults):
file = '_vocola@' + self.machine + '.vcl'
comment = 'Global voice commands on ' + self.machine
self.openCommandFile(file, comment)
def FindExistingCommandFile(self, file):
if commandFolder:
f = commandFolder + '\\' + file
if os.path.isfile(f): return f
return ""
# Open a Vocola command file (using the application associated with ".vcl")
def openCommandFile(self, file, comment):
if not commandFolder:
print("Error: Unable to create command file " + \
"because no Vocola command folder found.", file=sys.stderr)
return
path = self.FindExistingCommandFile(file)
if not path:
path = commandFolder + '\\' + file
new = open(path, 'w')
new.write('# ' + comment + '\n\n')
new.close()
#
        # Natlink/DNS bug causes os.startfile or win32api.ShellExecute
        # to crash DNS if allResults is on in *any* grammar (e.g., Unimacro)
#
# Accordingly, use AppBringUp instead:
#
#try:
# os.startfile(path)
#except WindowsError, e:
# print
# print "Unable to open voice command file with associated editor: " + str(e)
# print "Trying to open it with notepad instead."
# prog = os.path.join(os.getenv('WINDIR'), 'notepad.exe')
# os.spawnv(os.P_NOWAIT, prog, [prog, path])
natlink.execScript("AppBringUp \"" + path + "\", \"" + path + "\"")
###########################################################################
# #
# Compiling Vocola files #
# #
###########################################################################
may_have_compiled = False # has the compiler been called?
compiler_error = False # has a compiler error occurred?
# Run Vocola compiler, converting command files from "inputFileOrFolder"
# and writing output to Natlink/MacroSystem
def compile_Vocola(inputFileOrFolder, force):
global may_have_compiled, compiler_error
may_have_compiled = True
executable = sys.prefix + r'\python.exe'
arguments = [VocolaFolder + r'\exec\vcl2py.py']
arguments += ['-extensions', ExtensionsFolder + r'\extensions.csv']
if language == "enx":
arguments += ['-numbers',
'zero,one,two,three,four,five,six,seven,eight,nine']
arguments += ["-suffix", "_vcl"]
if force: arguments += ["-f"]
# arguments += [inputFileOrFolder, NatlinkFolder]
arguments += [inputFileOrFolder, VocolaGrammarsDirecory]
print(f"_vocola_main calls vcl2py.py, grammars go to folder: {VocolaGrammarsDirecory}")
# print(f"calling {arguments}")
hidden_call(executable, arguments)
logName = commandFolder + r'\vcl2py_log.txt'
if os.path.isfile(logName):
try:
log = open(logName, 'r')
compiler_error = True
print(log.read(), file=sys.stderr)
log.close()
os.remove(logName)
except IOError: # no log file means no Vocola errors
pass
# Unload all commands, including those of files no longer existing
def purgeOutput():
    pattern = re.compile(r"_vcl\d*\.pyc?$")
[os.remove(os.path.join(NatlinkFolder,f)) for f
in os.listdir(NatlinkFolder) if pattern.search(f)]
#
# Run program with path executable and arguments arguments. Waits for
# the program to finish. Runs the program in a hidden window.
#
def hidden_call(executable, arguments):
args = [executable] + arguments
try:
import subprocess
si = subprocess.STARTUPINFO()
# Location of below constants seems to vary from Python
# version to version so hardcode them:
si.dwFlags = 1 # subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = 0 # subprocess.SW_HIDE
return subprocess.call(args, startupinfo=si)
except ImportError:
pid = os.spawnv(os.P_NOWAIT, executable, args)
pid, exit_code = os.waitpid(pid, 0)
exit_code = exit_code >> 8
return exit_code
lastVocolaFileTime = 0
lastCommandFolderTime = 0
def compile_changed():
global lastVocolaFileTime, lastCommandFolderTime
global compiler_error
current = getLastVocolaFileModTime()
if current > lastVocolaFileTime:
compiler_error = False
thisGrammar.loadAllFiles(False)
if not compiler_error:
lastVocolaFileTime = current
#source_changed = False
#if commandFolder:
# if vocolaGetModTime(commandFolder) > lastCommandFolderTime:
# lastCommandFolderTime = vocolaGetModTime(commandFolder)
# source_changed = True
#if source_changed:
# deleteOrphanFiles()
# Returns the newest modified time of any Vocola command folder file or
# 0 if none:
def getLastVocolaFileModTime():
last = 0
if commandFolder:
last = max([last] +
[vocolaGetModTime(os.path.join(commandFolder,f))
for f in os.listdir(commandFolder)])
return last
# Returns the modification time of a file or 0 if the file does not exist:
def vocolaGetModTime(file):
try: return os.stat(file)[ST_MTIME]
except OSError: return 0 # file not found
def deleteOrphanFiles():
print("checking for orphans...")
for f in os.listdir(NatlinkFolder):
if not re.search("_vcl.pyc?$", f): continue
s = getSourceFilename(f)
if s:
if vocolaGetModTime(s)>0: continue
f = os.path.join(NatlinkFolder, f)
print("Deleting: " + f)
os.remove(f)
def getSourceFilename(output_filename):
m = re.match("^(.*)_vcl.pyc?$", output_filename)
if not m: return None # Not a Vocola file
name = m.group(1)
if not commandFolder: return None
marker = "e_s_c_a_p_e_d__"
m = re.match("^(.*)" + marker + "(.*)$", name) # rightmost marker!
if m:
name = m.group(1)
tail = m.group(2)
tail = re.sub("__a_t__", "@", tail)
tail = re.sub("___", "_", tail)
name += tail
name = re.sub("_@", "@", name)
return commandFolder + "\\" + name + ".vcl"
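# Example: an output file named "notepad_vcl.py" maps back to
# <commandFolder>\notepad.vcl.  Names containing the "e_s_c_a_p_e_d__" marker
# first have characters such as "@" and "_" decoded; the exact escaping is
# whatever vcl2py produced when compiling the source file.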
lastNatLinkModTime = 0
# Check for changes to our output .py files and report status relative
# to last time this routine was called; return code means:
# 0: no changes
# 1: 1 or more existing .py files were modified, but no new .py files created
# 2: one or more new .py files may have been created, plus maybe existing changed
def output_changes():
global lastNatLinkModTime, may_have_compiled
old_may_have_compiled = may_have_compiled
may_have_compiled = False
current = vocolaGetModTime(NatlinkFolder)
if current > lastNatLinkModTime:
lastNatLinkModTime = current
return 2
if old_may_have_compiled:
return 1
else:
return 0
# When speech is heard this function will be called before any others.
#
# Must return result of output_changes() so we can tell Natlink when
# files need to be loaded.
def utterance_start_callback(moduleInfo):
compile_changed()
return output_changes()
###########################################################################
# #
# Callback handling #
# #
###########################################################################
#
# With Quintijn's installer as of February 4, 2008:
#
# _vocola_main is loaded before any other Natlink modules
# vocolaBeginCallback is called directly by natlinkmain before any
#      other grammar's gotBegin method
# natlinkmain now guarantees we are not called with CallbackDepth>1
# we return the result of output_changes() directly rather than
# massaging Natlink to deal with new .py files
#
callback_enabled = False
def enable_callback():
global callback_enabled
if not callback_enabled:
callback_enabled = True
if not Quintijn_installer:
# Replace Natlink's "begin" callback function with ours:
natlink.setBeginCallback(vocolaBeginCallback)
def disable_callback():
global callback_enabled
callback_enabled = False
if not Quintijn_installer:
natlink.setBeginCallback(beginCallback)
def vocolaBeginCallback(moduleInfo):
if not callback_enabled:
return 0
changes = 0
if Quintijn_installer or getCallbackDepth()<2:
changes = utterance_start_callback(moduleInfo)
if Quintijn_installer:
return changes
else:
if changes > 1:
# make sure Natlink sees any new .py files:
natlinkmain.findAndLoadFiles()
natlinkmain.loadModSpecific(moduleInfo)
natlinkmain.beginCallback(moduleInfo)
###########################################################################
# #
# Startup/shutdown #
# #
###########################################################################
thisGrammar = None
# remove previous Vocola/Python compilation output as it may be out of
# date (e.g., new compiler, source file deleted, partially written due
# to crash, new machine name, etc.):
purgeOutput()
if not VocolaEnabled:
print("Vocola not active")
else:
print("Vocola version 2.8.6 starting...")
thisGrammar = ThisGrammar()
thisGrammar.initialize()
def unload():
global thisGrammar
disable_callback()
if thisGrammar: thisGrammar.unload()
thisGrammar = None
if __name__ == "__main__":
    # quick manual test: loadAllFiles is a ThisGrammar method, so call the
    # module-level compiler directly on the given command folder
    compile_Vocola(r"C:\Users\Gebruiker\Documents\vocola_qh", force=1)
print('all files loaded')
| 37.680697
| 141
| 0.613859
|
7950abb7b9f23e279f6600b2851f35a52b19d78e
| 1,526
|
py
|
Python
|
var/spack/repos/builtin/packages/py-torch-scatter/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/py-torch-scatter/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/py-torch-scatter/package.py
|
robertodr/spack
|
9b809e01b47d48f01b3d257912fe1b752943cd3d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTorchScatter(PythonPackage):
"""This package consists of a small extension library of
highly optimized sparse update (scatter and segment)
operations for the use in PyTorch, which are missing in the
main package."""
homepage = "https://github.com/rusty1s/pytorch_scatter"
url = "https://github.com/rusty1s/pytorch_scatter/archive/2.0.5.tar.gz"
version('2.0.5', sha256='e29b364beaa9c84a99e0e236be89ed19d4452d89010ff736184ddcce488b47f6')
variant('cuda', default=False, description="Enable CUDA support")
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pytest-runner', type='build')
depends_on('py-torch+cuda', when='+cuda')
depends_on('py-torch~cuda', when='~cuda')
def setup_build_environment(self, env):
if '+cuda' in self.spec:
cuda_arches = list(
self.spec['py-torch'].variants['cuda_arch'].value)
for i, x in enumerate(cuda_arches):
cuda_arches[i] = '{0}.{1}'.format(x[0:-1], x[-1])
env.set('TORCH_CUDA_ARCH_LIST', str.join(' ', cuda_arches))
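            # For instance, a cuda_arch value of ('70', '75') is rewritten to
            # '7.0 7.5' here, the format that TORCH_CUDA_ARCH_LIST expects.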
env.set('FORCE_CUDA', '1')
env.set('CUDA_HOME', self.spec['cuda'].prefix)
else:
env.set('FORCE_CUDA', '0')
| 38.15
| 95
| 0.653997
|
7950ac4541048cb6c3f23f7039f94ca71dfd0d3f
| 5,768
|
py
|
Python
|
dqn/exercise/dqn_agent.py
|
primeMover2011/deep-reinforcement-learning
|
a8314b01da15e47c230a3246e5109d49c6618162
|
[
"MIT"
] | 4,317
|
2018-07-06T18:50:33.000Z
|
2022-03-31T19:24:33.000Z
|
dqn/exercise/dqn_agent.py
|
primeMover2011/deep-reinforcement-learning
|
a8314b01da15e47c230a3246e5109d49c6618162
|
[
"MIT"
] | 41
|
2018-07-08T00:07:26.000Z
|
2022-03-17T22:42:19.000Z
|
dqn/exercise/dqn_agent.py
|
primeMover2011/deep-reinforcement-learning
|
a8314b01da15e47c230a3246e5109d49c6618162
|
[
"MIT"
] | 2,366
|
2018-07-06T18:57:22.000Z
|
2022-03-28T00:37:00.000Z
|
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
        ## TODO (exercise): compute and minimize the loss.
        ## One standard DQN update, sketched here as a possible solution:
        # bootstrapped targets from the target network (terminal states keep reward only)
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # current estimates for the actions actually taken, then a TD-error MSE step
        Q_expected = self.qnetwork_local(states).gather(1, actions)
        loss = F.mse_loss(Q_expected, Q_targets)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
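        # Example: with TAU = 1e-3 each call moves every target weight only
        # 0.1% of the way toward the local network, keeping the bootstrap
        # targets slowly changing and the training stable.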
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
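# Minimal usage sketch (assumes a Gym-style environment named `env`; it is not
# part of this exercise file):
#
#   agent = Agent(state_size=8, action_size=4, seed=0)
#   state = env.reset()
#   action = agent.act(state, eps=0.1)
#   next_state, reward, done, _ = env.step(action)
#   agent.step(state, action, reward, next_state, done)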
| 39.506849
| 127
| 0.619972
|
7950ac845cd9e8ebf8a055d52e2bd0820c427c43
| 2,346
|
py
|
Python
|
tests/test_encoder.py
|
aditya-agrawal-30502/vformer
|
e1f4950f980238442ff1dc39a8f0791e4fbc9dac
|
[
"MIT"
] | 90
|
2021-09-08T10:21:19.000Z
|
2022-03-26T18:11:47.000Z
|
tests/test_encoder.py
|
aditya-agrawal-30502/vformer
|
e1f4950f980238442ff1dc39a8f0791e4fbc9dac
|
[
"MIT"
] | 72
|
2021-09-09T06:54:50.000Z
|
2022-03-31T09:23:31.000Z
|
tests/test_encoder.py
|
aditya-agrawal-30502/vformer
|
e1f4950f980238442ff1dc39a8f0791e4fbc9dac
|
[
"MIT"
] | 21
|
2021-09-09T05:56:03.000Z
|
2022-03-20T08:22:09.000Z
|
import torch
import torch.nn as nn
from vformer.functional import PatchMerging
from vformer.utils import ENCODER_REGISTRY
encoder_modules = ENCODER_REGISTRY.get_list()
def test_VanillaEncoder():
test_tensor = torch.randn(2, 65, 1024)
encoder = ENCODER_REGISTRY.get("VanillaEncoder")(
embedding_dim=1024, depth=6, num_heads=16, head_dim=64, mlp_dim=2048
)
out = encoder(test_tensor)
assert out.shape == test_tensor.shape # shape remains same
del encoder, test_tensor
def test_SwinEncoder():
test_tensor = torch.randn(3, 3136, 96)
# when downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=PatchMerging,
)
out = encoder(test_tensor)
assert out.shape == (3, 784, 192)
del encoder
# when not downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=None,
use_checkpoint=True,
)
out = encoder(test_tensor)
assert out.shape == (3, 3136, 96)
del encoder
encoder_block = ENCODER_REGISTRY.get("SwinEncoderBlock")(
dim=96, input_resolution=(224 // 4, 224 // 4), num_heads=3, window_size=7
)
out = encoder_block(test_tensor)
assert out.shape == test_tensor.shape
del encoder_block
def test_PVTEncoder():
test_tensor = torch.randn(4, 3136, 64)
encoder = ENCODER_REGISTRY.get("PVTEncoder")(
dim=64,
depth=3,
qkv_bias=True,
qk_scale=0.0,
p_dropout=0.0,
attn_dropout=0.1,
drop_path=[0.0] * 3,
act_layer=nn.GELU,
sr_ratio=1,
linear=False,
use_dwconv=False,
num_heads=1,
mlp_ratio=4,
)
out = encoder(test_tensor, H=56, W=56)
assert out.shape == test_tensor.shape
del encoder
def test_CrossEncoder():
test_tensor1 = torch.randn(3, 5, 128)
test_tensor2 = torch.randn(3, 5, 256)
encoder = ENCODER_REGISTRY.get("CrossEncoder")(128, 256)
out = encoder(test_tensor1, test_tensor2)
assert out[0].shape == test_tensor1.shape
assert out[1].shape == test_tensor2.shape
del encoder
| 24.694737
| 81
| 0.635124
|
7950acf140dd62d26b45926e473622df452b3584
| 2,304
|
py
|
Python
|
docs/conf.py
|
open-molecular-dynamics/moldyn
|
95a8be53981a2159a75e82c7321136bebc674bc8
|
[
"MIT"
] | 2
|
2019-07-15T08:36:50.000Z
|
2019-08-11T11:47:30.000Z
|
docs/conf.py
|
open-molecular-dynamics/moldyn
|
95a8be53981a2159a75e82c7321136bebc674bc8
|
[
"MIT"
] | 1
|
2020-01-13T15:35:35.000Z
|
2020-01-13T15:35:35.000Z
|
docs/conf.py
|
open-molecular-dynamics/moldyn
|
95a8be53981a2159a75e82c7321136bebc674bc8
|
[
"MIT"
] | 1
|
2019-07-15T08:34:29.000Z
|
2019-07-15T08:34:29.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'open-moldyn'
copyright = '2019, Arthur Luciani, Alexandre Faye-Bedrin'
author = 'Arthur Luciani, Alexandre Faye-Bedrin'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_autodoc_typehints'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
autodoc_mock_imports = ['numexpr', 'moderngl', 'numba', 'PyQt5', 'pyqtgraph']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
autoclass_content = "both"
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
source_encoding = 'utf-8-sig'
| 33.391304
| 79
| 0.666233
|
7950aef8e563296cfef82bdc75513e37e374aa8a
| 4,276
|
py
|
Python
|
web/run.py
|
GitOnUp/docker-sendanywhere
|
d360d0dafa0affeed64a1baa9740c8af3ec0ad24
|
[
"Apache-2.0"
] | 54
|
2017-01-08T17:21:49.000Z
|
2021-11-02T08:46:07.000Z
|
web/run.py
|
GitOnUp/docker-sendanywhere
|
d360d0dafa0affeed64a1baa9740c8af3ec0ad24
|
[
"Apache-2.0"
] | 22
|
2017-03-28T06:03:14.000Z
|
2021-07-28T05:43:55.000Z
|
web/run.py
|
GitOnUp/docker-sendanywhere
|
d360d0dafa0affeed64a1baa9740c8af3ec0ad24
|
[
"Apache-2.0"
] | 21
|
2017-01-26T21:12:09.000Z
|
2022-01-31T21:34:59.000Z
|
#!/usr/bin/env python
import os
import time
import sys
import subprocess
import signal
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
def find_files(directory="./"):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.endswith('.py'):
filename = os.path.join(root, basename)
yield filename
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
try:
os.setpgid(0, 0)
main_func()
except KeyboardInterrupt:
pass
return
procs = None
try:
while True:
print('* Restarting with reloader ' + str(sys.executable))
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
procs = subprocess.Popen(args, env=new_environ)
mtimes = {}
restart = False
while not restart:
for filename in find_files():
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
print('* Detected change in %r, reloading' % filename)
restart = True
break
time.sleep(interval)
killpg(procs.pid, signal.SIGTERM)
except KeyboardInterrupt:
pass
finally:
killpg(procs.pid, signal.SIGTERM)
def killpg(pgid, send_signal=signal.SIGKILL):
print('kill PGID {}'.format(pgid))
try:
os.killpg(pgid, send_signal)
#os.killpg(pgid, signal.SIGKILL)
except:
pass
def create_instance_config():
if not os.path.exists('instance'):
os.makedirs('instance')
with open(os.path.join('instance', 'application.cfg'), 'wb+') as f:
f.write('SECRET_KEY = \'')
f.write("".join("\\x{:02x}".format(ord(c)) for c in os.urandom(24)))
f.write('\'\n')
f.write('VERSION = \'')
if os.path.exists('version'):
with open('version') as fv:
version = fv.read().strip()
f.write(version)
else:
f.write('unknown')
f.write('\'\n')
if '--debug' not in sys.argv:
os.chmod(os.path.join('instance', 'application.cfg'), 0600)
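    # The generated instance/application.cfg ends up looking roughly like:
    #   SECRET_KEY = '\x9f\x02...'   (24 random bytes, hex-escaped)
    #   VERSION = '1.2.3'            (or 'unknown' when no version file exists)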
def main():
create_instance_config()
def run_server():
import socket
os.environ['CONFIG'] = CONFIG
from lightop import app
# websocket conflict: WebSocketHandler
if DEBUG or STAGING:
# from werkzeug.debug import DebuggedApplication
app.debug = True
# app = DebuggedApplication(app, evalex=True)
print('Fork monitor programs')
pgid = os.getpgid(0)
procs = []
procs.extend([subprocess.Popen(program, close_fds=True, shell=True)
for program in PROGRAMS])
signal.signal(signal.SIGTERM, lambda *args: killpg(pgid))
signal.signal(signal.SIGHUP, lambda *args: killpg(pgid))
signal.signal(signal.SIGINT, lambda *args: killpg(pgid))
print('Running on port ' + str(PORT))
try:
app.run(host='', port=PORT)
except socket.error as e:
print(e)
DEBUG = True if '--debug' in sys.argv else False
STAGING = True if '--staging' in sys.argv else False
CONFIG = 'config.Development' if DEBUG else 'config.Production'
CONFIG = 'config.Staging' if STAGING else CONFIG
PORT = 6079
PROGRAMS = tuple()
#PROGRAMS = (('sudo nginx -c ${PWD}/nginx.conf'),)
#PROGRAMS = ('python lxc-monitor.py',
# 'python docker-monitor.py')
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
if DEBUG or STAGING:
main = lambda: run_with_reloader(run_server)
else:
main = run_server
main()
if __name__ == "__main__":
main()
| 30.326241
| 78
| 0.550047
|
7950af2f1be6ecb75f785dc592c2cdc8a365745c
| 955
|
py
|
Python
|
app/nettools.py
|
gunawanw9/haproxy-wi
|
3b60d556b430dad7bd0c56bdfe73c3422ebf05bf
|
[
"Apache-2.0"
] | 862
|
2018-04-10T03:53:03.000Z
|
2021-05-01T02:05:44.000Z
|
app/nettools.py
|
gunawanw9/haproxy-wi
|
3b60d556b430dad7bd0c56bdfe73c3422ebf05bf
|
[
"Apache-2.0"
] | 250
|
2018-04-19T02:59:34.000Z
|
2021-04-30T19:08:15.000Z
|
app/nettools.py
|
gunawanw9/haproxy-wi
|
3b60d556b430dad7bd0c56bdfe73c3422ebf05bf
|
[
"Apache-2.0"
] | 184
|
2018-02-10T09:33:38.000Z
|
2021-04-29T14:38:47.000Z
|
#!/usr/bin/env python3
import funct
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates/'), autoescape=True)
template = env.get_template('nettools.html')
form = funct.form
print('Content-type: text/html\n')
funct.check_login()
try:
user, user_id, role, token, servers, user_services = funct.get_users_params(virt=1)
except Exception:
pass
output_from_parsed_template = template.render(h2=1, autorefresh=0,
title="Network tools",
role=role,
user=user,
servers=servers,
versions=funct.versions(),
user_services=user_services,
token=token)
print(output_from_parsed_template)
| 38.2
| 87
| 0.521466
|
7950afcbcb4a965d2df17761430dd12d4e82773b
| 13,365
|
py
|
Python
|
analysis/synthetic_analyses.py
|
jmschrei/discern
|
50b6f03d070604479c160569cca9ef7f031ff38d
|
[
"MIT"
] | 1
|
2020-04-18T09:53:58.000Z
|
2020-04-18T09:53:58.000Z
|
analysis/synthetic_analyses.py
|
jmschrei/discern
|
50b6f03d070604479c160569cca9ef7f031ff38d
|
[
"MIT"
] | null | null | null |
analysis/synthetic_analyses.py
|
jmschrei/discern
|
50b6f03d070604479c160569cca9ef7f031ff38d
|
[
"MIT"
] | null | null | null |
# synthetic_analyses.py
# Contact: Jacob Schreiber
# jmschr@cs.washington.edu
'''
These tests will show the difference between DISCERN, ANOVA, and LNS
on pairs of synthetic Bayesian networks. You can build your own Bayesian
networks by hand (three examples shown below) and then use the barchart
and score_network_pair functions to handle the scoring of these
networks using DISCERN, ANOVA, and LNS in a standardized manner.
'''
import matplotlib
matplotlib.use('pdf')
import numpy
import numpy as np        # several helpers below use the np alias
import itertools as it    # izip is used when building the sparse network
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from yabn import *
from discern import *
from LNS import *
from scipy.stats import f_oneway
random.seed(0)
numpy.random.seed(0)
def barchart( scores, method_names, node_names, title, normalize=True ):
'''
Take in the scores from two different feature selectors and plot them.
'''
sns.set( style='white', context='talk' )
plt.figure( figsize=(12, 6) )
n = len( scores )
items = zip( xrange(n), scores, method_names, sns.color_palette('husl', 3) )
for i, score, name, color in items:
if normalize:
score /= score.sum()
x = np.arange( 0.5, score.shape[0]+0.5 )
plt.bar( x+i*(0.8/n), score, width=0.8/n, alpha=0.5, edgecolor='w', label=name, facecolor=color )
plt.legend()
plt.title( title )
plt.xticks( x+(1.0/n), node_names )
plt.savefig( title + '.pdf' )
def score_network_pair( networka, networkb, node_names, i=100, j=100 ):
'''
This will take in a network and produce DISCERN and ANOVA scores for
each node in the network. The user may set the number of samples
generated for each network through adjusting i and j. Pass in the
order of the node names to get the scores in the proper order.
'''
node_names_a = [ node.name for node in networka.nodes ]
node_names_b = [ node.name for node in networkb.nodes ]
# Get the data from sampling the two networks
a_data = numpy.array([ networka.sample() for n in xrange( i ) ])
b_data = numpy.array([ networkb.sample() for n in xrange( j ) ])
# Convert this data into a dataframe for DISCERN
a_data = pd.DataFrame( a_data, columns=node_names_a )
b_data = pd.DataFrame( b_data, columns=node_names_b )
# Initialize DISCERN and use it on the data
discern = DISCERN()
#l, sse = discern.lambda_opt( a_data[::2], node_names_a, n_cores=6 )
discern.fit_score( a_data[::2], a_data[1::2], b_data[::2], b_data[1::2],
node_names_a, l=0.4, n_cores=8 )
# Get the LNS scores
lns = LNS()
lns.fit_score( a_data, b_data, node_names_a )
# Unpack the two score vectors into a numpy array
discern_scores = numpy.array(discern._scores.ix[ node_names ]['T2'])
anova_scores = numpy.array([ f_oneway( a_data[name], b_data[name] )[0] for name in node_names ])
lns_scores = numpy.array( lns._scores.ix[ node_names ]['r'] )
return discern_scores, anova_scores, lns_scores
def seven_star_tests():
'''
These tests work on a star network, where one node influences a second node,
which then influences three nodes, and there are two independent nods, which
switch identities in the graph. Basically, an influencer no longer influences
and an independent node takes its place.
'''
# Define the two networks we will use
networka = Network( "A" )
networkb = Network( "B" )
# Define all seven nodes, which are the same between the two networks
n1 = Node( NormalDistribution( 12, 0.7 ), name="n1" )
n2 = Node( NormalDistribution( 5, 0.3 ), name="n2" )
n3 = Node( NormalDistribution( 17, 0.9 ), name="n3" )
n4 = Node( NormalDistribution( 22, 1.2 ), name="n4" )
n5 = Node( NormalDistribution( 12, 0.3 ), name="n5" )
n6 = Node( NormalDistribution( 27, 3.2 ), name="n6" )
n7 = Node( NormalDistribution( 88, 1.2 ), name="n7" )
# We'll use a single edge of unit variance for this simple test
e = 1.0
# Add all the nodes to the networks
networka.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add all the edges to network A
networka.add_edge( n1, n3, e )
networka.add_edge( n3, n5, e )
networka.add_edge( n3, n6, e )
networka.add_edge( n3, n7, e )
# Add all the edges to network B
networkb.add_edge( n4, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
# Finalize the internals of the models
networka.bake()
networkb.bake()
# Define the ordered names
node_names = [ "n1", "n2", "n3", "n4", "n5", "n6", "n7" ]
# Score the network
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n3+ n1-n3-" )
# Time for a second test, involving a network where only an edge between
# n4 and n1 is added and nothing is removed.
networkb = Network( 'b' )
# Add the nodes in
networkb.add_nodes( [n1, n2, n3, n4, n5, n6, n7] )
# Add the edges in
networkb.add_edge( n1, n3, e )
networkb.add_edge( n3, n5, e )
networkb.add_edge( n3, n6, e )
networkb.add_edge( n3, n7, e )
networkb.add_edge( n4, n1, e )
# Finalize the model
networkb.bake()
# Score the nodes
discern, anova, lns = score_network_pair( networka, networkb, node_names )
# Plot the scores
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, "n4-n1+" )
def independent_no_perturbation_test( name="independent" ):
'''
This will test a network which has no edges, and no perturbation, to see
that the prediction power is not random.
'''
network = Network( 'independent' )
# Create 12 distributions of random size
e = NormalDistribution( 50, 1.2 )
n1 = Node( e, name="n1" )
n2 = Node( e, name="n2" )
n3 = Node( e, name="n3" )
n4 = Node( e, name="n4" )
n5 = Node( e, name="n5" )
n6 = Node( e, name="n6" )
n7 = Node( e, name="n7" )
n8 = Node( e, name="n8" )
n9 = Node( e, name="n9" )
n10 = Node( e, name="n10" )
n11 = Node( e, name="n11" )
n12 = Node( e, name="n12" )
# Add the nodes and finalize the structure of the data
network.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12] )
network.bake()
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 13 ) ]
# Get the scores
discern, anova, lns = score_network_pair( network, network, node_names )
# Plot it
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name, normalize=False )
def three_component_test( name="three_component"):
'''
This will test a network which has thirteen nodes and several perturbations.
'''
networka = Network( 'a' )
networkb = Network( 'b' )
# Create some nodes
emission = NormalDistribution( 10, 1 )
n1 = Node( emission, name="n1" )
n2 = Node( emission, name="n2" )
n3 = Node( emission, name="n3" )
n4 = Node( emission, name="n4" )
n5 = Node( emission, name="n5" )
n6 = Node( emission, name="n6" )
n7 = Node( emission, name="n7" )
n8 = Node( emission, name="n8" )
n9 = Node( emission, name="n9" )
n10 = Node( emission, name="n10" )
n11 = Node( emission, name="n11" )
n12 = Node( emission, name="n12" )
n13 = Node( emission, name="n13" )
# Unpack nodes
node_names = [ 'n{}'.format( i ) for i in xrange( 1, 14 ) ]
# Add the nodes to the module
networka.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
networkb.add_nodes( [n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13] )
# Define a uniform edge for simplicity
e = 1.0
# Add edges to the models
networka.add_edge( n1, n2, e )
networka.add_edge( n2, n3, e )
networka.add_edge( n4, n2, e )
networka.add_edge( n5, n6, e )
networka.add_edge( n6, n7, e )
networka.add_edge( n7, n9, e )
networka.add_edge( n7, n10, e )
networka.add_edge( n12, n11, e )
networka.add_edge( n13, n12, e )
networkb.add_edge( n1, n2, e )
networkb.add_edge( n4, n2, e )
networkb.add_edge( n5, n6, e )
networkb.add_edge( n6, n7, e )
networkb.add_edge( n7, n9, e )
networkb.add_edge( n7, n10, e )
networkb.add_edge( n12, n11, e )
networkb.add_edge( n13, n12, e )
networkb.add_edge( n4, n11, e )
networkb.add_edge( n5, n8, e )
networkb.add_edge( n8, n7, e )
# Finalize the models
networka.bake()
networkb.bake()
discern, anova, lns = score_network_pair( networka, networkb, node_names )
barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name )
def DCG( relevance ):
'''
Calculates the Discounted Cumulative Gain by comparing a 'true' ranking
to a predicted ranking.
'''
n = len( relevance )
return sum( (2.**relevance[i]-1.) / (i+1) for i in xrange( n ) )
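# Worked example (sanity check): DCG([3, 2, 1])
#   = (2**3 - 1)/1 + (2**2 - 1)/2 + (2**1 - 1)/3
#   = 7 + 1.5 + 0.333...  ~= 8.83
# so a ranking that puts the most-perturbed genes first scores highest.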
def large_sparse_network( n=5000, m=50, low=1, high=10, name="large_sparse" ):
'''
Create a synthetic large, n nodes, where m of them get perturbed between
the two graphs by changing between ~low~ and ~high~ edges.
'''
# Randomly generate normal distributions for the node emissions
# Means based on a gamma distribution, stds based on a lognormal
# so that they are both bounded by 1
means = [50]*n
stds = [0.5]*n
#means = numpy.random.gamma( 50, 3.0, n )
#stds = numpy.random.lognormal( 0.5, 0.1, n )
# Randomly choose M genes to perturb, and then for each perturbed gene
# randomly choose the number of edges to perturb
perturbed = numpy.random.choice( np.arange( n ), size=m, replace=False )
n_perturbed_edges = numpy.random.randint( low, high, m )
# Randomly generate the graph structure from beta distributions. All
# weights are rounded to 1, instead of being variable.
null_edges = numpy.tril( numpy.around( numpy.random.beta( 1, 3, (n,n) ) ) )
numpy.fill_diagonal( null_edges, 0 )
alternate_edges = null_edges.copy()
perturb_count = { i:0 for i in xrange(n) }
to_perturb_count = { i:0 for i in xrange(n) }
# For each perturbed edge, randomly select between `low` and `high` number
# of edges to perturb, and perturb them--in this case just a binary flip.
for i, k in it.izip( perturbed, n_perturbed_edges ):
perturbed_id = numpy.random.choice( numpy.arange( i ), size=min(k, i), replace=False )
alternate_edges[i, perturbed_id] = numpy.abs( alternate_edges[i, perturbed_id] - 1 )
perturb_count[i] += perturbed_id.shape[0]
for index in perturbed_id:
to_perturb_count[index] += 1
total_perturb = { i: perturb_count[i]+to_perturb_count[i] for i in xrange(n) }
if numpy.triu( alternate_edges ).sum() > 0:
raise SyntaxError( "Matrix is not a DAG.")
# Initiate the network objects
null = Network( "Null" )
alternate = Network( "Alternate" )
# Create all the nodes
nodes = [ Node( NormalDistribution( mu, sigma ), name="n{}".format( i ) ) for i, mu, sigma in it.izip( xrange(n), means, stds ) ]
node_names = [ node.name for node in nodes ]
# Add them to the model
null.add_nodes( nodes )
alternate.add_nodes( nodes )
# Create all the edges, one at a time
for i in xrange( n ):
for j in xrange( n ):
p = null_edges[i, j]
if p > 0:
null.add_edge( nodes[i], nodes[j], p )
p = alternate_edges[i, j]
if p > 0:
alternate.add_edge( nodes[i], nodes[j], p )
# Finalize the internal structure of the network
null.bake()
alternate.bake()
# Score the network pair according to the metrics
discern, anova, lns = score_network_pair( null, alternate, node_names, i=100, j=300 )
# Make a plot of the scores acorss the nodes
#barchart( [discern, anova, lns], ['DISCERN', 'ANOVA', 'LNS'], node_names, name )
scores = pd.DataFrame({ 'DISCERN': discern, 'ANOVA': anova,
'LNS': lns, 'FROM': perturb_count.values(), 'TO': to_perturb_count.values(),
'TOTAL': total_perturb.values() })
# Calculate the Discounted Cumulative Gain matrix. DCG is a way of measuring
# a ranking of items if you know their true ordering. In this case, genes
# should be ordered by the true number of perturbations to them, and we
# compare the ordering we get from DISCERN, ANOVA, and LNS to that. DCG is
# implemented in the DCG function above. In this case we divide nodes into
# FROM nodes, which are the ranking of nodes according to perturbation in
# number of edges LEAVING that nodes, TO nodes, which is perturbation in number
# of edges going TO that node, and TOTAL which includes both. DISCERN is
# predicted to identify FROM nodes better than other techniques, as those
# should be similiar to driver mutations.
DCG_Matrix = pd.DataFrame( { 'FROM': [ DCG( scores.sort( 'DISCERN', ascending=False )['FROM'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['FROM'].values ),
DCG( scores.sort( 'LNS', ascending=False )['FROM'].values ) ],
'TO': [ DCG( scores.sort( 'DISCERN', ascending=False )['TO'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['TO'].values ),
DCG( scores.sort( 'LNS', ascending=False )['TO'].values ) ],
'TOTAL': [ DCG( scores.sort( 'DISCERN', ascending=False )['TOTAL'].values ),
DCG( scores.sort( 'ANOVA', ascending=False )['TOTAL'].values ),
DCG( scores.sort( 'LNS', ascending=False )['TOTAL'].values ) ] } )
DCG_Matrix.index = [ 'DISCERN', 'ANOVA', 'LNS' ]
print DCG_Matrix
return scores, DCG_Matrix
if __name__ == '__main__':
# Run the three smaller tests. Graphs will be output automatically.
independent_no_perturbation_test()
three_component_test()
seven_star_tests()
# Run the large sparse network. This example has 1000 nodes, of which
# 25 are perturbed. You can play with these parameters as much as you
# want, and the Discounted Cumulative Gain matrix will be returned.
large_sparse_network( 1000, 25 )
| 33.92132
| 130
| 0.683427
|
7950b0481568299c93326abc368bfe9d394d108d
| 208
|
py
|
Python
|
Calculator/Addition.py
|
mkm99/TeamProject_StatsCalculator
|
81085c1af47f38d3e49b43d667e312016c44ad10
|
[
"MIT"
] | null | null | null |
Calculator/Addition.py
|
mkm99/TeamProject_StatsCalculator
|
81085c1af47f38d3e49b43d667e312016c44ad10
|
[
"MIT"
] | 7
|
2020-03-03T21:37:57.000Z
|
2020-03-06T04:11:42.000Z
|
Calculator/Addition.py
|
mkm99/TeamProject_StatsCalculator
|
81085c1af47f38d3e49b43d667e312016c44ad10
|
[
"MIT"
] | null | null | null |
def addition(augend, addend):
if isinstance(augend, str) or isinstance(addend, str):
return "Trying to use strings in calculator"
solution = float(augend) + float(addend)
return solution
| 29.714286
| 58
| 0.697115
|
7950b1fa97a0f69114306e4e54adc78f6b4ad32a
| 5,616
|
py
|
Python
|
qa/rpc-tests/rpcbind_test.py
|
fflo/lemoncoin
|
1d45a3821afacf24d07827b64712d31f28b75730
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rpcbind_test.py
|
fflo/lemoncoin
|
1d45a3821afacf24d07827b64712d31f28b75730
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rpcbind_test.py
|
fflo/lemoncoin
|
1d45a3821afacf24d07827b64712d31f28b75730
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-bitcoinrpc to module search path:
import os
import sys
import json
import shutil
import subprocess
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
try:
pid = bitcoind_processes[0].pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
finally:
stop_nodes(nodes)
wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
'''
    Start a node with the given rpcallowip settings, and request getinfo
    at a non-localhost IP.
'''
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
nodes = start_nodes(1, tmpdir, [base_args])
try:
# connect to node through non-loopback interface
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
node = AuthServiceProxy(url)
node.getinfo()
finally:
node = None # make sure connection will be garbage collected and closed
stop_nodes(nodes)
wait_bitcoinds()
def run_test(tmpdir):
assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
assert(not 'This test requires at least one non-loopback IPv4 interface')
print("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
run_bind_test(tmpdir, None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
try:
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
assert(not 'Connection not denied by rpcallowip as expected')
except ValueError:
pass
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave lemoncoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing lemoncoind/lemoncoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
run_test(options.tmpdir)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
| 36.705882
| 110
| 0.643162
|
7950b240074ba1d6f45b0225e5a467a473327c4f
| 4,568
|
py
|
Python
|
packages/core/minos-microservice-aggregate/tests/test_aggregate/test_value_objects.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 247
|
2022-01-24T14:55:30.000Z
|
2022-03-25T12:06:17.000Z
|
packages/core/minos-microservice-aggregate/tests/test_aggregate/test_value_objects.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 168
|
2022-01-24T14:54:31.000Z
|
2022-03-31T09:31:09.000Z
|
packages/core/minos-microservice-aggregate/tests/test_aggregate/test_value_objects.py
|
bhardwajRahul/minos-python
|
bad7a280ad92680abdeab01d1214688279cf6316
|
[
"MIT"
] | 21
|
2022-02-06T17:25:58.000Z
|
2022-03-27T04:50:29.000Z
|
import unittest
from unittest import (
TestCase,
)
from minos.aggregate import (
Action,
IncrementalSetDiff,
IncrementalSetDiffEntry,
ValueObject,
ValueObjectException,
ValueObjectSet,
)
class _Location(ValueObject):
street: str
class TestValueObject(TestCase):
def setUp(self) -> None:
self.value = _Location(street="street name")
def test_instantiate(self):
self.assertEqual("street name", self.value.street)
def test_raise_when_accessed(self):
with self.assertRaises(ValueObjectException):
self.value["street"] = "this assignment must raise"
class TestValueObjectSet(TestCase):
def setUp(self) -> None:
self.location_1 = _Location(street="street name")
self.location_2 = _Location(street="another street name")
self.fake_value_obj = {self.location_1, self.location_2}
self.fake_value_obj_set = (self.location_1, self.location_2)
def test_data(self):
value_objects = ValueObjectSet(self.fake_value_obj)
self.assertEqual(self.fake_value_obj, value_objects)
def test_from_set(self):
value_objects = ValueObjectSet(self.fake_value_obj_set)
self.assertEqual(self.fake_value_obj, value_objects)
def test_eq_true(self):
observed = ValueObjectSet(self.fake_value_obj)
self.assertEqual(self.fake_value_obj, observed)
def test_eq_false(self):
raw = self.fake_value_obj
observed = ValueObjectSet(raw)
other = {_Location("Test")}
self.assertNotEqual(ValueObjectSet(other), set(raw))
self.assertNotEqual(ValueObjectSet(other), observed)
self.assertNotEqual(other, observed)
def test_len(self):
value_objects = ValueObjectSet(self.fake_value_obj)
self.assertEqual(2, len(value_objects))
def test_iter(self):
value_objects = ValueObjectSet(self.fake_value_obj)
self.assertEqual(self.fake_value_obj, set(value_objects))
def test_contains(self):
raw = {self.location_1}
value_objects = ValueObjectSet(raw)
self.assertIn(self.location_1, value_objects)
self.assertNotIn(self.location_2, value_objects)
self.assertNotIn(1234, value_objects)
def test_add(self):
value_objects = ValueObjectSet()
value_objects.add(self.location_1)
raw = {self.location_1}
self.assertEqual(raw, value_objects)
def test_remove(self):
value_objects = ValueObjectSet(self.fake_value_obj)
value_objects.discard(self.location_1)
raw = {self.location_2}
self.assertEqual(raw, value_objects)
def test_diff(self):
raw = [_Location(street="street name"), _Location(street="another street name")]
entities = ValueObjectSet(raw)
observed = entities.diff(ValueObjectSet([raw[0]]))
expected = IncrementalSetDiff([IncrementalSetDiffEntry(Action.CREATE, raw[1])])
self.assertEqual(observed, expected)
class TestValueObjectSetDiff(TestCase):
def setUp(self) -> None:
self.raw = [_Location(street="street name"), _Location(street="another street name")]
self.old = ValueObjectSet(self.raw)
self.clone = [_Location(street=entity.street) for entity in self.raw]
def test_from_difference_create(self):
entities = ValueObjectSet(self.clone)
new = _Location("San Anton, 23")
entities.add(new)
observed = IncrementalSetDiff.from_difference(entities, self.old)
expected = IncrementalSetDiff([IncrementalSetDiffEntry(Action.CREATE, new)])
self.assertEqual(expected, observed)
def test_from_difference_delete(self):
entities = ValueObjectSet(self.clone)
removed = self.clone[1]
entities.remove(removed)
observed = IncrementalSetDiff.from_difference(entities, self.old)
expected = IncrementalSetDiff([IncrementalSetDiffEntry(Action.DELETE, removed)])
self.assertEqual(expected, observed)
def test_from_difference_combined(self):
entities = ValueObjectSet(self.clone)
new = _Location("Europa, 12")
entities.add(new)
removed = self.clone[1]
entities.remove(removed)
observed = IncrementalSetDiff.from_difference(entities, self.old)
expected = IncrementalSetDiff(
[IncrementalSetDiffEntry(Action.CREATE, new), IncrementalSetDiffEntry(Action.DELETE, removed)]
)
self.assertEqual(expected, observed)
if __name__ == "__main__":
unittest.main()
| 31.287671
| 106
| 0.688266
|
7950b3418afefb3445094edac4459310abe6fa27
| 1,368
|
py
|
Python
|
third_party/android_deps/libs/com_android_support_support_fragment/3pp/fetch.py
|
Ron423c/chromium
|
2edf7b980065b648f8b2a6e52193d83832fe36b7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
third_party/android_deps/libs/com_android_support_support_fragment/3pp/fetch.py
|
Ron423c/chromium
|
2edf7b980065b648f8b2a6e52193d83832fe36b7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
third_party/android_deps/libs/com_android_support_support_fragment/3pp/fetch.py
|
DamieFC/chromium
|
54ce2d3c77723697efd22cfdb02aea38f9dfa25c
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://maven.google.com/com/android/support/support-fragment/28.0.0/support-fragment-28.0.0.aar'
_FILE_NAME = 'support-fragment-28.0.0.aar'
_FILE_VERSION = '28.0.0'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
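# For reference, an illustrative rendering of the manifest printed above for this
# .aar (actual whitespace/ordering comes from json.dumps):
#   {"url": ["https://maven.google.com/com/android/support/support-fragment/28.0.0/support-fragment-28.0.0.aar"],
#    "name": ["support-fragment-28.0.0.aar"], "ext": ".aar"}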
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
| 24
| 110
| 0.682018
|
7950b4ad09cee3054bc74da5c7de12d5f4671065
| 557
|
py
|
Python
|
StudentManager/migrations/0015_pastoral.py
|
franklinwagbara/Brookstone-Pastoral-Management-System
|
a8a4cd66fbb7284e3cd61539bc313100ebd14f94
|
[
"MIT"
] | null | null | null |
StudentManager/migrations/0015_pastoral.py
|
franklinwagbara/Brookstone-Pastoral-Management-System
|
a8a4cd66fbb7284e3cd61539bc313100ebd14f94
|
[
"MIT"
] | null | null | null |
StudentManager/migrations/0015_pastoral.py
|
franklinwagbara/Brookstone-Pastoral-Management-System
|
a8a4cd66fbb7284e3cd61539bc313100ebd14f94
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2020-09-22 13:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('StudentManager', '0014_account_admin_gate_schooladmin'),
]
operations = [
migrations.CreateModel(
name='Pastoral',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(blank=True, max_length=50, null=True)),
],
),
]
| 26.52381
| 114
| 0.601436
|
7950b4bd2297f719a8873258df1db5e24ea0e550
| 3,391
|
py
|
Python
|
examples/pytorch/mvgrl/graph/model.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
examples/pytorch/mvgrl/graph/model.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
examples/pytorch/mvgrl/graph/model.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
import torch as th
import torch.nn as nn
from dgl.nn.pytorch import GraphConv
from dgl.nn.pytorch.glob import SumPooling
from utils import local_global_loss_
class MLP(nn.Module):
def __init__(self, in_dim, out_dim):
super(MLP, self).__init__()
self.fcs = nn.Sequential(
nn.Linear(in_dim, out_dim),
nn.PReLU(),
nn.Linear(out_dim, out_dim),
nn.PReLU(),
nn.Linear(out_dim, out_dim),
nn.PReLU()
)
self.linear_shortcut = nn.Linear(in_dim, out_dim)
def forward(self, x):
return self.fcs(x) + self.linear_shortcut(x)
class GCN(nn.Module):
def __init__(self, in_dim, out_dim, num_layers, norm):
super(GCN, self).__init__()
self.num_layers = num_layers
self.layers = nn.ModuleList()
self.layers.append(GraphConv(in_dim, out_dim, bias=False, norm=norm, activation = nn.PReLU()))
self.pooling = SumPooling()
for _ in range(num_layers - 1):
self.layers.append(GraphConv(out_dim, out_dim, bias=False, norm=norm, activation = nn.PReLU()))
def forward(self, graph, feat, edge_weight = None):
h = self.layers[0](graph, feat, edge_weight=edge_weight)
hg = self.pooling(graph, h)
for idx in range(self.num_layers - 1):
h = self.layers[idx + 1](graph, h, edge_weight=edge_weight)
hg = th.cat((hg, self.pooling(graph, h)), -1)
return h, hg
class MVGRL(nn.Module):
r"""
mvgrl model
Parameters
-----------
in_dim: int
Input feature size.
out_dim: int
Output feature size.
num_layers: int
Number of the GNN encoder layers.
Functions
-----------
forward(graph1, graph2, feat, edge_weight):
graph1: DGLGraph
The original graph
graph2: DGLGraph
The diffusion graph
feat: tensor
Node features
edge_weight: tensor
Edge weight of the diffusion graph
"""
def __init__(self, in_dim, out_dim, num_layers):
super(MVGRL, self).__init__()
self.local_mlp = MLP(out_dim, out_dim)
self.global_mlp = MLP(num_layers * out_dim, out_dim)
self.encoder1 = GCN(in_dim, out_dim, num_layers, norm='both')
self.encoder2 = GCN(in_dim, out_dim, num_layers, norm='none')
def get_embedding(self, graph1, graph2, feat, edge_weight):
local_v1, global_v1 = self.encoder1(graph1, feat)
local_v2, global_v2 = self.encoder2(graph2, feat, edge_weight=edge_weight)
global_v1 = self.global_mlp(global_v1)
global_v2 = self.global_mlp(global_v2)
return (global_v1 + global_v2).detach()
def forward(self, graph1, graph2, feat, edge_weight, graph_id):
# calculate node embeddings and graph embeddings
local_v1, global_v1 = self.encoder1(graph1, feat)
local_v2, global_v2 = self.encoder2(graph2, feat, edge_weight=edge_weight)
local_v1 = self.local_mlp(local_v1)
local_v2 = self.local_mlp(local_v2)
global_v1 = self.global_mlp(global_v1)
global_v2 = self.global_mlp(global_v2)
# calculate loss
loss1 = local_global_loss_(local_v1, global_v2, graph_id)
loss2 = local_global_loss_(local_v2, global_v1, graph_id)
loss = loss1 + loss2
return loss
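# Illustrative usage sketch (dimensions are assumed, not part of the original file):
#   model = MVGRL(in_dim=feat_dim, out_dim=32, num_layers=4)
#   loss = model(graph, diff_graph, feat, edge_weight, graph_id)  # training objective
#   embeddings = model.get_embedding(graph, diff_graph, feat, edge_weight)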
| 30.00885
| 107
| 0.625479
|
7950b4d7ed869497fa643f261414612e3dc91cb6
| 2,298
|
py
|
Python
|
casim/params.py
|
pdebuyl/cancer_sim
|
305492d5108e1fb50783e4f13ddf2e1cf5b08976
|
[
"MIT"
] | 1
|
2022-02-16T03:34:44.000Z
|
2022-02-16T03:34:44.000Z
|
casim/params.py
|
pdebuyl/cancer_sim
|
305492d5108e1fb50783e4f13ddf2e1cf5b08976
|
[
"MIT"
] | 12
|
2020-03-16T20:59:21.000Z
|
2020-09-18T08:41:09.000Z
|
casim/params.py
|
pdebuyl/cancer_sim
|
305492d5108e1fb50783e4f13ddf2e1cf5b08976
|
[
"MIT"
] | 3
|
2020-09-16T12:41:19.000Z
|
2021-03-11T23:19:24.000Z
|
################################################################################
# #
# Commented casim parameter input file. #
# Valid settings are indicated in parentheses at the end of each comment line. #
# [0,1] stands for the closed interval from 0 to 1, including the limits; || #
# means "or". #
# #
################################################################################
# Number of mesh points in each dimension (>0)
matrix_size = 1000
# Number of generations to simulate (>0).
number_of_generations = 20
# Probability of cell division per generation ([0,1]).
division_probability = 1
# Probability of division for cells with advantageous mutation ([0,1]).
adv_mutant_division_probability = 1
# Fraction of cells that die per generation ([0,1]).
death_probability = 0.1
# Fraction of cells with advantageous mutation that die per generation ([0,1]).
adv_mutant_death_probability = 0.0
# Probability of mutations ([0,1]).
mutation_probability = 1
# Mutation probability for the adv. cells ([0,1]).
adv_mutant_mutation_probability = 1
# Number of mutations per cell division (>=0).
number_of_mutations_per_division = 10
# Number of generations after which the adv. mutation occurs (>=0).
adv_mutation_wait_time = 10
# Number of mutations present in first cancer cell (>=1).
number_of_initial_mutations = 150
# Tumour multiplicity (one tumour or two tumours simultaneously) ("single" || "double").
tumour_multiplicity = "single"
# Sequencing read depth (read length * number of reads / genome length).
read_depth = 100
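# (Illustrative arithmetic only: 1e6 reads of length 100 over a 1e6-base genome
# would give read_depth = 100 * 1e6 / 1e6 = 100.)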
# Fraction of cells to be sampled ([0,1]).
sampling_fraction = 0.1
# Sampling position (list of (x,y) coordinates in the range [0,matrix_size-1]).
# If left blank or None, random position will be chosen.
# sampling_positions = None # This will randomly set a single sampling position.
sampling_positions = [(500,500),(490,490)]
# Plot the tumour growth curve (True || False).
plot_tumour_growth = True
# Export the tumour growth data to file (True || False).
export_tumour = True
| 37.064516
| 88
| 0.604439
|
7950b4eb88a800dcad8fbf66345ca3b869bac832
| 2,699
|
py
|
Python
|
setup.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | null | null | null |
setup.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | null | null | null |
setup.py
|
bevy/Opentok-Python-SDK
|
f7b821160752d2383120c08f34b4c7ad66ddb161
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import codecs
import os
import re
import sys
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
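    # (For illustration, a hypothetical opentok/version.py would contain a line
    # such as: __version__ = '1.0.0')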
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
install_requires = [
'enum34 ; python_version < "3.4"',
'requests',
'six',
'pytz',
'python-jose'
]
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
install_requires.append('enum34')
setup(
name = 'opentok',
version = find_version('opentok', 'version.py'),
description = 'OpenTok server-side SDK',
long_description_content_type='text/x-rst',
url='https://github.com/opentok/Opentok-Python-SDK/',
long_description=long_description,
author='TokBox, Inc.',
author_email='support@tokbox.com',
license='LICENSE.txt',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Communications',
'Topic :: Communications :: Chat',
'Topic :: Communications :: Conferencing',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords = 'video chat tokbox tok opentok python media webrtc archiving realtime',
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
install_requires=install_requires,
include_package_data = True,
)
| 31.752941
| 86
| 0.641349
|
7950b50b42519a90bf0e992592f1bc57b3e6006e
| 16,839
|
py
|
Python
|
magenta/music/encoder_decoder_test.py
|
veskoch/magenta
|
74f16be4341925341617699dffdbddadd747acad
|
[
"Apache-2.0"
] | 51
|
2016-11-06T16:48:47.000Z
|
2021-11-12T08:59:58.000Z
|
magenta/music/encoder_decoder_test.py
|
veskoch/magenta
|
74f16be4341925341617699dffdbddadd747acad
|
[
"Apache-2.0"
] | 1
|
2022-02-11T19:01:43.000Z
|
2022-02-11T19:01:56.000Z
|
magenta/music/encoder_decoder_test.py
|
veskoch/magenta
|
74f16be4341925341617699dffdbddadd747acad
|
[
"Apache-2.0"
] | 16
|
2016-11-10T06:31:02.000Z
|
2020-01-22T12:07:28.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for encoder_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# internal imports
import numpy as np
import tensorflow as tf
from magenta.common import sequence_example_lib
from magenta.music import encoder_decoder
from magenta.music import testing_lib
class OneHotEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3, num_steps=range(3)))
def testInputSize(self):
self.assertEquals(3, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(3, self.enc.num_classes)
def testEventsToInput(self):
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0], self.enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0], self.enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 4))
def testEventsToLabel(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.events_to_label(events, 0))
self.assertEqual(1, self.enc.events_to_label(events, 1))
self.assertEqual(0, self.enc.events_to_label(events, 2))
self.assertEqual(2, self.enc.events_to_label(events, 3))
self.assertEqual(0, self.enc.events_to_label(events, 4))
def testClassIndexToEvent(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, events))
self.assertEqual(1, self.enc.class_index_to_event(1, events))
self.assertEqual(2, self.enc.class_index_to_event(2, events))
def testLabelsToNumSteps(self):
labels = [0, 1, 0, 2, 0]
self.assertEqual(3, self.enc.labels_to_num_steps(labels))
def testEncode(self):
events = [0, 1, 0, 2, 0]
sequence_example = self.enc.encode(events)
expected_inputs = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0]]
expected_labels = [1, 0, 2, 0]
expected_sequence_example = sequence_example_lib.make_sequence_example(
expected_inputs, expected_labels)
self.assertEqual(sequence_example, expected_sequence_example)
def testGetInputsBatch(self):
event_sequences = [[0, 1, 0, 2, 0], [0, 1, 2]]
expected_inputs_1 = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0]]
expected_inputs_2 = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
expected_full_length_inputs_batch = [expected_inputs_1, expected_inputs_2]
expected_last_event_inputs_batch = [expected_inputs_1[-1:],
expected_inputs_2[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
self.enc.get_inputs_batch(event_sequences, True))
self.assertListEqual(
expected_last_event_inputs_batch,
self.enc.get_inputs_batch(event_sequences))
def testExtendEventSequences(self):
events1 = [0]
events2 = [0]
events3 = [0]
event_sequences = [events1, events2, events3]
softmax = [[[0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]]]
self.enc.extend_event_sequences(event_sequences, softmax)
self.assertListEqual(list(events1), [0, 2])
self.assertListEqual(list(events2), [0, 0])
self.assertListEqual(list(events3), [0, 1])
def testEvaluateLogLikelihood(self):
events1 = [0, 1, 0]
events2 = [1, 2, 2]
event_sequences = [events1, events2]
softmax = [[[0.0, 0.5, 0.5], [0.3, 0.4, 0.3]],
[[0.0, 0.6, 0.4], [0.0, 0.4, 0.6]]]
p = self.enc.evaluate_log_likelihood(event_sequences, softmax)
self.assertListEqual([np.log(0.5) + np.log(0.3),
np.log(0.4) + np.log(0.6)], p)
class LookbackEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.LookbackEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3, num_steps=range(3)), [1, 2], 2)
def testInputSize(self):
self.assertEqual(13, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(5, self.enc.num_classes)
def testEventsToInput(self):
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
1.0, -1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0,
-1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0,
1.0, 1.0, 0.0, 1.0],
self.enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0,
-1.0, -1.0, 0.0, 0.0],
self.enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0],
self.enc.events_to_input(events, 4))
def testEventsToLabel(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(4, self.enc.events_to_label(events, 0))
self.assertEqual(1, self.enc.events_to_label(events, 1))
self.assertEqual(4, self.enc.events_to_label(events, 2))
self.assertEqual(2, self.enc.events_to_label(events, 3))
self.assertEqual(4, self.enc.events_to_label(events, 4))
def testClassIndexToEvent(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, events[:1]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:1]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:2]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:2]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:2]))
self.assertEqual(1, self.enc.class_index_to_event(3, events[:2]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:2]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:3]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:3]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:3]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:3]))
self.assertEqual(1, self.enc.class_index_to_event(4, events[:3]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:4]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:4]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:4]))
self.assertEqual(2, self.enc.class_index_to_event(3, events[:4]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:4]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:5]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:5]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:5]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:5]))
self.assertEqual(2, self.enc.class_index_to_event(4, events[:5]))
def testLabelsToNumSteps(self):
labels = [0, 1, 0, 2, 0]
self.assertEqual(3, self.enc.labels_to_num_steps(labels))
labels = [0, 1, 3, 2, 4]
self.assertEqual(5, self.enc.labels_to_num_steps(labels))
def testEmptyLookback(self):
enc = encoder_decoder.LookbackEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3), [], 2)
self.assertEqual(5, enc.input_size)
self.assertEqual(3, enc.num_classes)
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0, 1.0, -1.0],
enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0, -1.0, 1.0],
enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 1.0],
enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0, -1.0, -1.0],
enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0, 1.0, -1.0],
enc.events_to_input(events, 4))
self.assertEqual(0, enc.events_to_label(events, 0))
self.assertEqual(1, enc.events_to_label(events, 1))
self.assertEqual(0, enc.events_to_label(events, 2))
self.assertEqual(2, enc.events_to_label(events, 3))
self.assertEqual(0, enc.events_to_label(events, 4))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:1]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:1]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:2]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:2]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:2]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:3]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:3]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:3]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:4]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:4]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:4]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:5]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:5]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:5]))
class ConditionalEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.ConditionalEventSequenceEncoderDecoder(
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(2)),
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3)))
def testInputSize(self):
self.assertEquals(5, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(3, self.enc.num_classes)
def testEventsToInput(self):
control_events = [1, 1, 1, 0, 0]
target_events = [0, 1, 0, 2, 0]
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(control_events, target_events, 0))
self.assertEqual(
[0.0, 1.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(control_events, target_events, 1))
self.assertEqual(
[1.0, 0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(control_events, target_events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0, 1.0],
self.enc.events_to_input(control_events, target_events, 3))
def testEventsToLabel(self):
target_events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.events_to_label(target_events, 0))
self.assertEqual(1, self.enc.events_to_label(target_events, 1))
self.assertEqual(0, self.enc.events_to_label(target_events, 2))
self.assertEqual(2, self.enc.events_to_label(target_events, 3))
self.assertEqual(0, self.enc.events_to_label(target_events, 4))
def testClassIndexToEvent(self):
target_events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, target_events))
self.assertEqual(1, self.enc.class_index_to_event(1, target_events))
self.assertEqual(2, self.enc.class_index_to_event(2, target_events))
def testEncode(self):
control_events = [1, 1, 1, 0, 0]
target_events = [0, 1, 0, 2, 0]
sequence_example = self.enc.encode(control_events, target_events)
expected_inputs = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0]]
expected_labels = [1, 0, 2, 0]
expected_sequence_example = sequence_example_lib.make_sequence_example(
expected_inputs, expected_labels)
self.assertEqual(sequence_example, expected_sequence_example)
def testGetInputsBatch(self):
control_event_sequences = [[1, 1, 1, 0, 0], [1, 1, 1, 0, 0]]
target_event_sequences = [[0, 1, 0, 2], [0, 1]]
expected_inputs_1 = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0]]
expected_inputs_2 = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0]]
expected_full_length_inputs_batch = [expected_inputs_1, expected_inputs_2]
expected_last_event_inputs_batch = [expected_inputs_1[-1:],
expected_inputs_2[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
self.enc.get_inputs_batch(
control_event_sequences, target_event_sequences, True))
self.assertListEqual(
expected_last_event_inputs_batch,
self.enc.get_inputs_batch(
control_event_sequences, target_event_sequences))
def testExtendEventSequences(self):
target_events_1 = [0]
target_events_2 = [0]
target_events_3 = [0]
target_event_sequences = [target_events_1, target_events_2, target_events_3]
softmax = [[[0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]]]
self.enc.extend_event_sequences(target_event_sequences, softmax)
self.assertListEqual(list(target_events_1), [0, 2])
self.assertListEqual(list(target_events_2), [0, 0])
self.assertListEqual(list(target_events_3), [0, 1])
def testEvaluateLogLikelihood(self):
target_events_1 = [0, 1, 0]
target_events_2 = [1, 2, 2]
target_event_sequences = [target_events_1, target_events_2]
softmax = [[[0.0, 0.5, 0.5], [0.3, 0.4, 0.3]],
[[0.0, 0.6, 0.4], [0.0, 0.4, 0.6]]]
p = self.enc.evaluate_log_likelihood(target_event_sequences, softmax)
self.assertListEqual([np.log(0.5) + np.log(0.3),
np.log(0.4) + np.log(0.6)], p)
class OptionalEventSequenceEncoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.OptionalEventSequenceEncoder(
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3)))
def testInputSize(self):
self.assertEquals(4, self.enc.input_size)
def testEventsToInput(self):
events = [(False, 0), (False, 1), (False, 0), (True, 2), (True, 0)]
self.assertEqual(
[0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual(
[0.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual(
[0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0],
self.enc.events_to_input(events, 3))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0],
self.enc.events_to_input(events, 4))
class MultipleEventSequenceEncoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.MultipleEventSequenceEncoder([
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(2)),
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3))])
def testInputSize(self):
self.assertEquals(5, self.enc.input_size)
def testEventsToInput(self):
events = [(1, 0), (1, 1), (1, 0), (0, 2), (0, 0)]
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual(
[0.0, 1.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0, 1.0],
self.enc.events_to_input(events, 3))
self.assertEqual(
[1.0, 0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 4))
if __name__ == '__main__':
tf.test.main()
| 42.203008
| 80
| 0.6441
|
7950b53c7afbbd2a4bb5dec48381e76e9ac2ed8e
| 179
|
py
|
Python
|
django_blog/my-venv/Scripts/django-admin.py
|
mghendi/Portfolio
|
65534e600770803deb603ba5ffb0a5ef70f0db4e
|
[
"MIT"
] | null | null | null |
django_blog/my-venv/Scripts/django-admin.py
|
mghendi/Portfolio
|
65534e600770803deb603ba5ffb0a5ef70f0db4e
|
[
"MIT"
] | null | null | null |
django_blog/my-venv/Scripts/django-admin.py
|
mghendi/Portfolio
|
65534e600770803deb603ba5ffb0a5ef70f0db4e
|
[
"MIT"
] | null | null | null |
#!c:\users\sammy\documents\github\django_blog\my-venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 29.833333
| 72
| 0.793296
|
7950b60fa910fb7245ae3031e2a1cbf20c171085
| 4,086
|
py
|
Python
|
anchore_engine/services/policy_engine/api/models/trigger_param_spec.py
|
Talanor/anchore-engine
|
5e809db1eb681f89670655c5bf9933eba50cf403
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/trigger_param_spec.py
|
Talanor/anchore-engine
|
5e809db1eb681f89670655c5bf9933eba50cf403
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/trigger_param_spec.py
|
Talanor/anchore-engine
|
5e809db1eb681f89670655c5bf9933eba50cf403
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class TriggerParamSpec(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, description=None, required=None, validator=None):
"""
TriggerParamSpec - a model defined in Swagger
:param name: The name of this TriggerParamSpec.
:type name: str
:param description: The description of this TriggerParamSpec.
:type description: str
:param required: The required of this TriggerParamSpec.
:type required: bool
:param validator: The validator of this TriggerParamSpec.
:type validator: object
"""
self.swagger_types = {
'name': str,
'description': str,
'required': bool,
'validator': object
}
self.attribute_map = {
'name': 'name',
'description': 'description',
'required': 'required',
'validator': 'validator'
}
self._name = name
self._description = description
self._required = required
self._validator = validator
@classmethod
def from_dict(cls, dikt):
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The TriggerParamSpec of this TriggerParamSpec.
:rtype: TriggerParamSpec
"""
return deserialize_model(dikt, cls)
@property
def name(self):
"""
Gets the name of this TriggerParamSpec.
Parameter name as it appears in policy document
:return: The name of this TriggerParamSpec.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TriggerParamSpec.
Parameter name as it appears in policy document
:param name: The name of this TriggerParamSpec.
:type name: str
"""
self._name = name
@property
def description(self):
"""
Gets the description of this TriggerParamSpec.
:return: The description of this TriggerParamSpec.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this TriggerParamSpec.
:param description: The description of this TriggerParamSpec.
:type description: str
"""
self._description = description
@property
def required(self):
"""
Gets the required of this TriggerParamSpec.
Is this a required parameter or optional
:return: The required of this TriggerParamSpec.
:rtype: bool
"""
return self._required
@required.setter
def required(self, required):
"""
Sets the required of this TriggerParamSpec.
Is this a required parameter or optional
:param required: The required of this TriggerParamSpec.
:type required: bool
"""
self._required = required
@property
def validator(self):
"""
Gets the validator of this TriggerParamSpec.
If present, a definition for validation of input. Typically a jsonschema object that can be used to validate an input against.
:return: The validator of this TriggerParamSpec.
:rtype: object
"""
return self._validator
@validator.setter
def validator(self, validator):
"""
Sets the validator of this TriggerParamSpec.
If present, a definition for validation of input. Typically a jsonschema object that can be used to validate an input against.
:param validator: The validator of this TriggerParamSpec.
:type validator: object
"""
self._validator = validator
| 27.422819
| 134
| 0.613803
|
7950b84644fa8f6474b387fd2ee7d0671afa8aae
| 170
|
py
|
Python
|
configmng/__init__.py
|
christian-oreilly/configmng
|
8e0e18d5e39d1bab2fcac4b32ee7b855c28f2f14
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
configmng/__init__.py
|
christian-oreilly/configmng
|
8e0e18d5e39d1bab2fcac4b32ee7b855c28f2f14
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
configmng/__init__.py
|
christian-oreilly/configmng
|
8e0e18d5e39d1bab2fcac4b32ee7b855c28f2f14
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
from .config import Config, ConfigArg
from .configmng import ConfigMng
from .configlevel import ConfigLevel
from .schema import Schema
from .provenance import ConfigProv
| 28.333333
| 37
| 0.841176
|
7950ba19ba672b146edf755184bc33ddb9b716f8
| 2,899
|
py
|
Python
|
tests/unit/config/test_config_experimental.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/config/test_config_experimental.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/config/test_config_experimental.py
|
drohde/deepr
|
672772ea3ce9cf391f9f8efc7ae9c9d438957817
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for config.deepr_experimental"""
from dataclasses import dataclass
import pytest
import deepr as dpr
@dataclass
class A:
x: int = 1
y: float = 1.0
z: bool = True
@dataclass
class B:
a: A
b: str = "b"
class C(A):
def __init__(self, b: str = "b", **kwargs):
super().__init__(**kwargs)
self.b = b
@pytest.mark.parametrize(
"obj, cfg",
[
(
B(A()),
{
"type": "test_config_experimental.B",
"a": {"type": "test_config_experimental.A", "x": 1, "y": 1.0, "z": True},
"b": "b",
},
),
(C(), {"type": "test_config_experimental.C", "x": 1, "y": 1.0, "z": True, "b": "b"}),
],
)
def test_to_config(obj, cfg):
"""Test to_config"""
assert dpr.config.experimental.to_config(obj) == cfg
@pytest.mark.parametrize(
"config, params, expected",
[
({"x": 1}, ["x"], {"x": "$params:x"}),
([{"x": 1}], ["x"], [{"x": "$params:x"}]),
(({"x": 1},), ["x"], ({"x": "$params:x"},)),
({"x": {"y": 1}}, ["y"], {"x": {"y": "$params:y"}}),
({"x": "$other:x"}, ["x"], {"x": "$params:x"}),
([{"x": 1}], ["y"], None),
],
)
def test_add_macro_params(config, params, expected):
if expected is not None:
assert dpr.config.experimental.add_macro_params(config, macro="params", params=params) == expected
else:
with pytest.raises(ValueError):
dpr.config.experimental.add_macro_params(config, macro="params", params=params)
@pytest.mark.parametrize(
"item, values, expected",
[
({"x": 1}, {"x": "y"}, {"x": "y"}),
([{"x": 1}], {"x": "y"}, [{"x": "y"}]),
(({"x": 1},), {"x": "y"}, ({"x": "y"},)),
({"a": {"x": 1}}, {"x": "y"}, {"a": {"x": "y"}}),
({"x": "y"}, {"y": "1"}, {"x": "y"}),
({"x": (1, 2)}, {"x": (2, 3)}, None),
({"x": {}}, {"x": {}}, None),
({"x": []}, {"x": []}, None),
],
)
def test_replace_values(item, values, expected):
if expected is not None:
assert dpr.config.experimental.replace_values(item, values=values) == expected
else:
with pytest.raises(ValueError):
dpr.config.experimental.replace_values(item, values=values)
@pytest.mark.parametrize(
"item, keys, expected",
[
({"x": 1}, ["x"], {"x": 1}),
([{"x": 1}], ["x"], {"x": 1}),
(({"x": 1},), ["x"], {"x": 1}),
({"a": {"x": 1}}, ["x"], {"x": 1}),
({"x": (1, 2)}, ["x"], None),
({"x": {}}, ["x"], None),
({"x": []}, ["x"], None),
],
)
def test_find_values(item, keys, expected):
if expected is not None:
assert dpr.config.experimental.find_values(item, keys=keys) == expected
else:
with pytest.raises(ValueError):
dpr.config.experimental.find_values(item, keys=keys)
| 27.349057
| 106
| 0.461538
|
7950bb6bb89edfcc06f32fc35e6501774beff9c3
| 1,230
|
py
|
Python
|
tests/test_celery_task_view.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | null | null | null |
tests/test_celery_task_view.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | null | null | null |
tests/test_celery_task_view.py
|
LaudateCorpus1/squest
|
98304f20c1d966fb3678d348ffd7c5be438bb6be
|
[
"Apache-2.0"
] | 1
|
2022-03-24T03:37:12.000Z
|
2022-03-24T03:37:12.000Z
|
from django_celery_results.models import TaskResult
from rest_framework import status
from rest_framework.reverse import reverse
from tests.test_service_catalog.base import BaseTest
class TestCeleryTaskAPIViews(BaseTest):
def setUp(self):
super(TestCeleryTaskAPIViews, self).setUp()
test_task = TaskResult.objects.create(task_id=1, status="PENDING")
self.url = reverse('get_task_result', args=[test_task.id])
def test_admin_can_get_task(self):
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("id" in response.json())
self.assertTrue("status" in response.json())
self.assertEqual(response.data["status"], "PENDING")
def test_cannot_get_task_when_not_admin(self):
self.client.force_login(user=self.standard_user_2)
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cannot_get_task_when_logout(self):
self.client.logout()
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 37.272727
| 74
| 0.730081
|
7950bbbfa87e5809c6036d892c97c56404c7425f
| 571
|
py
|
Python
|
badlinkfinder/utilities.py
|
jonchun/badlinkfinder
|
f61c42eaad7bdebea639df69da58954924c38c20
|
[
"MIT"
] | 1
|
2021-11-09T10:16:42.000Z
|
2021-11-09T10:16:42.000Z
|
badlinkfinder/utilities.py
|
jonchun/badlinkfinder
|
f61c42eaad7bdebea639df69da58954924c38c20
|
[
"MIT"
] | null | null | null |
badlinkfinder/utilities.py
|
jonchun/badlinkfinder
|
f61c42eaad7bdebea639df69da58954924c38c20
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
import logging
from url_normalize import url_normalize
def normalize_url(url):
normalized_url = url_normalize(url).split('#')[0]
return normalized_url
"""
Convert string to log level. Borrowed from Fail2Ban.
https://github.com/fail2ban/fail2ban/blob/1b4ba602bac38a067b5abb9a941feab53c36c915/fail2ban/helpers.py#L136
"""
def str2LogLevel(value):
value = str(value)
try:
ll = getattr(logging, value.upper())
except AttributeError:
raise ValueError("Invalid log level %r" % value)
return ll
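# Illustrative usage (assumed, not part of the original module):
#   str2LogLevel("info")   # -> logging.INFO (20)
#   str2LogLevel("bogus")  # -> raises ValueError("Invalid log level 'bogus'")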
| 24.826087
| 107
| 0.725044
|
7950bc0f59c2f0f351e1bd2c316829cceb81f4bc
| 9,730
|
py
|
Python
|
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py
|
mingxiaoh/chainer_for_rebase
|
8c5ba24bf81d648402d388dac1df7591b2557712
|
[
"MIT"
] | 1
|
2020-05-28T10:07:25.000Z
|
2020-05-28T10:07:25.000Z
|
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py
|
mingxiaoh/chainer_for_rebase
|
8c5ba24bf81d648402d388dac1df7591b2557712
|
[
"MIT"
] | null | null | null |
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py
|
mingxiaoh/chainer_for_rebase
|
8c5ba24bf81d648402d388dac1df7591b2557712
|
[
"MIT"
] | null | null | null |
import unittest
import functools
import math
import mock
import numpy
from operator import mul
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
        # Avoid instability of the numerical gradient
x_shape = (2, 3) + self.dims
self.x = numpy.arange(
functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
self.x = 2 * self.x / self.x.size - 1
outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p)
in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-03, 'rtol': 1e-03}
def check_forward(self, x_data, use_cudnn='always'):
dims = self.dims
ksize = self.ksize
stride = self.stride
pad = self.pad
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
patches = pooling_nd_helper.pooling_patches(
dims, ksize, stride, pad, self.cover_all)
for k in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[k, c]
expect = numpy.array([x[idx].max() for idx in patches])
expect = expect.reshape(y_data.shape[2:])
testing.assert_allclose(expect, y_data[k, c])
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, use_cudnn='never')
def test_forward_cpu_wide(self): # see #120
ndim = self.ndim
x_shape = (2, 3) + (15,) * ndim
x_data = numpy.random.rand(*x_shape).astype(self.dtype)
x = chainer.Variable(x_data)
ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))
functions.max_pooling_nd(x, ksize, stride=stride, pad=0)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.cudnn
@condition.retry(3)
def test_forward_gpu_non_contiguous(self):
self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
# Regression test to max_pooling_2d.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
testing.assert_allclose(y_nd.data, y_2d.data)
@condition.retry(3)
def test_forward_consistency_regression_cpu(self):
self.check_forward_consistency_regression(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_consistency_regression_gpu(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_consistency_regression_no_cudnn(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all),
x_data, y_grad, dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_non_contiguous(self):
self.check_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_backward_consistency_regression(self, x_data, gy_data,
use_cudnn='always'):
# Regression test to two-dimensional max pooling layer.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = cuda.get_array_module(x_data)
# Backward computation for N-dimensional max pooling layer.
x_nd = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_nd = functions.MaxPoolingND(self.ndim, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_nd = func_nd(x_nd)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional max pooling layer.
x_2d = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_2d = functions.MaxPooling2D(ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = func_2d.apply((x_2d,))[0]
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad)
@condition.retry(3)
def test_backward_consistency_regression_cpu(self):
self.check_backward_consistency_regression(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_consistency_regression_gpu(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_consistency_regression_no_cudnn(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')
def test_backward_cpu_more_than_once(self):
func = functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
func(self.x)
func.backward_cpu((self.x,), (self.gy,))
func.backward_cpu((self.x,), (self.gy,))
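# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# A minimal, standalone restatement of the N-d vs. 2-d consistency idea the
# regression tests above rely on: for 4-d input, max_pooling_nd infers ndim=2
# and should match max_pooling_2d. The shape and pooling parameters below are
# arbitrary example values, not taken from the parameterized test cases.
def _example_nd_vs_2d_consistency():
    x = numpy.random.uniform(-1, 1, (2, 3, 6, 8)).astype(numpy.float32)
    y_nd = functions.max_pooling_nd(x, ksize=3, stride=2, pad=1)
    y_2d = functions.max_pooling_2d(x, ksize=3, stride=2, pad=1)
    testing.assert_allclose(y_nd.data, y_2d.data)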
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with mock.patch('cupy.cudnn.cudnn.poolingForward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto') and
self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
            # should be consistent with forward regardless of the use_cudnn config
y.grad = self.gy
with mock.patch('cupy.cudnn.cudnn.poolingBackward') as func:
y.backward()
self.assertEqual(func.called, expect)
testing.run_module(__name__, __file__)
| 36.578947
| 79
| 0.616958
|
7950bd95802edd8bc47d29c9132f5220091e6a4e
| 842
|
py
|
Python
|
day05/mysql_study/My_select3.py
|
zhangyage/Python-oldboy
|
a95c1b465929e2be641e425fcb5e15b366800831
|
[
"Apache-2.0"
] | 1
|
2020-06-04T08:44:09.000Z
|
2020-06-04T08:44:09.000Z
|
day05/mysql_study/My_select3.py
|
zhangyage/Python-oldboy
|
a95c1b465929e2be641e425fcb5e15b366800831
|
[
"Apache-2.0"
] | null | null | null |
day05/mysql_study/My_select3.py
|
zhangyage/Python-oldboy
|
a95c1b465929e2be641e425fcb5e15b366800831
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
MySQL query operations
Comparison of fetchall() and fetchone():
fetchall()  # fetch all rows of the result set
fetchone()  # fetch only a single row
cur.scroll(0,mode='absolute') # absolute positioning: move the cursor pointer to a given offset (0 is the start); compare with how yield works
cur.scroll(-1,mode='relative') # relative positioning: -1 moves the pointer back by one row
'''
import MySQLdb
conn = MySQLdb.connect(host='192.168.75.133',user='zhangyage',passwd='zhangyage',db='oldboy')
cur = conn.cursor(cursorclass = MySQLdb.cursors.DictCursor)
result1=cur.execute('select * from Userinfo')
data = cur.fetchone() # store one row of the query result
print data #print the first row
#cur.scroll(0,mode='absolute') # absolute positioning: move the pointer back to the start so both fetches return the same row
cur.scroll(-1,mode='relative') # relative positioning: move the pointer back by one so both fetches return the same row
data = cur.fetchone() # store the next fetched row
print data #print the second fetch (the same row again, because of the scroll above)
cur.close()
conn.close()
#print result1 # this would print the number of rows matched by the query
#print data # this would print the query result (a tuple, or a dict when using DictCursor)
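# --- Illustrative sketch (added for clarity; not part of the original script) --
# One helper that shows the fetchone()/scroll() behaviour described in the
# docstring above in a single place. The table name 'Userinfo' is reused from
# the script and is an assumption about the author's database; the helper is
# defined but never called here.
def _demo_fetch_and_scroll(connection):
    cur = connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
    cur.execute('select * from Userinfo')
    first = cur.fetchone()            # read the first row; pointer now at row 2
    cur.scroll(0, mode='absolute')    # jump back to the very first row
    same_as_first = cur.fetchone()    # the first row again
    cur.scroll(-1, mode='relative')   # step the pointer back by one row
    again = cur.fetchone()            # still the first row
    cur.close()
    return first, same_as_first, again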
| 24.057143
| 93
| 0.675772
|
7950bdb27156d2f304e63d35cd13d7536264cae9
| 5,445
|
py
|
Python
|
htdocs/frost/frost_ts.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | 1
|
2019-10-07T17:01:24.000Z
|
2019-10-07T17:01:24.000Z
|
htdocs/frost/frost_ts.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
htdocs/frost/frost_ts.py
|
trentford/iem
|
7264d24f2d79a3cd69251a09758e6531233a732f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Generate some line charts from ISU Frost Model Output"""
import sys
import os
import cgi
import datetime
import numpy as np
import pytz
import matplotlib.dates as mdates
from pyiem.util import ncopen, ssw
from pyiem.plot.use_agg import plt
from pyiem.datatypes import temperature
def get_latest_time(model):
''' Figure out the latest model runtime '''
utc = datetime.datetime.utcnow()
utc = utc.replace(tzinfo=pytz.UTC)
utc = utc.replace(hour=12, minute=0, second=0, microsecond=0)
limit = 24
while not os.path.isfile(
utc.strftime(("/mesonet/share/frost/" +
model + "/%Y%m%d%H%M_iaoutput.nc"))):
utc -= datetime.timedelta(hours=12)
limit -= 1
if limit < 0:
return None
return utc
def get_times(nc):
''' Return array of datetimes for the time array '''
tm = nc.variables['time']
sts = datetime.datetime.strptime(tm.units.replace('minutes since ', ''),
'%Y-%m-%d %H:%M:%S')
sts = sts.replace(tzinfo=pytz.utc)
res = []
for t in tm[:]:
res.append(sts + datetime.timedelta(minutes=float(t)))
return res
def get_ij(lon, lat, nc):
''' Figure out the closest grid cell '''
dist = ((nc.variables['lon'][:] - lon)**2 +
(nc.variables['lat'][:] - lat)**2)**.5
return np.unravel_index(np.argmin(dist), dist.shape)
def add_labels(fig):
"""Create a legend for the condition variable"""
fig.text(0.85, 0.8, "Frost", color='red')
fig.text(0.85, 0.75, "Ice/Snow", color='orange')
fig.text(0.85, 0.7, "Wet", color='green')
fig.text(0.85, 0.65, "Dew", color="brown")
fig.text(0.85, 0.6, "Frz Rain", color="purple")
def get_icond_color(model, val):
""" Get the color for this Model and icond
METRO: 1-8 dry, wet, ice/snow, mix, dew, melting snow, blk ice, icing rain
BRIDGET: 0-5 dry, frosty, icy/snowy, melting, freezing, wet
"""
if val is None or val < 0:
return 'none'
if model == 'metro':
colors = ['white', 'white', 'green', 'orange', 'orange', 'brown',
'blue', 'orange', 'purple']
else:
colors = ['white', 'tan', 'orange', 'blue', 'purple', 'green']
if val > (len(colors) - 1):
return 'none'
return colors[val]
def get_ifrost_color(val):
"""Which color to use"""
if val is None or val == -1:
return 'none'
colors = ['#EEEEEE', 'r']
try:
return colors[val]
except Exception as _exp:
return 'none'
def process(model, lon, lat):
""" Generate a plot for this given combination """
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])
modelts = get_latest_time(model)
if modelts is None:
ax.text(0.5, 0.5, "No Data Found to Plot!", ha='center')
ssw("Content-Type: image/png\n\n")
fig.savefig(getattr(sys.stdout, 'buffer', sys.stdout), format="png")
return
nc = ncopen(
modelts.strftime(("/mesonet/share/frost/" +
model + "/%Y%m%d%H%M_iaoutput.nc")))
times = get_times(nc)
i, j = get_ij(lon, lat, nc)
ax.plot(times,
temperature(nc.variables['bdeckt'][:, i, j], 'K').value('F'),
color='k',
label='Bridge Deck Temp' if model == 'bridget' else 'Pavement')
ax.plot(times, temperature(nc.variables['tmpk'][:, i, j], 'K').value("F"),
color='r', label='Air Temp')
ax.plot(times, temperature(nc.variables['dwpk'][:, i, j], 'K').value("F"),
color='g', label='Dew Point')
# ax.set_ylim(-30,150)
ax.set_title(("ISUMM5 %s Timeseries\n"
"i: %s j:%s lon: %.2f lat: %.2f Model Run: %s"
) % (model, i, j, nc.variables['lon'][i, j],
nc.variables['lat'][i, j],
modelts.astimezone(pytz.timezone("America/Chicago")
).strftime("%-d %b %Y %-I:%M %p")))
ax.xaxis.set_major_locator(
mdates.DayLocator(interval=1, tz=pytz.timezone("America/Chicago")))
ax.xaxis.set_major_formatter(
mdates.DateFormatter('%d %b\n%Y', tz=pytz.timezone("America/Chicago")))
ax.axhline(32, linestyle='-.')
ax.grid(True)
ax.set_ylabel(r"Temperature $^\circ$F")
(ymin, ymax) = ax.get_ylim()
for i2, ifrost in enumerate(nc.variables['ifrost'][:-1, i, j]):
ax.barh(ymax-1, 1.0/24.0/4.0, left=times[i2],
fc=get_ifrost_color(ifrost), ec='none')
for i2, icond in enumerate(nc.variables['icond'][:-1, i, j]):
ax.barh(ymax-2, 1.0/24.0/4.0, left=times[i2],
fc=get_icond_color(model, icond), ec='none')
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
ax.legend(loc='upper center',
bbox_to_anchor=(0.5, -0.12), fancybox=True, shadow=True, ncol=3)
add_labels(fig)
ssw("Content-Type: image/png\n\n")
fig.savefig(getattr(sys.stdout, 'buffer', sys.stdout), format="png")
def main():
""" Go Main Go """
form = cgi.FieldStorage()
if 'lon' in form and 'lat' in form:
process(form.getfirst('model'), float(form.getfirst('lon')),
float(form.getfirst('lat')))
if __name__ == '__main__':
# main
main()
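# --- Illustrative sketch (added for clarity; not part of the original CGI app) -
# get_ij() above picks the grid cell whose lon/lat is closest to the requested
# point by minimising Euclidean distance in degree space. The tiny 2x2 grid
# below is invented purely to show the mechanics; it does not come from the
# frost model NetCDF files.
def _demo_nearest_cell():
    lons = np.array([[-96.0, -95.0], [-96.0, -95.0]])
    lats = np.array([[42.0, 42.0], [43.0, 43.0]])
    lon, lat = -95.1, 42.9
    dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** .5
    i, j = np.unravel_index(np.argmin(dist), dist.shape)
    return i, j  # -> (1, 1), the cell at (-95.0, 43.0)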
| 33.819876
| 79
| 0.567493
|
7950bdd72000a2182d05a16051f6f99a6bb8217c
| 531
|
py
|
Python
|
lib/galaxy/app_unittest_utils/celery_helper.py
|
pvanheus/galaxy
|
48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574
|
[
"CC-BY-3.0"
] | 3
|
2016-09-15T21:04:56.000Z
|
2019-04-21T02:48:25.000Z
|
lib/galaxy/app_unittest_utils/celery_helper.py
|
pvanheus/galaxy
|
48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574
|
[
"CC-BY-3.0"
] | 209
|
2015-06-17T16:15:20.000Z
|
2022-03-21T15:23:07.000Z
|
lib/galaxy/app_unittest_utils/celery_helper.py
|
pvanheus/galaxy
|
48403b0e45b71c4b0ce7a1e22d65a0a7cdb79574
|
[
"CC-BY-3.0"
] | 7
|
2016-07-10T16:44:30.000Z
|
2020-08-30T19:25:51.000Z
|
from functools import wraps
def rebind_container_to_task(app):
import galaxy.app
galaxy.app.app = app
from galaxy.celery import tasks
def magic_bind_dynamic(func):
return wraps(func)(app.magic_partial(func, shared=None))
for task in tasks.CELERY_TASKS:
task_fn = getattr(tasks, task, None)
if task_fn:
task_fn = getattr(task_fn, '__wrapped__', task_fn)
container_bound_task = magic_bind_dynamic(task_fn)
setattr(tasks, task, container_bound_task)
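# --- Illustrative sketch (added for clarity; not part of the Galaxy helper) ----
# rebind_container_to_task() above swaps each Celery task for a wraps()-wrapped
# version bound to a specific app via app.magic_partial. The miniature example
# below reproduces only the wrapping pattern with an ordinary function and a
# stand-in binder; `_fake_binder` and `greet` are invented names, not Galaxy or
# Celery APIs.
def _demo_rebinding():
    def greet(name, punctuation="!"):
        return "hello " + name + punctuation
    def _fake_binder(func, **presets):
        # Stands in for app.magic_partial: pre-binds keyword arguments while
        # wraps() keeps the original name/docstring, as the real helper does.
        @wraps(func)
        def bound(*args, **kwargs):
            merged = dict(presets, **kwargs)
            return func(*args, **merged)
        return bound
    bound_greet = _fake_binder(greet, punctuation="?")
    assert bound_greet.__name__ == "greet"
    return bound_greet("galaxy")  # -> "hello galaxy?"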
| 29.5
| 64
| 0.683616
|
7950be1316d8e0fdc8291317a9d598d3ee667577
| 1,495
|
py
|
Python
|
ooobuild/dyn/reflection/x_singleton_type_description.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/reflection/x_singleton_type_description.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/reflection/x_singleton_type_description.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.reflection
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.reflection import XSingletonTypeDescription as XSingletonTypeDescription
setattr(XSingletonTypeDescription, '__ooo_ns__', 'com.sun.star.reflection')
setattr(XSingletonTypeDescription, '__ooo_full_ns__', 'com.sun.star.reflection.XSingletonTypeDescription')
setattr(XSingletonTypeDescription, '__ooo_type_name__', 'interface')
else:
from ...lo.reflection.x_singleton_type_description import XSingletonTypeDescription as XSingletonTypeDescription
__all__ = ['XSingletonTypeDescription']
| 40.405405
| 116
| 0.789298
|
7950bf2b33692324216754b29e8bd1b8b9c7fae0
| 460
|
py
|
Python
|
eventmap/eventmap/urls.py
|
alexdzehil/learn_python_project
|
197d223c4383bee521903752f59235aec52dc5e7
|
[
"MIT"
] | null | null | null |
eventmap/eventmap/urls.py
|
alexdzehil/learn_python_project
|
197d223c4383bee521903752f59235aec52dc5e7
|
[
"MIT"
] | null | null | null |
eventmap/eventmap/urls.py
|
alexdzehil/learn_python_project
|
197d223c4383bee521903752f59235aec52dc5e7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('events.urls', namespace='events')),
path('accounts/', include('accounts.urls', namespace='accounts')),
path('accounts/', include('allauth.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 28.75
| 76
| 0.728261
|
7950bfe3fefcd96f742e6d0af946609098c3c9a5
| 18,486
|
py
|
Python
|
pooch/_version.py
|
santis19/pooch
|
ef9fb2dd79a2dde8a93d8ce1c6e473def512a705
|
[
"BSD-3-Clause"
] | null | null | null |
pooch/_version.py
|
santis19/pooch
|
ef9fb2dd79a2dde8a93d8ce1c6e473def512a705
|
[
"BSD-3-Clause"
] | null | null | null |
pooch/_version.py
|
santis19/pooch
|
ef9fb2dd79a2dde8a93d8ce1c6e473def512a705
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: skip-file
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "pooch/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
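# --- Illustrative sketch (added for clarity; not part of versioneer) -----------
# Concrete examples of the PEP 440 local-version format documented above. The
# tag, distance and hash values are arbitrary sample data, not real git output.
def _example_render_pep440():
    tagged_clean = render_pep440(
        {"closest-tag": "1.2.0", "distance": 0, "short": "abc1234", "dirty": False})
    tagged_ahead_dirty = render_pep440(
        {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True})
    untagged = render_pep440(
        {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False})
    # -> "1.2.0", "1.2.0+3.gabc1234.dirty", "0+untagged.7.gabc1234"
    return tagged_clean, tagged_ahead_dirty, untagged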
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
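# --- Illustrative sketch (added for clarity; not part of versioneer) -----------
# How a pieces dict from git_pieces_from_vcs() flows through render() for a few
# of the styles handled above. The values are sample data, not a real checkout.
def _example_render_styles():
    pieces = {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234",
              "long": "abc1234def5678abc1234def5678abc1234def56", "dirty": True,
              "error": None, "date": "2020-01-02T03:04:05+0000"}
    return {style: render(pieces, style)["version"]
            for style in ("pep440", "pep440-post", "git-describe")}
    # pep440       -> "1.2.0+3.gabc1234.dirty"
    # pep440-post  -> "1.2.0.post3.dev0+gabc1234"
    # git-describe -> "1.2.0-3-gabc1234-dirty"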
| 33.129032
| 88
| 0.583955
|
7950c047b2c2857c6acfd01961aab2f0cd526ac5
| 424
|
py
|
Python
|
Numpy/arrays.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2021-12-17T11:03:13.000Z
|
2021-12-17T11:03:13.000Z
|
Numpy/arrays.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | 1
|
2020-02-05T00:14:43.000Z
|
2020-02-06T09:22:49.000Z
|
Numpy/arrays.py
|
Fernal73/LearnPython3
|
5288017c0dbf95633b84f1e6324f00dec6982d36
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
a = np.arange(1,16,dtype=int).reshape(3,5).T
print(a)
b = a[1:4:2]
print(b)
a = a.T
b = a[:,1:4:2]
print(b)
a = np.arange(25).reshape(5, 5)
b = np.array([1., 5, 10, 15, 20])
print(a,b)
c = a / b[:, np.newaxis]
print(c)
a = np.random.rand(10,3)
print(a)
b = np.where(a - 0.5 > 0, a - 0.5, 0.5 - a)
c = np.argmin(b,axis=1)
a = a[np.arange(0,10), c]
print(a)
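# --- Illustrative note (added for clarity) -------------------------------------
# The key step above is b[:, np.newaxis]: it reshapes the (5,) vector into a
# (5, 1) column so dividing the (5, 5) matrix by it broadcasts one scale factor
# per row. The small self-contained example below shows the same idea.
def _demo_row_wise_division():
    m = np.arange(6).reshape(2, 3)         # shape (2, 3)
    row_scale = np.array([1.0, 2.0])       # shape (2,)
    scaled = m / row_scale[:, np.newaxis]  # (2, 1) column broadcasts over columns
    return scaled                          # [[0., 1., 2.], [1.5, 2., 2.5]]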
| 16.96
| 44
| 0.558962
|
7950c07037b2782786bde5afd988850ce01b37e9
| 5,078
|
py
|
Python
|
dvc/fs/azure.py
|
meierale/dvc
|
67b42ebe4f9474d71a2d5b195014046775a08401
|
[
"Apache-2.0"
] | 1
|
2021-07-20T05:33:18.000Z
|
2021-07-20T05:33:18.000Z
|
dvc/fs/azure.py
|
meierale/dvc
|
67b42ebe4f9474d71a2d5b195014046775a08401
|
[
"Apache-2.0"
] | null | null | null |
dvc/fs/azure.py
|
meierale/dvc
|
67b42ebe4f9474d71a2d5b195014046775a08401
|
[
"Apache-2.0"
] | 1
|
2021-08-05T07:15:30.000Z
|
2021-08-05T07:15:30.000Z
|
import logging
import os
import threading
from fsspec.asyn import fsspec_loop
from fsspec.utils import infer_storage_options
from funcy import cached_property, memoize, wrap_prop
from dvc.exceptions import DvcException
from dvc.path_info import CloudURLInfo
from dvc.scheme import Schemes
from dvc.utils import format_link
from .fsspec_wrapper import ObjectFSWrapper
logger = logging.getLogger(__name__)
_DEFAULT_CREDS_STEPS = (
"https://azuresdkdocs.blob.core.windows.net/$web/python/"
"azure-identity/1.4.0/azure.identity.html#azure.identity"
".DefaultAzureCredential"
)
class AzureAuthError(DvcException):
pass
@memoize
def _az_config():
# NOTE: ideally we would've used get_default_cli().config from
# azure.cli.core, but azure-cli-core has a lot of conflicts with other
    # dependencies. So instead we just use knack directly.
from knack.config import CLIConfig
config_dir = os.getenv(
"AZURE_CONFIG_DIR", os.path.expanduser(os.path.join("~", ".azure"))
)
return CLIConfig(config_dir=config_dir, config_env_var_prefix="AZURE")
# pylint:disable=abstract-method
class AzureFileSystem(ObjectFSWrapper):
scheme = Schemes.AZURE
PATH_CLS = CloudURLInfo
PARAM_CHECKSUM = "etag"
DETAIL_FIELDS = frozenset(("etag", "size"))
REQUIRES = {
"adlfs": "adlfs",
"knack": "knack",
"azure-identity": "azure.identity",
}
@classmethod
def _strip_protocol(cls, path: str):
bucket = infer_storage_options(path).get("host")
if bucket:
return path
bucket = _az_config().get("storage", "container_name", None)
return f"azure://{bucket}"
@staticmethod
def _get_kwargs_from_urls(urlpath):
ops = infer_storage_options(urlpath)
if "host" in ops:
return {"bucket": ops["host"]}
return {}
def _prepare_credentials(self, **config):
from azure.identity.aio import DefaultAzureCredential
# Disable spam from failed cred types for DefaultAzureCredential
logging.getLogger("azure.identity.aio").setLevel(logging.ERROR)
login_info = {}
login_info["connection_string"] = config.get(
"connection_string",
_az_config().get("storage", "connection_string", None),
)
login_info["account_name"] = config.get(
"account_name", _az_config().get("storage", "account", None)
)
login_info["account_key"] = config.get(
"account_key", _az_config().get("storage", "key", None)
)
login_info["sas_token"] = config.get(
"sas_token", _az_config().get("storage", "sas_token", None)
)
login_info["tenant_id"] = config.get("tenant_id")
login_info["client_id"] = config.get("client_id")
login_info["client_secret"] = config.get("client_secret")
if not (login_info["account_name"] or login_info["connection_string"]):
raise AzureAuthError(
"Authentication to Azure Blob Storage requires either "
"account_name or connection_string.\nLearn more about "
"configuration settings at "
+ format_link("https://man.dvc.org/remote/modify")
)
any_secondary = any(
value for key, value in login_info.items() if key != "account_name"
)
if (
login_info["account_name"]
and not any_secondary
and not config.get("allow_anonymous_login", False)
):
with fsspec_loop():
login_info["credential"] = DefaultAzureCredential(
exclude_interactive_browser_credential=False
)
for login_method, required_keys in [ # noqa
("connection string", ["connection_string"]),
(
"AD service principal",
["tenant_id", "client_id", "client_secret"],
),
("account key", ["account_name", "account_key"]),
("SAS token", ["account_name", "sas_token"]),
(
f"default credentials ({_DEFAULT_CREDS_STEPS})",
["account_name", "credential"],
),
("anonymous login", ["account_name"]),
]:
if all(login_info.get(key) is not None for key in required_keys):
break
else:
login_method = None
self.login_method = login_method
return login_info
@wrap_prop(threading.Lock())
@cached_property
def fs(self):
from adlfs import AzureBlobFileSystem
from azure.core.exceptions import AzureError
try:
return AzureBlobFileSystem(**self.fs_args)
except (ValueError, AzureError) as e:
raise AzureAuthError(
f"Authentication to Azure Blob Storage via {self.login_method}"
" failed.\nLearn more about configuration settings at"
f" {format_link('https://man.dvc.org/remote/modify')}"
) from e
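# --- Illustrative sketch (added for clarity; not part of DVC) ------------------
# _prepare_credentials() above reports the login method by walking an ordered
# list of (name, required_keys) pairs and stopping at the first entry whose keys
# are all present. The standalone function below reproduces only that selection
# rule on a plain dict; the example values are made up.
def _demo_login_method_selection(login_info):
    for login_method, required_keys in [
        ("connection string", ["connection_string"]),
        ("account key", ["account_name", "account_key"]),
        ("SAS token", ["account_name", "sas_token"]),
        ("anonymous login", ["account_name"]),
    ]:
        if all(login_info.get(key) is not None for key in required_keys):
            return login_method
    return None
# e.g. _demo_login_method_selection({"account_name": "acct", "sas_token": "tok"})
# returns "SAS token".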
| 33.853333
| 79
| 0.618157
|
7950c0eeb49041ab8b71decbeb240076f498a55a
| 463
|
py
|
Python
|
spanish_content/migrations/0011_eduprogram_video.py
|
RachellCalhoun/lightandleadership
|
c7880c8760d82842ab2626d17124d5df13f9d9d8
|
[
"Apache-2.0"
] | 1
|
2016-02-03T07:49:44.000Z
|
2016-02-03T07:49:44.000Z
|
spanish_content/migrations/0011_eduprogram_video.py
|
RachellCalhoun/lightandleadership
|
c7880c8760d82842ab2626d17124d5df13f9d9d8
|
[
"Apache-2.0"
] | 6
|
2016-02-03T07:57:48.000Z
|
2019-08-15T20:29:59.000Z
|
spanish_content/migrations/0011_eduprogram_video.py
|
RachellCalhoun/lightandleadership
|
c7880c8760d82842ab2626d17124d5df13f9d9d8
|
[
"Apache-2.0"
] | 1
|
2016-02-02T09:21:56.000Z
|
2016-02-02T09:21:56.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-11 22:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spanish_content', '0010_auto_20170304_1848'),
]
operations = [
migrations.AddField(
model_name='eduprogram',
name='video',
field=models.BooleanField(default=False),
),
]
| 22.047619
| 55
| 0.62635
|
7950c1cf133fcd04abd2bdd50be8c38299d5315b
| 3,538
|
py
|
Python
|
salt/modules/system_profiler.py
|
0xf10e/salt
|
fb16449a1ce4ddfc86de07f1e61709fe982f1e78
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/system_profiler.py
|
0xf10e/salt
|
fb16449a1ce4ddfc86de07f1e61709fe982f1e78
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/system_profiler.py
|
0xf10e/salt
|
fb16449a1ce4ddfc86de07f1e61709fe982f1e78
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
System Profiler Module
Interface with Mac OSX's command-line System Profiler utility to get
information about package receipts and installed applications.
.. versionadded:: 2015.2.0
'''
import plistlib
import subprocess
import salt.utils
PROFILER_BINARY = '/usr/sbin/system_profiler'
def __virtual__():
'''
Check to see if the system_profiler binary is available
'''
PROFILER_BINARY = salt.utils.which('system_profiler')
if PROFILER_BINARY:
return True
else:
return False
def _call_system_profiler(datatype):
'''
Call out to system_profiler. Return a dictionary
of the stuff we are interested in.
'''
p = subprocess.Popen(
[PROFILER_BINARY, '-detailLevel', 'full',
'-xml', datatype], stdout=subprocess.PIPE)
(sysprofresults, sysprof_stderr) = p.communicate(input=None)
plist = plistlib.readPlistFromString(sysprofresults)
try:
apps = plist[0]['_items']
except (IndexError, KeyError):
apps = []
return apps
def receipts():
'''
Return the results of a call to
`system_profiler -xml -detail full
SPInstallHistoryDataType`
as a dictionary. Top-level keys of the dictionary
are the names of each set of install receipts, since
there can be multiple receipts with the same name.
Contents of each key are a list of dictionaries.
CLI Example:
.. code-block:: bash
salt '*' systemprofiler.receipts
'''
apps = _call_system_profiler('SPInstallHistoryDataType')
appdict = {}
for a in apps:
details = dict(a)
details.pop('_name')
if 'install_date' in details:
details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S')
if 'info' in details:
try:
details['info'] = '{0}: {1}'.format(details['info'][0],
details['info'][1].strftime('%Y-%m-%d %H:%M:%S'))
except (IndexError, AttributeError):
pass
if a['_name'] not in appdict:
appdict[a['_name']] = []
appdict[a['_name']].append(details)
return appdict
def applications():
'''
Return the results of a call to
`system_profiler -xml -detail full
SPApplicationsDataType`
as a dictionary. Top-level keys of the dictionary
    are the names of each installed application, since
    there can be multiple applications with the same name.
    Contents of each key are a list of dictionaries.
Note that this can take a long time depending on how many
applications are installed on the target Mac.
CLI Example:
.. code-block:: bash
salt '*' systemprofiler.applications
'''
apps = _call_system_profiler('SPApplicationsDataType')
appdict = {}
for a in apps:
details = dict(a)
details.pop('_name')
if 'lastModified' in details:
details['lastModified'] = details['lastModified'].strftime('%Y-%m-%d %H:%M:%S')
if 'info' in details:
try:
details['info'] = '{0}: {1}'.format(details['info'][0],
details['info'][1].strftime('%Y-%m-%d %H:%M:%S'))
except (IndexError, AttributeError):
pass
if a['_name'] not in appdict:
appdict[a['_name']] = []
appdict[a['_name']].append(details)
return appdict
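# --- Illustrative sketch (added for clarity; not part of the Salt module) ------
# Both receipts() and applications() group system_profiler items by their
# '_name' key into lists, because several items can share a name. The helper
# below shows only that grouping step on invented data, without shelling out to
# system_profiler.
def _demo_group_by_name():
    apps = [{'_name': 'Foo', 'version': '1.0'},
            {'_name': 'Foo', 'version': '2.0'},
            {'_name': 'Bar', 'version': '0.1'}]
    appdict = {}
    for a in apps:
        details = dict(a)
        details.pop('_name')
        if a['_name'] not in appdict:
            appdict[a['_name']] = []
        appdict[a['_name']].append(details)
    # -> {'Foo': [{'version': '1.0'}, {'version': '2.0'}], 'Bar': [{'version': '0.1'}]}
    return appdict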
| 26.207407
| 101
| 0.594404
|
7950c2423e674d7034b7e0562c94cbb676814f0e
| 2,507
|
py
|
Python
|
compose/cli/docker_client.py
|
mattjbray/compose
|
f65f89ad8c26684a314d9099fe35bcea07dbe5dc
|
[
"Apache-2.0"
] | null | null | null |
compose/cli/docker_client.py
|
mattjbray/compose
|
f65f89ad8c26684a314d9099fe35bcea07dbe5dc
|
[
"Apache-2.0"
] | null | null | null |
compose/cli/docker_client.py
|
mattjbray/compose
|
f65f89ad8c26684a314d9099fe35bcea07dbe5dc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from docker import Client
from docker.errors import TLSParameterError
from docker.tls import TLSConfig
from docker.utils import kwargs_from_env
from ..const import HTTP_TIMEOUT
from ..const import IS_WINDOWS_PLATFORM
from .errors import UserError
from .utils import generate_user_agent
from .utils import unquote_path
log = logging.getLogger(__name__)
def tls_config_from_options(options):
tls = options.get('--tls', False)
ca_cert = unquote_path(options.get('--tlscacert'))
cert = unquote_path(options.get('--tlscert'))
key = unquote_path(options.get('--tlskey'))
verify = options.get('--tlsverify')
skip_hostname_check = options.get('--skip-hostname-check', False)
advanced_opts = any([ca_cert, cert, key, verify])
if tls is True and not advanced_opts:
return True
elif advanced_opts: # --tls is a noop
client_cert = None
if cert or key:
client_cert = (cert, key)
return TLSConfig(
client_cert=client_cert, verify=verify, ca_cert=ca_cert,
assert_hostname=False if skip_hostname_check else None
)
return None
def docker_client(environment, version=None, tls_config=None, host=None,
tls_version=None):
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
try:
kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
except TLSParameterError:
raise UserError(
"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
"and DOCKER_CERT_PATH are set correctly.\n"
"You might need to run `eval \"$(docker-machine env default)\"`")
if host:
kwargs['base_url'] = host
if tls_config:
kwargs['tls'] = tls_config
if version:
kwargs['version'] = version
timeout = environment.get('COMPOSE_HTTP_TIMEOUT')
if timeout:
kwargs['timeout'] = int(timeout)
else:
kwargs['timeout'] = HTTP_TIMEOUT
kwargs['user_agent'] = generate_user_agent()
if 'base_url' not in kwargs and IS_WINDOWS_PLATFORM:
# docker-py 1.10 defaults to using npipes, but we don't want that
# change in compose yet - use the default TCP connection instead.
kwargs['base_url'] = 'tcp://127.0.0.1:2375'
return Client(**kwargs)
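# --- Illustrative sketch (added for clarity; not part of Compose) --------------
# tls_config_from_options() above distinguishes three cases: bare --tls with no
# other TLS options (True), any advanced TLS option present (a TLSConfig), and
# nothing at all (None). The standalone function below restates just that
# decision rule on a plain options dict, without building a real TLSConfig.
def _demo_tls_decision(options):
    advanced = any(options.get(k) for k in
                   ('--tlscacert', '--tlscert', '--tlskey', '--tlsverify'))
    if options.get('--tls') and not advanced:
        return 'True (plain TLS, no client verification settings)'
    if advanced:
        return 'TLSConfig(...) built from the advanced options'
    return 'None (TLS not configured)'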
| 30.950617
| 82
| 0.682489
|
7950c2ccaa68fdd52ee4e298b1f507ef4df382ea
| 18,601
|
py
|
Python
|
nova/tests/unit/compute/test_shelve.py
|
TMaddox/nova
|
e5c169d15528a8e2eadb8eca668ea0d183cf8648
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/compute/test_shelve.py
|
TMaddox/nova
|
e5c169d15528a8e2eadb8eca668ea0d183cf8648
|
[
"Apache-2.0"
] | 1
|
2019-01-02T01:30:35.000Z
|
2019-01-02T01:38:02.000Z
|
nova/tests/unit/compute/test_shelve.py
|
TMaddox/nova
|
e5c169d15528a8e2eadb8eca668ea0d183cf8648
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import timeutils
from nova.compute import claims
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.tests.unit.compute import test_compute
from nova.tests.unit.image import fake as fake_image
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
def _fake_resources():
resources = {
'memory_mb': 2048,
'memory_mb_used': 0,
'free_ram_mb': 2048,
'local_gb': 20,
'local_gb_used': 0,
'free_disk_gb': 20,
'vcpus': 2,
'vcpus_used': 0
}
return resources
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
CONF.set_override('shelved_offload_time', shelved_offload_time)
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
image_id = 'fake_image_id'
host = 'fake-mini'
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
instance.task_state = task_states.SHELVING
instance.save()
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
if clean_shutdown:
self.compute.driver.power_off(instance,
CONF.shutdown_timeout,
self.compute.SHUTDOWN_RETRY_INTERVAL)
else:
self.compute.driver.power_off(instance, 0, 0)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
if CONF.shelved_offload_time == 0:
self.compute.network_api.cleanup_instance_network_on_host(
self.context, instance, instance.host)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
mox.IgnoreArg())
tracking = {'last_state': instance.vm_state}
def check_save(expected_task_state=None):
self.assertEqual(123, instance.power_state)
if tracking['last_state'] == vm_states.ACTIVE:
if CONF.shelved_offload_time == 0:
self.assertEqual(task_states.SHELVING_OFFLOADING,
instance.task_state)
else:
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED, instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING],
expected_task_state)
self.assertIn('shelved_at', instance.system_metadata)
self.assertEqual(image_id,
instance.system_metadata['shelved_image_id'])
self.assertEqual(host,
instance.system_metadata['shelved_host'])
tracking['last_state'] = instance.vm_state
elif (tracking['last_state'] == vm_states.SHELVED and
CONF.shelved_offload_time == 0):
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.SHELVED_OFFLOADED,
instance.vm_state)
self.assertEqual([task_states.SHELVING,
task_states.SHELVING_OFFLOADING],
expected_task_state)
tracking['last_state'] = instance.vm_state
else:
self.fail('Unexpected save!')
self.compute._notify_about_instance_usage(self.context,
instance, 'shelve.end')
if CONF.shelved_offload_time == 0:
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
self.compute.driver.power_off(instance, 0, 0)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.shelve_instance(self.context, instance,
image_id=image_id, clean_shutdown=clean_shutdown)
def test_shelve(self):
self._shelve_instance(-1)
def test_shelve_forced_shutdown(self):
self._shelve_instance(-1, clean_shutdown=False)
def test_shelve_and_offload(self):
self._shelve_instance(0)
def _shelve_offload(self, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
instance.save()
cur_time = timeutils.utcnow()
timeutils.set_time_override(cur_time)
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute.driver, 'power_off')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
if clean_shutdown:
self.compute.driver.power_off(instance,
CONF.shutdown_timeout,
self.compute.SHUTDOWN_RETRY_INTERVAL)
else:
self.compute.driver.power_off(instance, 0, 0)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, instance, instance.host)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save'):
self.compute.shelve_offload_instance(self.context, instance,
clean_shutdown=clean_shutdown)
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_shelve_offload(self):
self._shelve_offload()
def test_shelve_offload_forced_shutdown(self):
self._shelve_offload(clean_shutdown=False)
def test_unshelve(self):
instance = self._create_fake_instance_obj()
instance.task_state = task_states.UNSHELVING
instance.save()
image = {'id': 'fake_id'}
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
host = 'fake-mini'
cur_time = timeutils.utcnow()
# Adding shelved_* keys in system metadata to verify
# whether those are deleted after unshelve call.
sys_meta = dict(instance.system_metadata)
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
instance.system_metadata = sys_meta
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
self.deleted_image_id = None
def fake_delete(self2, ctxt, image_id):
self.deleted_image_id = image_id
def fake_claim(context, instance, limits):
instance.host = self.compute.host
return claims.Claim(context, instance,
self.rt, _fake_resources())
tracking = {
'last_state': instance.task_state,
'spawned': False,
}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
if tracking['spawned']:
self.assertIsNone(instance.task_state)
else:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['spawned'] = True
                tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
                tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, self.compute.host)
self.compute.driver.spawn(self.context, instance, image,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
with mock.patch.object(self.rt, 'instance_claim',
side_effect=fake_claim), \
mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(
self.context, instance, image=image,
filter_properties=filter_properties,
node=node)
self.assertNotIn('shelved_at', instance.system_metadata)
self.assertNotIn('shelved_image_id', instance.system_metadata)
self.assertNotIn('shelved_host', instance.system_metadata)
self.assertEqual(image['id'], self.deleted_image_id)
self.assertEqual(instance.host, self.compute.host)
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertEqual(self.compute.host, instance.host)
self.assertFalse(instance.auto_disk_config)
def test_unshelve_volume_backed(self):
instance = self._create_fake_instance_obj()
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
instance.task_state = task_states.UNSHELVING
instance.save()
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
tracking = {'last_state': instance.task_state}
def check_save(expected_task_state=None):
if tracking['last_state'] == task_states.UNSHELVING:
self.assertEqual(task_states.SPAWNING, instance.task_state)
tracking['last_state'] = instance.task_state
elif tracking['last_state'] == task_states.SPAWNING:
self.assertEqual(123, instance.power_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertIsNone(instance.key_data)
self.assertFalse(instance.auto_disk_config)
self.assertIsNone(instance.task_state)
tracking['last_state'] = instance.task_state
else:
self.fail('Unexpected save!')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
self.compute._prep_block_device(self.context, instance,
mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, self.compute.host)
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(self.context, instance, self.rt,
_fake_resources()))
self.compute.driver.spawn(self.context, instance, None,
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
self.compute._get_power_state(self.context, instance).AndReturn(123)
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.end')
self.mox.ReplayAll()
with mock.patch.object(instance, 'save') as mock_save:
mock_save.side_effect = check_save
self.compute.unshelve_instance(self.context, instance, image=None,
filter_properties=filter_properties, node=node)
def test_shelved_poll_none_exist(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(timeutils, 'is_older_than')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_not_timedout(self):
instance = self._create_fake_instance_obj()
sys_meta = instance.system_metadata
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
db.instance_update_and_get_original(self.context, instance['uuid'],
{'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.ReplayAll()
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_timedout(self):
instance = self._create_fake_instance_obj()
sys_meta = instance.system_metadata
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
(old, instance) = db.instance_update_and_get_original(self.context,
instance['uuid'], {'vm_state': vm_states.SHELVED,
'system_metadata': sys_meta})
def fake_destroy(inst, nw_info, bdm):
# NOTE(alaski) There are too many differences between an instance
# as returned by instance_update_and_get_original and
# instance_get_all_by_filters so just compare the uuid.
self.assertEqual(instance['uuid'], inst['uuid'])
self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
self.compute._poll_shelved_instances(self.context)
class ShelveComputeAPITestCase(test_compute.BaseTestCase):
def test_shelve(self):
# Ensure instance can be shelved.
fake_instance = self._create_fake_instance_obj(
{'display_name': 'vm01'})
instance = fake_instance
self.assertIsNone(instance['task_state'])
def fake_init(self2):
            # In the original _FakeImageService.__init__(), some fake images are
            # created. To verify only the snapshot name in this test, replace
            # __init__ with a fake method here.
self2.images = {}
def fake_create(self2, ctxt, metadata, data=None):
self.assertEqual(metadata['name'], 'vm01-shelved')
metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
return metadata
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
self.compute_api.shelve(self.context, instance)
self.assertEqual(instance.task_state, task_states.SHELVING)
db.instance_destroy(self.context, instance['uuid'])
def test_unshelve(self):
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj()
self.assertIsNone(instance['task_state'])
self.compute_api.shelve(self.context, instance)
instance.task_state = None
instance.vm_state = vm_states.SHELVED
instance.save()
self.compute_api.unshelve(self.context, instance)
self.assertEqual(instance.task_state, task_states.UNSHELVING)
db.instance_destroy(self.context, instance['uuid'])
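# --- Illustrative note (added for clarity; not used by the tests above) --------
# The check_save() callbacks above assert a small state machine. Summarised, the
# task states these tests expect an instance to pass through are, in order:
_SHELVE_EXPECTED_TASK_STATES = [
    task_states.SHELVING,
    task_states.SHELVING_IMAGE_UPLOADING,   # while the snapshot is uploaded
    task_states.SHELVING_OFFLOADING,        # only when shelved_offload_time == 0
]
_UNSHELVE_EXPECTED_TASK_STATES = [
    task_states.UNSHELVING,
    task_states.SPAWNING,                   # cleared once the instance is ACTIVE
]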
| 44.393795
| 79
| 0.63889
|
7950c2fdf695598e3a01256b55f471bbfc592368
| 28,739
|
py
|
Python
|
tests/unit/states/test_postgres.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
tests/unit/states/test_postgres.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/states/test_postgres.py
|
l2ol33rt/salt
|
ff68bbd9f4bda992a3e039822fb32f141e94347c
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import salt libs
import salt.modules.postgres as postgresmod
import salt.states.postgres_database as postgres_database
import salt.states.postgres_user as postgres_user
import salt.states.postgres_group as postgres_group
import salt.states.postgres_extension as postgres_extension
import salt.states.postgres_schema as postgres_schema
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PostgresUserTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql'))
patcher.start()
self.addCleanup(patcher.stop)
self.salt_stub = {
'config.option': Mock(),
'cmd.run_all': Mock(),
'file.chown': Mock(),
'file.remove': Mock(),
}
self.addCleanup(delattr, self, 'salt_stub')
return {
postgres_database: {},
postgres_group: {},
postgres_extension: {},
postgres_schema: {},
postgres_user: {
'__grains__': {'os_family': 'Linux'},
'__salt__': self.salt_stub,
'__opts__': {'test': False},
}
}
def test_present__creation(self):
# test=True
with patch.dict(postgres_user.__salt__, {'postgres.role_get': Mock(return_value=None),
'postgres.user_create': MagicMock()}):
with patch.dict(postgres_user.__opts__, {'test': True}):
ret = postgres_user.present('foo')
self.assertEqual(
ret,
{'comment': 'User foo is set to be created',
'changes': {}, 'name': 'foo', 'result': None}
)
self.assertEqual(self.salt_stub['postgres.user_create'].call_count, 0)
# test=False
ret = postgres_user.present('foo')
self.assertEqual(
ret,
{'comment': 'The user foo has been created',
'changes': {'foo': 'Present'}, 'name': 'foo', 'result': True}
)
self.salt_stub['postgres.user_create'].assert_called_once_with(username='foo',
superuser=None,
encrypted=True,
runas=None,
inherit=None,
rolepassword=None,
port=None,
replication=None,
host=None,
createroles=None,
user=None,
groups=None,
maintenance_db=None,
login=None,
password=None,
createdb=None)
def test_present__update(self):
# test=True
with patch.dict(postgres_user.__salt__, {'postgres.role_get': Mock(return_value={
'can create databases': False,
'can create roles': False,
'can login': False,
'can update system catalogs': False,
'connections': None,
'defaults variables': {},
'expiry time': None,
'inherits privileges': True,
'replication': False,
'superuser': False,
}),
'postgres.user_update': MagicMock()}):
with patch.dict(postgres_user.__opts__, {'test': True}):
ret = postgres_user.present('foo', login=True, replication=False)
self.assertEqual(
ret,
{'comment': 'User foo is set to be updated',
'changes': {'foo': {'login': True}}, 'name': 'foo', 'result': None}
)
self.assertEqual(self.salt_stub['postgres.user_update'].call_count, 0)
# test=False
ret = postgres_user.present('foo', login=True, replication=False)
self.assertEqual(
ret,
{'comment': 'The user foo has been updated',
'changes': {'foo': {'login': True}}, 'name': 'foo', 'result': True}
)
self.salt_stub['postgres.user_update'].assert_called_once_with(username='foo',
superuser=None,
encrypted=True,
runas=None,
inherit=None,
rolepassword=None,
port=None,
replication=False,
host=None,
createroles=None,
user=None,
groups=None,
maintenance_db=None,
login=True,
password=None,
createdb=None)
def test_present__no_update(self):
# test=True
with patch.dict(postgres_user.__salt__, {'postgres.role_get': Mock(return_value={
'can create databases': False,
'can create roles': False,
'can login': False,
'can update system catalogs': False,
'connections': None,
'defaults variables': {},
'expiry time': None,
'inherits privileges': True,
'replication': False,
'superuser': False,
}),
'postgres.user_update': MagicMock()}):
with patch.dict(postgres_user.__opts__, {'test': True}):
ret = postgres_user.present('foo', login=False, replication=False)
self.assertEqual(
ret,
{'comment': 'User foo is already present',
'changes': {}, 'name': 'foo', 'result': True}
)
self.assertEqual(self.salt_stub['postgres.user_update'].call_count, 0)
# test=False
ret = postgres_user.present('foo', login=False, replication=False)
self.assertEqual(
ret,
{'comment': 'User foo is already present',
'changes': {}, 'name': 'foo', 'result': True}
)
self.assertEqual(self.salt_stub['postgres.user_update'].call_count, 0)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PostgresGroupTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql'))
patcher.start()
self.addCleanup(patcher.stop)
self.salt_stub = {
'config.option': Mock(),
'cmd.run_all': Mock(),
'file.chown': Mock(),
'file.remove': Mock(),
}
self.addCleanup(delattr, self, 'salt_stub')
return {
postgres_database: {},
postgres_user: {},
postgres_extension: {},
postgres_schema: {},
postgres_group: {
'__grains__': {'os_family': 'Linux'},
'__salt__': self.salt_stub,
'__opts__': {'test': False},
}
}
def test_present__creation(self):
# test=True
with patch.dict(postgres_group.__salt__, {'postgres.role_get': Mock(return_value=None),
'postgres.group_create': MagicMock()}):
with patch.dict(postgres_group.__opts__, {'test': True}):
ret = postgres_group.present('foo')
self.assertEqual(
ret,
{'comment': 'Group foo is set to be created',
'changes': {}, 'name': 'foo', 'result': None}
)
self.assertEqual(self.salt_stub['postgres.group_create'].call_count, 0)
# test=False
ret = postgres_group.present('foo')
self.assertEqual(
ret,
{'comment': 'The group foo has been created',
'changes': {}, 'name': 'foo', 'result': True}
)
self.salt_stub['postgres.group_create'].assert_called_once_with(superuser=None,
replication=None,
encrypted=True,
runas=None,
inherit=None,
rolepassword=None,
port=None,
groupname='foo',
host=None,
createroles=None,
user=None,
groups=None,
maintenance_db=None,
login=None,
password=None,
createdb=None)
def test_present__update(self):
# test=True
with patch.dict(postgres_group.__salt__, {'postgres.role_get': Mock(return_value={
'can create databases': False,
'can create roles': False,
'can login': False,
'can update system catalogs': False,
'connections': None,
'defaults variables': {},
'expiry time': None,
'inherits privileges': True,
'replication': False,
'superuser': False,
}),
'postgres.group_update': MagicMock()}):
with patch.dict(postgres_group.__opts__, {'test': True}):
ret = postgres_group.present('foo', login=True, replication=False)
self.assertEqual(
ret,
{'comment': 'Group foo is set to be updated',
'changes': {'foo': {'login': True}}, 'name': 'foo', 'result': None}
)
self.assertEqual(self.salt_stub['postgres.group_update'].call_count, 0)
# test=False
ret = postgres_group.present('foo', login=True, replication=False)
self.assertEqual(
ret,
{'comment': 'The group foo has been updated',
'changes': {'foo': {'login': True}}, 'name': 'foo', 'result': True}
)
self.salt_stub['postgres.group_update'].assert_called_once_with(superuser=None,
replication=False,
encrypted=True,
runas=None,
inherit=None,
rolepassword=None,
port=None,
groupname='foo',
host=None,
createroles=None,
user=None,
groups=None,
maintenance_db=None,
login=True,
password=None,
createdb=None)
def test_present__no_update(self):
# test=True
with patch.dict(postgres_group.__salt__, {'postgres.role_get': Mock(return_value={
'can create databases': False,
'can create roles': False,
'can login': False,
'can update system catalogs': False,
'connections': None,
'defaults variables': {},
'expiry time': None,
'inherits privileges': True,
'replication': False,
'superuser': False,
}),
'postgres.group_update': MagicMock()}):
with patch.dict(postgres_group.__opts__, {'test': True}):
ret = postgres_group.present('foo', login=False, replication=False)
self.assertEqual(
ret,
{'comment': 'Group foo is already present',
'changes': {}, 'name': 'foo', 'result': True}
)
self.assertEqual(self.salt_stub['postgres.group_update'].call_count, 0)
# test=False
ret = postgres_group.present('foo', login=False, replication=False)
self.assertEqual(
ret,
{'comment': 'Group foo is already present',
'changes': {}, 'name': 'foo', 'result': True}
)
self.assertEqual(self.salt_stub['postgres.group_update'].call_count, 0)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PostgresExtensionTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql'))
patcher.start()
self.addCleanup(patcher.stop)
self.salt_stub = {
'config.option': Mock(),
'cmd.run_all': Mock(),
'file.chown': Mock(),
'file.remove': Mock(),
}
self.addCleanup(delattr, self, 'salt_stub')
return {
postgres_database: {},
postgres_user: {},
postgres_group: {},
postgres_schema: {},
postgres_extension: {
'__grains__': {'os_family': 'Linux'},
'__salt__': self.salt_stub,
'__opts__': {'test': False},
}
}
def test_present_failed(self):
'''
scenario of creating or upgrading extensions with possible schema and
version specifications
'''
with patch.dict(postgres_extension.__salt__, {
'postgres.create_metadata': Mock(side_effect=[
[postgresmod._EXTENSION_NOT_INSTALLED],
[postgresmod._EXTENSION_TO_MOVE, postgresmod._EXTENSION_INSTALLED],
]),
'postgres.create_extension': Mock(side_effect=[
False, False,
])}):
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'Failed to install extension foo',
'changes': {}, 'name': 'foo', 'result': False},
)
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'Failed to upgrade extension foo',
'changes': {}, 'name': 'foo', 'result': False}
)
def test_present(self):
'''
scenario of creating or upgrading extensions with possible schema and
version specifications
'''
with patch.dict(postgres_extension.__salt__, {
'postgres.create_metadata': Mock(side_effect=[
[postgresmod._EXTENSION_NOT_INSTALLED],
[postgresmod._EXTENSION_INSTALLED],
[postgresmod._EXTENSION_TO_MOVE, postgresmod._EXTENSION_INSTALLED],
]),
'postgres.create_extension': Mock(side_effect=[
True, True, True,
])}):
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'The extension foo has been installed',
'changes': {'foo': 'Installed'}, 'name': 'foo', 'result': True}
)
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'Extension foo is already present',
'changes': {}, 'name': 'foo', 'result': True}
)
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'The extension foo has been upgraded',
'changes': {'foo': 'Upgraded'}, 'name': 'foo', 'result': True}
)
def test_presenttest(self):
'''
scenario of creating or upgrading extensions with possible schema and
version specifications
'''
with patch.dict(postgres_extension.__salt__, {
'postgres.create_metadata': Mock(side_effect=[
[postgresmod._EXTENSION_NOT_INSTALLED],
[postgresmod._EXTENSION_INSTALLED],
[postgresmod._EXTENSION_TO_MOVE, postgresmod._EXTENSION_INSTALLED],
]),
'postgres.create_extension': Mock(side_effect=[
True, True, True,
])}):
with patch.dict(postgres_extension.__opts__, {'test': True}):
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': 'Extension foo is set to be installed',
'changes': {}, 'name': 'foo', 'result': None}
)
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': "Extension foo is set to be created",
'changes': {}, 'name': 'foo', 'result': None}
)
ret = postgres_extension.present('foo')
self.assertEqual(
ret,
{'comment': "Extension foo is set to be upgraded",
'changes': {}, 'name': 'foo', 'result': None}
)
def test_absent(self):
'''
scenario of removing extensions with possible schema and
version specifications
'''
with patch.dict(postgres_extension.__salt__, {
'postgres.is_installed_extension': Mock(side_effect=[
True, False,
]),
'postgres.drop_extension': Mock(side_effect=[
True, True,
])}):
ret = postgres_extension.absent('foo')
self.assertEqual(
ret,
{'comment': 'Extension foo has been removed',
'changes': {'foo': 'Absent'}, 'name': 'foo', 'result': True}
)
ret = postgres_extension.absent('foo')
self.assertEqual(
ret,
{'comment': (
'Extension foo is not present, '
'so it cannot be removed'),
'changes': {}, 'name': 'foo', 'result': True}
)
def test_absent_failed(self):
'''
scenario of removing extensions with possible schema and
version specifications
'''
with patch.dict(postgres_extension.__opts__, {'test': False}):
with patch.dict(postgres_extension.__salt__, {
'postgres.is_installed_extension': Mock(side_effect=[
True, True,
]),
'postgres.drop_extension': Mock(side_effect=[
False, False,
])}):
ret = postgres_extension.absent('foo')
self.assertEqual(
ret,
{'comment': 'Extension foo failed to be removed',
'changes': {}, 'name': 'foo', 'result': False}
)
def test_absent_failedtest(self):
with patch.dict(postgres_extension.__salt__, {
'postgres.is_installed_extension': Mock(side_effect=[
True, True,
]),
'postgres.drop_extension': Mock(side_effect=[
False, False,
])}):
with patch.dict(postgres_extension.__opts__, {'test': True}):
ret = postgres_extension.absent('foo')
self.assertEqual(
ret,
{'comment': 'Extension foo is set to be removed',
'changes': {}, 'name': 'foo', 'result': None}
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PostgresSchemaTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
patcher = patch('salt.utils.which', Mock(return_value='/usr/bin/pgsql'))
patcher.start()
self.addCleanup(patcher.stop)
self.salt_stub = {
'config.option': Mock(),
'cmd.run_all': Mock(),
'file.chown': Mock(),
'file.remove': Mock(),
}
self.addCleanup(delattr, self, 'salt_stub')
return {
postgres_database: {},
postgres_user: {},
postgres_extension: {},
postgres_group: {},
postgres_schema: {
'__grains__': {'os_family': 'Linux'},
'__salt__': self.salt_stub,
'__opts__': {'test': False},
}
}
def test_present_creation(self):
with patch.dict(postgres_schema.__salt__, {'postgres.schema_get': Mock(return_value=None),
'postgres.schema_create': MagicMock()}):
ret = postgres_schema.present('dbname', 'foo')
self.assertEqual(
ret,
{'comment': 'Schema foo has been created in database dbname',
'changes': {'foo': 'Present'},
'dbname': 'dbname',
'name': 'foo',
'result': True}
)
self.assertEqual(self.salt_stub['postgres.schema_create'].call_count, 1)
def test_present_nocreation(self):
with patch.dict(postgres_schema.__salt__, {
'postgres.schema_get': Mock(return_value={'foo':
{'acl': '',
'owner': 'postgres'}
}),
'postgres.schema_create': MagicMock()}):
ret = postgres_schema.present('dbname', 'foo')
self.assertEqual(
ret,
{'comment': 'Schema foo already exists in database dbname',
'changes': {},
'dbname': 'dbname',
'name': 'foo',
'result': True}
)
self.assertEqual(self.salt_stub['postgres.schema_create'].call_count, 0)
def test_absent_remove(self):
with patch.dict(postgres_schema.__salt__, {'postgres.schema_exists': Mock(return_value=True),
'postgres.schema_remove': MagicMock()}):
ret = postgres_schema.absent('dbname', 'foo')
self.assertEqual(
ret,
{'comment': 'Schema foo has been removed from database dbname',
'changes': {'foo': 'Absent'},
'dbname': 'dbname',
'name': 'foo',
'result': True}
)
self.assertEqual(self.salt_stub['postgres.schema_remove'].call_count, 1)
def test_absent_noremove(self):
with patch.dict(postgres_schema.__salt__, {'postgres.schema_exists': Mock(return_value=False),
'postgres.schema_remove': MagicMock()}):
ret = postgres_schema.absent('dbname', 'foo')
self.assertEqual(
ret,
{'comment': 'Schema foo is not present in database dbname,'
' so it cannot be removed',
'changes': {},
'dbname': 'dbname',
'name': 'foo',
'result': True}
)
self.assertEqual(self.salt_stub['postgres.schema_remove'].call_count, 0)
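# --- Hedged illustration (appended sketch, not part of the original test module) ---
# A standalone sketch of the patch.dict mechanics the tests above rely on: the
# loader dunders (__salt__, __opts__) are plain dicts, so patch.dict can overlay
# entries for the duration of a with-block and restore them on exit. The dicts
# and fake_present() below are simplified stand-ins, not Salt APIs.
from unittest.mock import MagicMock as _MagicMock, patch as _patch


def _sketch_dry_run_pattern():
    fake_dunder_salt = {'postgres.user_create': _MagicMock()}
    fake_dunder_opts = {'test': False}

    def fake_present(name):
        # Same return shape as the real postgres_user.present state.
        if fake_dunder_opts['test']:
            return {'name': name, 'result': None, 'changes': {},
                    'comment': 'User {0} is set to be created'.format(name)}
        fake_dunder_salt['postgres.user_create'](username=name)
        return {'name': name, 'result': True, 'changes': {name: 'Present'},
                'comment': 'The user {0} has been created'.format(name)}

    # test=True: nothing is called and result is None (a dry run).
    with _patch.dict(fake_dunder_opts, {'test': True}):
        assert fake_present('foo')['result'] is None
    assert fake_dunder_salt['postgres.user_create'].call_count == 0

    # Back to test=False: patch.dict restored the original value on exit.
    assert fake_present('foo')['result'] is True
    fake_dunder_salt['postgres.user_create'].assert_called_once_with(username='foo')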
| 49.126496
| 102
| 0.40318
|
7950c30ec39845cf18c8282b04dc3fec7067dc18
| 4,525
|
py
|
Python
|
tests/utils.py
|
shagunsodhani/xplogger
|
46107543cee9a6708d9337090b53dc88e2bfd890
|
[
"MIT"
] | 6
|
2021-03-26T22:13:39.000Z
|
2021-09-20T09:47:18.000Z
|
tests/utils.py
|
shagunsodhani/xplogger
|
46107543cee9a6708d9337090b53dc88e2bfd890
|
[
"MIT"
] | 3
|
2021-04-27T23:09:13.000Z
|
2021-05-29T20:31:16.000Z
|
tests/utils.py
|
shagunsodhani/xplogger
|
46107543cee9a6708d9337090b53dc88e2bfd890
|
[
"MIT"
] | null | null | null |
import numpy as np
import xplogger.logbook
from xplogger.types import ConfigType
def make_logbook_config(logger_dir: str) -> ConfigType:
return xplogger.logbook.make_config(
logger_dir=logger_dir,
wandb_config=None,
tensorboard_config=None,
mlflow_config=None,
)
def make_logbook(logger_dir: str) -> xplogger.logbook.LogBook:
logbook = xplogger.logbook.LogBook(config=make_logbook_config(logger_dir))
return logbook
def _get_valid_config_logs():
log1 = [
{
"num_layers": 2,
"lr": 0.01,
"alpha": np.float64(0.2),
"beta": np.float32(0.1),
"gamma": np.int64(1),
"delta": np.int32(10),
},
{"dataset": "mnist", "optim": "adam"},
]
log2 = [{"num_layers": 2, "lr": 0.01}, {"num_layers": 3, "lr": 0.001, "none": None}]
log3 = [{}]
log4 = []
log5 = [{"num_layers": 2, "subconfig": {"num_layers": 3, "lr": 0.001}}]
return [log1, log2, log3, log4, log5]
def _get_invalid_config_logs():
log1 = [["num_layers=2", "lr=0.01"]]
log2 = [None]
log3 = [[]]
return [log1, log2, log3]
def _get_valid_message_logs():
log1 = ["This is a message", "This is another message"]
log2 = [""]
log3 = []
log4 = [
["num_layers=2"],
["lr=0.01"],
[
f"alpha:{np.float64(0.2)}, beta:{np.float32(0.1)}, gamma:{np.int64(1)}, delta: {np.int32(10)}"
],
]
log5 = [{}, [], None]
log6 = [{"num_layers": 2, "lr": 0.01}, {"dataset": "mnist", "optim": "adam"}]
log7 = [
{
"first_message": "a",
"nested_message": {
"lr": 0.01,
"datasets": ["mnist", "cifar"],
"optim": "adam",
},
}
]
return [log1, log2, log3, log4, log5, log6, log7]
def _get_invalid_message_logs():
return [None]
def _get_valid_metric_logs():
log1 = [{"acc": 20.2, "loss": 0.01}, {"acc@1": 10.1, "mode": "train"}]
log2 = [{"acc": 20.2, "loss": 0.01, "acc@1": 10.1, "mode": "train", "none": None}]
log3 = [{}]
log4 = []
log5 = [
{
"unnested_metric": 1,
"nested_metric": {
"metric1": 3,
"metric2": 0.001,
"alpha": np.float64(0.2),
"beta": np.float32(0.1),
"gamma": np.int64(1),
"delta": np.int32(10),
},
}
]
return [log1, log2, log3, log4, log5]
def _get_invalid_metric_logs():
log1 = [["acc=10.1", "mode=train"]]
log2 = [None]
log3 = [[]]
return [log1, log2, log3]
def get_logs(log_type: str = "config", valid: bool = True):
if log_type == "config" or log_type == "metadata":
# both config and metadata have the same type.
if valid:
return _get_valid_config_logs()
return _get_invalid_config_logs()
if log_type == "message":
if valid:
return _get_valid_message_logs()
return _get_invalid_message_logs()
if log_type == "metric":
if valid:
return _get_valid_metric_logs()
return _get_invalid_metric_logs()
def get_logs_and_types(valid: bool = True):
log_types = ["config", "metadata", "metric"]
if not valid:
log_types.append("message")
# messages generally cannot be written directly using `logbook.write`
for _type in log_types:
for log in get_logs(log_type=_type, valid=valid):
yield (log, _type)
def get_logs_and_types_for_parser():
logs_and_types = [
({"num_layers": 2, "lr": 0.01}, "config"),
({"dataset": "mnist", "optim": "adam"}, "config"),
(
{
"alpha": np.float64(0.2),
"beta": np.float32(0.1),
"gamma": np.int64(1),
"delta": np.int32(10),
},
"config",
),
({"message": "Starting training."}, "message"),
({"best_acc_so_far": 0.0, "best_lr": 0.01}, "metadata"),
({"acc": 20.2, "loss": 0.01, "mode": "train", "epoch": 1}, "metric"),
({"acc": 40.4, "loss": 0.001, "mode": "train", "epoch": 2}, "metric"),
(
{"acc@1": 21.3, "acc@5": 50.2, "loss": 0.001, "mode": "eval", "epoch": 2},
"metric",
),
({"best_acc_so_far": 21.3, "best_lr": 0.01}, "metadata"),
({"message": "Ending training."}, "message"),
]
return [logs_and_types]
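# --- Hedged illustration (appended sketch, not part of the original helpers) ---
# How the generators above are typically consumed; this only exercises the
# helpers defined in this module and makes no claims about the xplogger API
# itself (the example_* names are hypothetical).
def example_iterate_valid_logs():
    for log, log_type in get_logs_and_types(valid=True):
        # Each yielded `log` is one of the lists returned by get_logs().
        assert log_type in ("config", "metadata", "metric")
        assert isinstance(log, list)


def example_make_logbook(tmp_dir: str):
    # make_logbook() wires a LogBook that writes only to the local logger_dir
    # (wandb/tensorboard/mlflow are disabled in make_logbook_config above);
    # directory handling is left to xplogger itself.
    return make_logbook(logger_dir=tmp_dir)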
| 29.00641
| 106
| 0.509834
|
7950c32b515c7b8fd08e278330c938546e3a3d10
| 269
|
py
|
Python
|
feder/letters/formsets.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 16
|
2015-08-11T17:20:26.000Z
|
2022-02-11T20:15:41.000Z
|
feder/letters/formsets.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 534
|
2015-08-04T00:10:54.000Z
|
2022-03-17T10:44:47.000Z
|
feder/letters/formsets.py
|
dzemeuksis/feder
|
32ef7793af6256d4ecada61505c7baf334b34419
|
[
"MIT"
] | 10
|
2017-08-30T13:34:32.000Z
|
2022-02-18T13:00:35.000Z
|
from atom.ext.crispy_forms.forms import BaseTableFormSet
from extra_views import InlineFormSet
from feder.letters.models import Attachment
class AttachmentInline(InlineFormSet):
model = Attachment
formset_class = BaseTableFormSet
fields = ["attachment"]
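# --- Hedged illustration (appended sketch, not part of the original module) ---
# How an extra_views InlineFormSet such as AttachmentInline is typically wired
# into a parent view through the `inlines` attribute of CreateWithInlinesView.
# The Letter import, the view name and the `fields` list are illustrative
# assumptions, not feder's actual views.
from extra_views import CreateWithInlinesView

from feder.letters.models import Letter


class LetterCreateSketchView(CreateWithInlinesView):
    model = Letter
    inlines = [AttachmentInline]
    fields = ["title", "body"]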
| 24.454545
| 56
| 0.802974
|
7950c3a932b54b90f410e26330f6112829c77f1b
| 1,491
|
py
|
Python
|
refinery/units/formats/office/xtdoc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
refinery/units/formats/office/xtdoc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
refinery/units/formats/office/xtdoc.py
|
bronxc/refinery
|
9448facf48a0008f27861dd1a5ee8f5218e6bb86
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Optional
from refinery.units.formats import PathExtractorUnit, UnpackResult
from refinery.units.formats.archive.xtzip import xtzip
from refinery.lib.structures import MemoryFile
class xtdoc(PathExtractorUnit):
"""
Extract files from an OLE document such as a Microsoft Word DOCX file.
"""
@PathExtractorUnit.Requires('olefile', optional=False)
def _olefile():
import olefile
return olefile
def unpack(self, data):
with MemoryFile(data) as stream:
try:
oledoc = self._olefile.OleFileIO(stream)
except OSError as error:
self.log_info(F'error, {error}, treating input as zip file')
yield from xtzip().unpack(data)
return
for item in oledoc.listdir():
if not item or not item[-1]:
continue
path = '/'.join(item)
olestream = oledoc.openstream(path)
c0 = ord(item[-1][:1])
if c0 < 20:
item[-1] = F'[{c0:d}]{item[-1][1:]}'
path = '/'.join(item)
self.log_debug('exploring:', path)
yield UnpackResult(path, olestream.read())
@classmethod
def handles(cls, data: bytearray) -> Optional[bool]:
if data.startswith(B'\xD0\xCF\x11\xE0'):
return True
return xtzip.handles(data)
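# --- Hedged illustration (appended sketch, not part of the refinery unit) ---
# A small sanity check that only exercises the classmethod defined above:
# handles() recognizes the OLE compound-document magic directly and otherwise
# defers to the zip check, which is how zip-based OOXML documents end up
# routed through xtzip.
def _sketch_handles():
    ole_magic = bytearray(b'\xD0\xCF\x11\xE0' + b'\x00' * 12)
    assert xtdoc.handles(ole_magic) is True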
| 33.133333
| 76
| 0.564722
|
7950c3b0ed7bd0737bf9402fcc42117531ff16f5
| 4,171
|
py
|
Python
|
scripts/Util.py
|
mkrizmancic/qlearn_baxter
|
0498315212cacb40334cbb97a858c6ba317f52a3
|
[
"MIT"
] | 4
|
2017-11-11T18:16:22.000Z
|
2018-11-08T13:31:09.000Z
|
scripts/Util.py
|
mkrizmancic/qlearn_baxter
|
0498315212cacb40334cbb97a858c6ba317f52a3
|
[
"MIT"
] | null | null | null |
scripts/Util.py
|
mkrizmancic/qlearn_baxter
|
0498315212cacb40334cbb97a858c6ba317f52a3
|
[
"MIT"
] | 2
|
2019-09-04T12:28:58.000Z
|
2021-09-27T13:02:48.000Z
|
"""
This module is used for utility and helper functions used elsewhere in the program.
Functions:
user_print: Prints text in a given style (color).
user_input: Prompts for input with a colored prompt.
get_pick_height: Returns the height from which to pick the disk.
get_place_height: Returns the height at which to place the disk.
index2state: Converts state indexes to tuples.
printGame: Visualizes moves from start to goal state.
game2robot: Converts moves to a format readable by the robot-moving function.
"""
import sys
import rospy
# Comments beginning with "noinspection" are PyCharm auto-generated comments
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
# noinspection PyUnboundLocalVariable
def user_print(text, style, new_line=True):
"""Print text with given style (color)"""
if style == 'error':
color = RED
elif style == 'info':
color = CYAN
elif style == 'warn':
color = BLUE
elif style == 'input':
color = GREEN
sys.stdout.write(color)
if new_line:
print text
else:
print text,
sys.stdout.write(RESET)
def user_input(text):
"""Get user input with colored prompt"""
color = GREEN
sys.stdout.write(color)
print text, " >> ",
sys.stdout.write(RESET)
return raw_input()
def get_pick_height(disk):
"""Calculate and return the height from which to pick the disk."""
disk_height = rospy.get_param('~disk_height')
return disk * disk_height
def get_place_height(disk):
"""Calculate and return the height where to place the disk."""
disk_height = rospy.get_param('~disk_height')
return disk * disk_height
# noinspection PyUnusedLocal
def index2state(lookup, actions, nStates):
"""
Converts state indexes back to tuples.
Args:
lookup (dict): Dictionary containing state-index pairs
actions (list): List of actions (state indexes) to convert
nStates (int): Number of states
Returns:
sequence (list): List of converted tuples
"""
keys = [0 for i in range(nStates)]
for item in lookup.items():
keys[item[1]] = item[0]
sequence = []
for action in actions:
sequence.append(keys[action])
return sequence
def printGame(sequence, height):
"""
Visualize moves from start to goal state.
Args:
sequence (list): List containing tuples representing states
height (int): Height of the rod, equal to the number of disks
"""
move = 0
for state in sequence:
print move, ". move:"
lista = list(state)
for i in range(3):
for j in range(height - len(state[i])):
lista[i] = ('|',) + lista[i]
for i in range(height):
print (" {0} {1} {2}".format(lista[0][i], lista[1][i], lista[2][i]))
print ("============================")
print ("============================")
print
move += 1
# noinspection PyUnboundLocalVariable
def game2robot(sequence):
"""
Convert moves to a format readable by the robot-moving function.
Args:
sequence (list): List containing tuples representing states
"""
commands = []
for i in range(len(sequence) - 1):
fromState = sequence[i]
toState = sequence[i + 1]
for j in range(3): # For each rod...
if len(fromState[j]) > len(toState[j]): # ...if there were more disks before...
pick_destination = j # ...this is the rod to pick from
pick_height = len(fromState[j]) # Number of disks BEFORE picking
elif len(fromState[j]) < len(toState[j]): # ... if there are more disks after...
place_destination = j # ...this is the rod to place to
place_height = len(fromState[j]) # Number of disks BEFORE placing
place_height += 1 # We need height AFTER placing so add 1
commands.append((pick_destination, pick_height, place_destination, place_height))
return commands
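# --- Hedged illustration (appended sketch, not part of the original module) ---
# A worked example of game2robot() on a single Towers of Hanoi move: the top
# disk (1) leaves rod 0, which held three disks, and lands on empty rod 1, so
# the command is (pick_destination=0, pick_height=3, place_destination=1,
# place_height=1). Pure Python; no ROS is required to follow the arithmetic.
def _example_game2robot():
    sequence = [((3, 2, 1), (), ()),
                ((3, 2), (1,), ())]
    assert game2robot(sequence) == [(0, 3, 1, 1)]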
| 30.896296
| 97
| 0.592184
|
7950c46552a79afa6bd9bd68ed138d1324b84305
| 48,741
|
py
|
Python
|
scvi/model/_totalvi.py
|
Semih-Kurt/scvi-tools
|
1bea2af8cc99e11d55a6925f09d978de5f6994fb
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/_totalvi.py
|
Semih-Kurt/scvi-tools
|
1bea2af8cc99e11d55a6925f09d978de5f6994fb
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/model/_totalvi.py
|
Semih-Kurt/scvi-tools
|
1bea2af8cc99e11d55a6925f09d978de5f6994fb
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import warnings
from collections.abc import Iterable as IterableClass
from functools import partial
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi._utils import _doc_params
from scvi.data._utils import _check_nonnegative_integers
from scvi.data.anndata import AnnDataManager
from scvi.data.anndata.fields import (
CategoricalJointObsField,
CategoricalObsField,
LayerField,
NumericalJointObsField,
ProteinObsmField,
)
from scvi.dataloaders import DataSplitter
from scvi.model._utils import (
_get_batch_code_from_category,
_init_library_size,
cite_seq_raw_counts_properties,
)
from scvi.model.base._utils import _de_core
from scvi.module import TOTALVAE
from scvi.train import AdversarialTrainingPlan, TrainRunner
from scvi.utils._docstrings import doc_differential_expression, setup_anndata_dsp
from .base import ArchesMixin, BaseModelClass, RNASeqMixin, VAEMixin
logger = logging.getLogger(__name__)
Number = TypeVar("Number", int, float)
class TOTALVI(RNASeqMixin, VAEMixin, ArchesMixin, BaseModelClass):
"""
total Variational Inference [GayosoSteier21]_.
Parameters
----------
adata
AnnData object that has been registered via :meth:`~scvi.model.TOTALVI.setup_anndata`.
n_latent
Dimensionality of the latent space.
gene_dispersion
One of the following:
* ``'gene'`` - genes_dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - genes_dispersion can differ between different batches
* ``'gene-label'`` - genes_dispersion can differ between different labels
protein_dispersion
One of the following:
* ``'protein'`` - protein_dispersion parameter is constant per protein across cells
* ``'protein-batch'`` - protein_dispersion can differ between different batches NOT TESTED
* ``'protein-label'`` - protein_dispersion can differ between different labels NOT TESTED
gene_likelihood
One of:
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
latent_distribution
One of:
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
empirical_protein_background_prior
Set the initialization of protein background prior empirically. This option fits a GMM for each of
100 cells per batch and averages the distributions. Note that even with this option set to `True`,
this only initializes a parameter that is learned during inference. If `False`, randomly initializes.
The default (`None`), sets this to `True` if greater than 10 proteins are used.
override_missing_proteins
If `True`, will not treat proteins with all 0 expression in a particular batch as missing.
**model_kwargs
Keyword args for :class:`~scvi.module.TOTALVAE`
Examples
--------
>>> adata = anndata.read_h5ad(path_to_anndata)
>>> scvi.model.TOTALVI.setup_anndata(adata, batch_key="batch", protein_expression_obsm_key="protein_expression")
>>> vae = scvi.model.TOTALVI(adata)
>>> vae.train()
>>> adata.obsm["X_totalVI"] = vae.get_latent_representation()
Notes
-----
See further usage examples in the following tutorials:
1. :doc:`/tutorials/notebooks/totalVI`
2. :doc:`/tutorials/notebooks/cite_scrna_integration_w_totalVI`
3. :doc:`/tutorials/notebooks/scarches_scvi_tools`
"""
def __init__(
self,
adata: AnnData,
n_latent: int = 20,
gene_dispersion: Literal[
"gene", "gene-batch", "gene-label", "gene-cell"
] = "gene",
protein_dispersion: Literal[
"protein", "protein-batch", "protein-label"
] = "protein",
gene_likelihood: Literal["zinb", "nb"] = "nb",
latent_distribution: Literal["normal", "ln"] = "normal",
empirical_protein_background_prior: Optional[bool] = None,
override_missing_proteins: bool = False,
**model_kwargs,
):
super(TOTALVI, self).__init__(adata)
self.protein_state_registry = self.adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
)
if (
ProteinObsmField.PROTEIN_BATCH_MASK in self.protein_state_registry
and not override_missing_proteins
):
batch_mask = self.protein_state_registry.protein_batch_mask
msg = (
"Some proteins have all 0 counts in some batches. "
+ "These proteins will be treated as missing measurements; however, "
+ "this can occur due to experimental design/biology. "
+ "Reinitialize the model with `override_missing_proteins=True`,"
+ "to override this behavior."
)
warnings.warn(msg, UserWarning)
self._use_adversarial_classifier = True
else:
batch_mask = None
self._use_adversarial_classifier = False
emp_prior = (
empirical_protein_background_prior
if empirical_protein_background_prior is not None
else (self.summary_stats.n_proteins > 10)
)
if emp_prior:
prior_mean, prior_scale = self._get_totalvi_protein_priors(adata)
else:
prior_mean, prior_scale = None, None
n_cats_per_cov = (
self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
CategoricalJointObsField.N_CATS_PER_KEY
]
if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry
else None
)
n_batch = self.summary_stats.n_batch
library_log_means, library_log_vars = _init_library_size(
self.adata_manager, n_batch
)
self.module = TOTALVAE(
n_input_genes=self.summary_stats.n_vars,
n_input_proteins=self.summary_stats.n_proteins,
n_batch=n_batch,
n_latent=n_latent,
n_continuous_cov=self.summary_stats.get("n_extra_continuous_covs", 0),
n_cats_per_cov=n_cats_per_cov,
gene_dispersion=gene_dispersion,
protein_dispersion=protein_dispersion,
gene_likelihood=gene_likelihood,
latent_distribution=latent_distribution,
protein_batch_mask=batch_mask,
protein_background_prior_mean=prior_mean,
protein_background_prior_scale=prior_scale,
library_log_means=library_log_means,
library_log_vars=library_log_vars,
**model_kwargs,
)
self._model_summary_string = (
"TotalVI Model with the following params: \nn_latent: {}, "
"gene_dispersion: {}, protein_dispersion: {}, gene_likelihood: {}, latent_distribution: {}"
).format(
n_latent,
gene_dispersion,
protein_dispersion,
gene_likelihood,
latent_distribution,
)
self.init_params_ = self._get_init_params(locals())
def train(
self,
max_epochs: Optional[int] = 400,
lr: float = 4e-3,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 0.9,
validation_size: Optional[float] = None,
batch_size: int = 256,
early_stopping: bool = True,
check_val_every_n_epoch: Optional[int] = None,
reduce_lr_on_plateau: bool = True,
n_steps_kl_warmup: Union[int, None] = None,
n_epochs_kl_warmup: Union[int, None] = None,
adversarial_classifier: Optional[bool] = None,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using amortized variational inference.
Parameters
----------
max_epochs
Number of passes through the dataset.
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the validation set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
early_stopping
Whether to perform early stopping with respect to the validation set.
check_val_every_n_epoch
Check val every n train epochs. By default, val is not checked, unless `early_stopping` is `True`
or `reduce_lr_on_plateau` is `True`. If either of the latter conditions are met, val is checked
every epoch.
reduce_lr_on_plateau
Reduce learning rate on plateau of validation metric (default is ELBO).
n_steps_kl_warmup
Number of training steps (minibatches) to scale weight on KL divergences from 0 to 1.
Only activated when `n_epochs_kl_warmup` is set to None. If `None`, defaults
to `floor(0.75 * adata.n_obs)`.
n_epochs_kl_warmup
Number of epochs to scale weight on KL divergences from 0 to 1.
Overrides `n_steps_kl_warmup` when both are not `None`.
adversarial_classifier
Whether to use adversarial classifier in the latent space. This helps mixing when
there are missing proteins in any of the batches. Defaults to `True` if missing proteins
are detected.
plan_kwargs
Keyword args for :class:`~scvi.train.AdversarialTrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
if adversarial_classifier is None:
adversarial_classifier = self._use_adversarial_classifier
n_steps_kl_warmup = (
n_steps_kl_warmup
if n_steps_kl_warmup is not None
else int(0.75 * self.adata.n_obs)
)
if reduce_lr_on_plateau:
check_val_every_n_epoch = 1
update_dict = {
"lr": lr,
"adversarial_classifier": adversarial_classifier,
"reduce_lr_on_plateau": reduce_lr_on_plateau,
"n_epochs_kl_warmup": n_epochs_kl_warmup,
"n_steps_kl_warmup": n_steps_kl_warmup,
"check_val_every_n_epoch": check_val_every_n_epoch,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
if max_epochs is None:
n_cells = self.adata.n_obs
max_epochs = np.min([round((20000 / n_cells) * 400), 400])
plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()
data_splitter = DataSplitter(
self.adata_manager,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
use_gpu=use_gpu,
)
training_plan = AdversarialTrainingPlan(self.module, **plan_kwargs)
runner = TrainRunner(
self,
training_plan=training_plan,
data_splitter=data_splitter,
max_epochs=max_epochs,
use_gpu=use_gpu,
early_stopping=early_stopping,
**kwargs,
)
return runner()
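    # Worked example of the heuristic above (an illustration, not scvi-tools
    # code): when max_epochs=None is passed explicitly, a dataset with
    # adata.n_obs = 40_000 cells gets round((20000 / 40000) * 400) = 200 epochs,
    # i.e. min(200, 400) = 200; datasets with 20_000 cells or fewer cap at 400.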
@torch.no_grad()
def get_latent_library_size(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
give_mean: bool = True,
batch_size: Optional[int] = None,
) -> np.ndarray:
r"""
Returns the latent library size for each cell.
This is denoted as :math:`\ell_n` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
give_mean
Return the mean or a sample from the posterior distribution.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
self._check_if_trained(warn=False)
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
libraries = []
for tensors in post:
inference_inputs = self.module._get_inference_input(tensors)
outputs = self.module.inference(**inference_inputs)
if give_mean:
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
library = torch.exp(ql_m + 0.5 * ql_v)
else:
library = outputs["library_gene"]
libraries += [library.cpu()]
return torch.cat(libraries).numpy()
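    # Note on the give_mean branch above (an added illustration): if the latent
    # log-library log(l) ~ Normal(ql_m, ql_v), then E[l] = E[exp(log l)]
    # = exp(ql_m + ql_v / 2), which is exactly torch.exp(ql_m + 0.5 * ql_v).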
@torch.no_grad()
def get_normalized_expression(
self,
adata=None,
indices=None,
n_samples_overall: Optional[int] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
library_size: Optional[Union[float, Literal["latent"]]] = 1,
n_samples: int = 1,
sample_protein_mixing: bool = False,
scale_protein: bool = False,
include_protein_background: bool = False,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Tuple[Union[np.ndarray, pd.DataFrame], Union[np.ndarray, pd.DataFrame]]:
r"""
Returns the normalized gene expression and protein expression.
This is denoted as :math:`\rho_n` in the totalVI paper for genes, and TODO
for proteins, :math:`(1-\pi_{nt})\alpha_{nt}\beta_{nt}`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples_overall
Number of samples to use in total
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
protein_list
Return protein expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude.
n_samples
Get sample scale from multiple samples.
sample_protein_mixing
Sample mixing bernoulli, setting background to zero
scale_protein
Make protein expression sum to 1
include_protein_background
Include background component for protein expression
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a `np.ndarray` instead of a `pd.DataFrame`. Includes gene
names as columns. If either n_samples=1 or return_mean=True, defaults to False.
Otherwise, it defaults to True.
Returns
-------
- **gene_normalized_expression** - normalized expression for RNA
- **protein_normalized_expression** - normalized expression for proteins
If ``n_samples`` > 1 and ``return_mean`` is False, then the shape is ``(samples, cells, genes)``.
Otherwise, shape is ``(cells, genes)``. Return type is ``pd.DataFrame`` unless ``return_numpy`` is True.
"""
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples_overall is not None:
indices = np.random.choice(indices, n_samples_overall)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = adata.var_names
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(adata_manager, transform_batch)
scale_list_gene = []
scale_list_pro = []
for tensors in post:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
px_scale = torch.zeros_like(x)
py_scale = torch.zeros_like(y)
if n_samples > 1:
px_scale = torch.stack(n_samples * [px_scale])
py_scale = torch.stack(n_samples * [py_scale])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
if library_size == "latent":
px_scale += generative_outputs["px_"]["rate"].cpu()
else:
px_scale += generative_outputs["px_"]["scale"].cpu()
px_scale = px_scale[..., gene_mask]
py_ = generative_outputs["py_"]
# probability of background
protein_mixing = 1 / (1 + torch.exp(-py_["mixing"].cpu()))
if sample_protein_mixing is True:
protein_mixing = torch.distributions.Bernoulli(
protein_mixing
).sample()
protein_val = py_["rate_fore"].cpu() * (1 - protein_mixing)
if include_protein_background is True:
protein_val += py_["rate_back"].cpu() * protein_mixing
if scale_protein is True:
protein_val = torch.nn.functional.normalize(
protein_val, p=1, dim=-1
)
protein_val = protein_val[..., protein_mask]
py_scale += protein_val
px_scale /= len(transform_batch)
py_scale /= len(transform_batch)
scale_list_gene.append(px_scale)
scale_list_pro.append(py_scale)
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
scale_list_gene = torch.cat(scale_list_gene, dim=1)
scale_list_pro = torch.cat(scale_list_pro, dim=1)
# (cells, features, samples)
scale_list_gene = scale_list_gene.permute(1, 2, 0)
scale_list_pro = scale_list_pro.permute(1, 2, 0)
else:
scale_list_gene = torch.cat(scale_list_gene, dim=0)
scale_list_pro = torch.cat(scale_list_pro, dim=0)
if return_mean is True and n_samples > 1:
scale_list_gene = torch.mean(scale_list_gene, dim=-1)
scale_list_pro = torch.mean(scale_list_pro, dim=-1)
scale_list_gene = scale_list_gene.cpu().numpy()
scale_list_pro = scale_list_pro.cpu().numpy()
if return_numpy is None or return_numpy is False:
gene_df = pd.DataFrame(
scale_list_gene,
columns=adata.var_names[gene_mask],
index=adata.obs_names[indices],
)
protein_names = self.protein_state_registry.column_names
pro_df = pd.DataFrame(
scale_list_pro,
columns=protein_names[protein_mask],
index=adata.obs_names[indices],
)
return gene_df, pro_df
else:
return scale_list_gene, scale_list_pro
@torch.no_grad()
def get_protein_foreground_probability(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
protein_list: Optional[Sequence[str]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
):
r"""
Returns the foreground probability for proteins.
This is denoted as :math:`(1 - \pi_{nt})` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
protein_list
Return protein expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
- **foreground_probability** - probability of foreground for each protein
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.
Otherwise, shape is `(cells, genes)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if indices is None:
indices = np.arange(adata.n_obs)
py_mixings = []
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.adata_manager, transform_batch
)
for tensors in post:
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
py_mixing = torch.zeros_like(y[..., protein_mask])
if n_samples > 1:
py_mixing = torch.stack(n_samples * [py_mixing])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
py_mixing += torch.sigmoid(generative_outputs["py_"]["mixing"])[
..., protein_mask
].cpu()
py_mixing /= len(transform_batch)
py_mixings += [py_mixing]
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
py_mixings = torch.cat(py_mixings, dim=1)
# (cells, features, samples)
py_mixings = py_mixings.permute(1, 2, 0)
else:
py_mixings = torch.cat(py_mixings, dim=0)
if return_mean is True and n_samples > 1:
py_mixings = torch.mean(py_mixings, dim=-1)
py_mixings = py_mixings.cpu().numpy()
if return_numpy is True:
return 1 - py_mixings
else:
pro_names = self.protein_state_registry.column_names
foreground_prob = pd.DataFrame(
1 - py_mixings,
columns=pro_names[protein_mask],
index=adata.obs_names[indices],
)
return foreground_prob
def _expression_for_de(
self,
adata=None,
indices=None,
n_samples_overall=None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
scale_protein=False,
batch_size: Optional[int] = None,
sample_protein_mixing=False,
include_protein_background=False,
protein_prior_count=0.5,
):
rna, protein = self.get_normalized_expression(
adata=adata,
indices=indices,
n_samples_overall=n_samples_overall,
transform_batch=transform_batch,
return_numpy=True,
n_samples=1,
batch_size=batch_size,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
)
protein += protein_prior_count
joint = np.concatenate([rna, protein], axis=1)
return joint
@_doc_params(
doc_differential_expression=doc_differential_expression,
)
def differential_expression(
self,
adata: Optional[AnnData] = None,
groupby: Optional[str] = None,
group1: Optional[Iterable[str]] = None,
group2: Optional[str] = None,
idx1: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
idx2: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
mode: Literal["vanilla", "change"] = "change",
delta: float = 0.25,
batch_size: Optional[int] = None,
all_stats: bool = True,
batch_correction: bool = False,
batchid1: Optional[Iterable[str]] = None,
batchid2: Optional[Iterable[str]] = None,
fdr_target: float = 0.05,
silent: bool = False,
protein_prior_count: float = 0.1,
scale_protein: bool = False,
sample_protein_mixing: bool = False,
include_protein_background: bool = False,
**kwargs,
) -> pd.DataFrame:
r"""
A unified method for differential expression analysis.
Implements `"vanilla"` DE [Lopez18]_ and `"change"` mode DE [Boyeau19]_.
Parameters
----------
{doc_differential_expression}
protein_prior_count
Prior count added to protein expression before LFC computation
scale_protein
Force protein values to sum to one in every single cell (post-hoc normalization)
sample_protein_mixing
Sample the protein mixture component, i.e., use the parameter to sample a Bernoulli
that determines if expression is from foreground/background.
include_protein_background
Include the protein background component as part of the protein expression
**kwargs
Keyword args for :meth:`scvi.model.base.DifferentialComputation.get_bayes_factors`
Returns
-------
Differential expression DataFrame.
"""
adata = self._validate_anndata(adata)
model_fn = partial(
self._expression_for_de,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
protein_prior_count=protein_prior_count,
batch_size=batch_size,
)
col_names = np.concatenate(
[
np.asarray(adata.var_names),
self.protein_state_registry.column_names,
]
)
result = _de_core(
self.get_anndata_manager(adata, required=True),
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
cite_seq_raw_counts_properties,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr_target,
silent,
**kwargs,
)
return result
@torch.no_grad()
def posterior_predictive_sample(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
) -> np.ndarray:
r"""
Generate observation samples from the posterior predictive distribution.
The posterior predictive distribution is written as :math:`p(\hat{x}, \hat{y} \mid x, y)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of required samples for each cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
gene_list
Names of genes of interest
protein_list
Names of proteins of interest
Returns
-------
x_new : :class:`~numpy.ndarray`
tensor with shape (n_cells, n_genes, n_samples)
"""
if self.module.gene_likelihood not in ["nb"]:
raise ValueError("Invalid gene_likelihood")
adata = self._validate_anndata(adata)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = adata.var_names
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
rna_sample, protein_sample = self.module.sample(
tensors, n_samples=n_samples
)
rna_sample = rna_sample[..., gene_mask]
protein_sample = protein_sample[..., protein_mask]
data = torch.cat([rna_sample, protein_sample], dim=-1).numpy()
scdl_list += [data]
if n_samples > 1:
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
scdl_list = np.concatenate(scdl_list, axis=0)
return scdl_list
@torch.no_grad()
def _get_denoised_samples(
self,
adata=None,
indices=None,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[int] = None,
) -> np.ndarray:
"""
Return samples from an adjusted posterior predictive.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
indices of `adata` to use
n_samples
How many samples to draw per cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
int of which batch to condition on for all cells
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
generative_kwargs = dict(transform_batch=transform_batch)
inference_kwargs = dict(n_samples=n_samples)
with torch.no_grad():
inference_outputs, generative_outputs, = self.module.forward(
tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
px_ = generative_outputs["px_"]
py_ = generative_outputs["py_"]
device = px_["r"].device
pi = 1 / (1 + torch.exp(-py_["mixing"]))
mixing_sample = torch.distributions.Bernoulli(pi).sample()
protein_rate = py_["rate_fore"]
rate = torch.cat((rna_size_factor * px_["scale"], protein_rate), dim=-1)
if len(px_["r"].size()) == 2:
px_dispersion = px_["r"]
else:
px_dispersion = torch.ones_like(x).to(device) * px_["r"]
if len(py_["r"].size()) == 2:
py_dispersion = py_["r"]
else:
py_dispersion = torch.ones_like(y).to(device) * py_["r"]
dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + dispersion)
r = dispersion
l_train = torch.distributions.Gamma(r, (1 - p) / p).sample()
data = l_train.cpu().numpy()
# make background 0
data[:, :, self.adata.shape[1] :] = (
data[:, :, self.adata.shape[1] :] * (1 - mixing_sample).cpu().numpy()
)
scdl_list += [data]
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
return np.concatenate(scdl_list, axis=0)
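    # Note on the Gamma sampling above (an added illustration): with
    # p = rate / (rate + dispersion) and shape r = dispersion, the rate
    # parameter (1 - p) / p gives E[l_train] = r * p / (1 - p) = rate, so the
    # denoised samples are centred on the size-factor-scaled mean expression.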
@torch.no_grad()
def get_feature_correlation_matrix(
self,
adata=None,
indices=None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
log_transform: bool = False,
) -> pd.DataFrame:
"""
Generate gene-gene correlation matrix using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
log_transform
Whether to log transform denoised values prior to correlation calculation.
Returns
-------
Gene-protein-gene-protein correlation matrix
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.get_anndata_manager(adata, required=True), transform_batch
)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if log_transform is True:
flattened[:, : self.n_genes] = np.log(
flattened[:, : self.n_genes] + 1e-8
)
flattened[:, self.n_genes :] = np.log1p(flattened[:, self.n_genes :])
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
else:
corr_matrix, _ = spearmanr(flattened, axis=0)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = adata.var_names
names = np.concatenate(
[
np.asarray(var_names),
self.protein_state_registry.column_names,
]
)
return pd.DataFrame(corr_matrix, index=names, columns=names)
@torch.no_grad()
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x, y \mid z)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
            Whether to return the expected value of the parameters or a sample drawn from them.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
raise NotImplementedError
def _validate_anndata(
self, adata: Optional[AnnData] = None, copy_if_view: bool = True
):
adata = super()._validate_anndata(adata=adata, copy_if_view=copy_if_view)
error_msg = "Number of {} in anndata different from when setup_anndata was run. Please rerun setup_anndata."
if REGISTRY_KEYS.PROTEIN_EXP_KEY in self.adata_manager.data_registry.keys():
pro_exp = self.get_from_registry(adata, REGISTRY_KEYS.PROTEIN_EXP_KEY)
if self.summary_stats.n_proteins != pro_exp.shape[1]:
raise ValueError(error_msg.format("proteins"))
is_nonneg_int = _check_nonnegative_integers(pro_exp)
if not is_nonneg_int:
warnings.warn(
"Make sure the registered protein expression in anndata contains unnormalized count data."
)
else:
raise ValueError("No protein data found, please setup or transfer anndata")
return adata
def _get_totalvi_protein_priors(self, adata, n_cells=100):
"""Compute an empirical prior for protein background."""
import warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import GaussianMixture
warnings.filterwarnings("error")
logger.info("Computing empirical prior initialization for protein background.")
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
pro_exp = adata_manager.get_from_registry(REGISTRY_KEYS.PROTEIN_EXP_KEY)
pro_exp = pro_exp.to_numpy() if isinstance(pro_exp, pd.DataFrame) else pro_exp
batch_mask = adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
).get(ProteinObsmField.PROTEIN_BATCH_MASK)
batch = adata_manager.get_from_registry(REGISTRY_KEYS.BATCH_KEY).ravel()
cats = adata_manager.get_state_registry(REGISTRY_KEYS.BATCH_KEY)[
CategoricalObsField.CATEGORICAL_MAPPING_KEY
]
codes = np.arange(len(cats))
batch_avg_mus, batch_avg_scales = [], []
for b in np.unique(codes):
# can happen during online updates
# the values of these batches will not be used
num_in_batch = np.sum(batch == b)
if num_in_batch == 0:
batch_avg_mus.append(0)
batch_avg_scales.append(1)
continue
batch_pro_exp = pro_exp[batch == b]
# non missing
if batch_mask is not None:
batch_pro_exp = batch_pro_exp[:, batch_mask[b]]
if batch_pro_exp.shape[1] < 5:
logger.debug(
f"Batch {b} has too few proteins to set prior, setting randomly."
)
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
# a batch is missing because it's in the reference but not query data
# for scarches case, these values will be replaced by original state dict
if batch_pro_exp.shape[0] == 0:
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
cells = np.random.choice(np.arange(batch_pro_exp.shape[0]), size=n_cells)
batch_pro_exp = batch_pro_exp[cells]
gmm = GaussianMixture(n_components=2)
mus, scales = [], []
# fit per cell GMM
for c in batch_pro_exp:
try:
gmm.fit(np.log1p(c.reshape(-1, 1)))
# when cell is all 0
except ConvergenceWarning:
mus.append(0)
scales.append(0.05)
continue
means = gmm.means_.ravel()
sorted_fg_bg = np.argsort(means)
mu = means[sorted_fg_bg].ravel()[0]
covariances = gmm.covariances_[sorted_fg_bg].ravel()[0]
scale = np.sqrt(covariances)
mus.append(mu)
scales.append(scale)
# average distribution over cells
batch_avg_mu = np.mean(mus)
batch_avg_scale = np.sqrt(np.sum(np.square(scales)) / (n_cells**2))
batch_avg_mus.append(batch_avg_mu)
batch_avg_scales.append(batch_avg_scale)
# repeat prior for each protein
batch_avg_mus = np.array(batch_avg_mus, dtype=np.float32).reshape(1, -1)
batch_avg_scales = np.array(batch_avg_scales, dtype=np.float32).reshape(1, -1)
batch_avg_mus = np.tile(batch_avg_mus, (pro_exp.shape[1], 1))
batch_avg_scales = np.tile(batch_avg_scales, (pro_exp.shape[1], 1))
warnings.resetwarnings()
return batch_avg_mus, batch_avg_scales
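    # Sketch of the idea behind the prior above (standalone, hypothetical data):
    # a two-component GaussianMixture is fit to the log1p protein counts of a
    # single cell, and the component with the smaller mean is taken as background.
    #     import numpy as np
    #     from sklearn.mixture import GaussianMixture
    #     counts = np.concatenate([np.random.poisson(3, 50), np.random.poisson(60, 50)])
    #     gm = GaussianMixture(n_components=2).fit(np.log1p(counts.reshape(-1, 1)))
    #     background_mean = np.sort(gm.means_.ravel())[0]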
@torch.no_grad()
def get_protein_background_mean(self, adata, indices, batch_size):
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
background_mean = []
for tensors in scdl:
_, inference_outputs, _ = self.module.forward(tensors)
b_mean = inference_outputs["py_"]["rate_back"]
background_mean += [b_mean.cpu().numpy()]
return np.concatenate(background_mean)
@classmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
protein_expression_obsm_key: str,
protein_names_uns_key: Optional[str] = None,
batch_key: Optional[str] = None,
layer: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
**kwargs,
) -> Optional[AnnData]:
"""
%(summary)s.
Parameters
----------
%(param_adata)s
protein_expression_obsm_key
key in `adata.obsm` for protein expression data.
protein_names_uns_key
key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`
if it is a DataFrame, else will assign sequential names to proteins.
%(param_batch_key)s
%(param_layer)s
%(param_cat_cov_keys)s
%(param_cont_cov_keys)s
%(param_copy)s
Returns
-------
%(returns)s
"""
setup_method_args = cls._get_setup_method_args(**locals())
batch_field = CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key)
anndata_fields = [
LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
CategoricalObsField(
REGISTRY_KEYS.LABELS_KEY, None
), # Default labels field for compatibility with TOTALVAE
batch_field,
CategoricalJointObsField(
REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
),
NumericalJointObsField(
REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
),
ProteinObsmField(
REGISTRY_KEYS.PROTEIN_EXP_KEY,
protein_expression_obsm_key,
use_batch_mask=True,
batch_key=batch_field.attr_key,
colnames_uns_key=protein_names_uns_key,
is_count_data=True,
),
]
adata_manager = AnnDataManager(
fields=anndata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager)
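# Illustrative, non-executed sketch of wiring the setup above to a model
# instance; `model_cls` stands for the model class defined in this module, and
# the obsm/obs keys and training length are hypothetical.
def _example_setup_and_train(model_cls, adata):
    model_cls.setup_anndata(
        adata,
        protein_expression_obsm_key="protein_expression",
        batch_key="batch",
    )
    model = model_cls(adata)
    model.train(max_epochs=50)
    return model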
| 39.466397
| 132
| 0.603311
|
7950c4f957285b61671a169327c06f57bf87a302
| 1,086
|
py
|
Python
|
scripts/migrate_promed_ids.py
|
ecohealthalliance/portfolio-manager
|
dae8e9354a2dec80c5f2c4b9d85a7fa7d192a51f
|
[
"Apache-2.0"
] | null | null | null |
scripts/migrate_promed_ids.py
|
ecohealthalliance/portfolio-manager
|
dae8e9354a2dec80c5f2c4b9d85a7fa7d192a51f
|
[
"Apache-2.0"
] | null | null | null |
scripts/migrate_promed_ids.py
|
ecohealthalliance/portfolio-manager
|
dae8e9354a2dec80c5f2c4b9d85a7fa7d192a51f
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import pymongo
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-port', help='mongodb port', default=27017)
parser.add_argument('-db', help='mongodb db', default='meteor')
args = parser.parse_args()
    db = pymongo.MongoClient('localhost', int(args.port))[args.db]
portfolios = db.portfolios
resources = db.resources
for portfolio in portfolios.find():
portfolioId = portfolio.get('_id')
promedResources = portfolio.get('resources')
resourceIds = []
for promedId in promedResources:
resource = resources.find_one({'promedId': promedId})
if resource:
resourceId = resource.get('_id')
resourceIds.append(resourceId)
        if len(resourceIds) == len(promedResources):
portfolios.update({'_id': portfolioId}, {'$set': {'resources': resourceIds}})
for resource in resources.find():
resourceId = resource.get('_id')
resources.update({'_id': resourceId}, {'$set': {'source': 'promed'}})
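# Illustrative invocation (host and database values are just the defaults):
#   python scripts/migrate_promed_ids.py -port 27017 -db meteor
# The script rewrites each portfolio's `resources` list from promed ids to
# resource `_id`s and tags every resource document with source: "promed".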
| 35.032258
| 89
| 0.633517
|
7950c55ee4b348f8adf2913952b20caf3497f38d
| 6,747
|
py
|
Python
|
tools/train_utils/train_utils.py
|
Bilal-A-Qureshi/OpenPCDet
|
633c6026e56fc3fb2112f2a9f7ce08a21619e78f
|
[
"Apache-2.0"
] | null | null | null |
tools/train_utils/train_utils.py
|
Bilal-A-Qureshi/OpenPCDet
|
633c6026e56fc3fb2112f2a9f7ce08a21619e78f
|
[
"Apache-2.0"
] | null | null | null |
tools/train_utils/train_utils.py
|
Bilal-A-Qureshi/OpenPCDet
|
633c6026e56fc3fb2112f2a9f7ce08a21619e78f
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(cur_epoch,model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
if total_it_each_epoch == len(train_loader):
dataloader_iter = iter(train_loader)
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
data_time = common_utils.AverageMeter()
batch_time = common_utils.AverageMeter()
forward_time = common_utils.AverageMeter()
for cur_it in range(total_it_each_epoch):
end = time.time()
try:
batch = next(dataloader_iter)
except StopIteration:
dataloader_iter = iter(train_loader)
batch = next(dataloader_iter)
print('new iters')
data_timer = time.time()
cur_data_time = data_timer - end
lr_scheduler.step(accumulated_iter)
try:
cur_lr = float(optimizer.lr)
        except Exception:
cur_lr = optimizer.param_groups[0]['lr']
if tb_log is not None:
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
model.train()
optimizer.zero_grad()
loss, tb_dict, disp_dict = model_func(model, batch)
forward_timer = time.time()
cur_forward_time = forward_timer - data_timer
loss.backward()
clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
optimizer.step()
accumulated_iter += 1
cur_batch_time = time.time() - end
# average reduce
avg_data_time = commu_utils.average_reduce_value(cur_data_time)
avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
# log to console and tensorboard
if rank == 0:
data_time.update(avg_data_time)
forward_time.update(avg_forward_time)
batch_time.update(avg_batch_time)
disp_dict.update({
'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
})
pbar.update()
pbar.set_postfix(dict(total_it=accumulated_iter))
tbar.set_postfix(disp_dict)
tbar.refresh()
if tb_log is not None:
tb_log.add_scalar('train/loss', loss, cur_epoch)
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
for key, val in tb_dict.items():
tb_log.add_scalar('train/' + key, val, accumulated_iter)
if rank == 0:
pbar.close()
return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
merge_all_iters_to_one_epoch=False):
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if train_sampler is not None:
train_sampler.set_epoch(cur_epoch)
# train one epoch
if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(cur_epoch,
model, optimizer, train_loader, model_func,
lr_scheduler=cur_scheduler,
accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
rank=rank, tbar=tbar, tb_log=tb_log,
leave_pbar=(cur_epoch + 1 == total_epochs),
total_it_each_epoch=total_it_each_epoch,
dataloader_iter=dataloader_iter
)
# save trained model
trained_epoch = cur_epoch + 1
if trained_epoch % ckpt_save_interval == 0 and rank == 0:
ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
            if len(ckpt_list) >= max_ckpt_save_num:
for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
os.remove(ckpt_list[cur_file_idx])
ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
save_checkpoint(
checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
)
def model_state_to_cpu(model_state):
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
optim_state = optimizer.state_dict() if optimizer is not None else None
if model is not None:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(model.module.state_dict())
else:
model_state = model.state_dict()
else:
model_state = None
try:
import pcdet
version = 'pcdet+' + pcdet.__version__
    except Exception:
version = 'none'
return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
if False and 'optimizer_state' in state:
optimizer_state = state['optimizer_state']
state.pop('optimizer_state', None)
optimizer_filename = '{}_optim.pth'.format(filename)
torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
filename = '{}.pth'.format(filename)
torch.save(state, filename)
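# Minimal usage sketch for the checkpoint helpers above; the toy model, path
# and hyper-parameters are hypothetical, and nothing here runs on import.
def _example_checkpoint_roundtrip(ckpt_dir='./ckpts'):
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    os.makedirs(ckpt_dir, exist_ok=True)
    ckpt_name = os.path.join(ckpt_dir, 'checkpoint_epoch_1')
    save_checkpoint(checkpoint_state(model, optimizer, epoch=1, it=100), filename=ckpt_name)
    return torch.load(ckpt_name + '.pth')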
| 39
| 132
| 0.641915
|
7950c56b3b62693f974cbcc5ab8686f30fa42cbe
| 10,884
|
py
|
Python
|
loopy/codegen/result.py
|
benSepanski/loopy
|
5db582d579eb65ce58b93e2c53feb1d48404cf2d
|
[
"MIT"
] | null | null | null |
loopy/codegen/result.py
|
benSepanski/loopy
|
5db582d579eb65ce58b93e2c53feb1d48404cf2d
|
[
"MIT"
] | null | null | null |
loopy/codegen/result.py
|
benSepanski/loopy
|
5db582d579eb65ce58b93e2c53feb1d48404cf2d
|
[
"MIT"
] | null | null | null |
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2016 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from pytools import ImmutableRecord
def process_preambles(preambles):
seen_preamble_tags = set()
dedup_preambles = []
for tag, preamble in sorted(preambles, key=lambda tag_code: tag_code[0]):
if tag in seen_preamble_tags:
continue
seen_preamble_tags.add(tag)
dedup_preambles.append(preamble)
from loopy.tools import remove_common_indentation
return [
remove_common_indentation(lines) + "\n"
for lines in dedup_preambles]
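# Illustrative behaviour (hypothetical tags and snippets): given
#   [("05_hdr", "// hdr"), ("10_math", "#include <math.h>"), ("10_math", "duplicate")]
# the entries are ordered by tag, the second "10_math" entry is dropped, and each
# surviving snippet is de-indented and newline-terminated.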
# {{{ code generation result
class GeneratedProgram(ImmutableRecord):
"""
.. attribute:: name
.. attribute:: is_device_program
.. attribute:: ast
Once generated, this captures the AST of the overall function
definition, including the body.
.. attribute:: body_ast
Once generated, this captures the AST of the operative function
body (including declaration of necessary temporaries), but not
the overall function definition.
"""
class CodeGenerationResult(ImmutableRecord):
"""
.. attribute:: host_program
.. attribute:: device_programs
A list of :class:`GeneratedProgram` instances
intended to run on the compute device.
.. attribute:: implemented_domains
A mapping from instruction ID to a list of :class:`islpy.Set`
objects.
.. attribute:: host_preambles
.. attribute:: device_preambles
.. automethod:: host_code
.. automethod:: device_code
.. automethod:: all_code
.. attribute:: implemented_data_info
a list of :class:`loopy.codegen.ImplementedDataInfo` objects.
Only added at the very end of code generation.
"""
@staticmethod
def new(codegen_state, insn_id, ast, implemented_domain):
prg = GeneratedProgram(
name=codegen_state.gen_program_name,
is_device_program=codegen_state.is_generating_device_code,
ast=ast)
if codegen_state.is_generating_device_code:
kwargs = {
"host_program": None,
"device_programs": [prg],
}
else:
kwargs = {
"host_program": prg,
"device_programs": [],
}
return CodeGenerationResult(
implemented_data_info=codegen_state.implemented_data_info,
implemented_domains={insn_id: [implemented_domain]},
**kwargs)
def host_code(self):
preamble_codes = process_preambles(getattr(self, "host_preambles", []))
return (
"".join(preamble_codes)
+
str(self.host_program.ast))
def device_code(self):
preamble_codes = process_preambles(getattr(self, "device_preambles", []))
return (
"".join(preamble_codes)
+ "\n"
+ "\n\n".join(str(dp.ast) for dp in self.device_programs))
def all_code(self):
preamble_codes = process_preambles(
getattr(self, "host_preambles", [])
+
list(getattr(self, "device_preambles", []))
)
return (
"".join(preamble_codes)
+ "\n"
+ "\n\n".join(str(dp.ast) for dp in self.device_programs)
+ "\n\n"
+ str(self.host_program.ast))
def current_program(self, codegen_state):
if codegen_state.is_generating_device_code:
if self.device_programs:
result = self.device_programs[-1]
else:
result = None
else:
result = self.host_program
if result is None:
ast = codegen_state.ast_builder.ast_block_class([])
result = GeneratedProgram(
name=codegen_state.gen_program_name,
is_device_program=codegen_state.is_generating_device_code,
ast=ast)
assert result.name == codegen_state.gen_program_name
return result
def with_new_program(self, codegen_state, program):
if codegen_state.is_generating_device_code:
assert program.name == codegen_state.gen_program_name
assert program.is_device_program
return self.copy(
device_programs=(
self.device_programs[:-1]
+
[program]))
else:
assert program.name == codegen_state.gen_program_name
assert not program.is_device_program
return self.copy(host_program=program)
def current_ast(self, codegen_state):
return self.current_program(codegen_state).ast
def with_new_ast(self, codegen_state, new_ast):
return self.with_new_program(
codegen_state,
self.current_program(codegen_state).copy(
ast=new_ast))
# }}}
# {{{ support code for AST merging
def merge_codegen_results(codegen_state, elements, collapse=True):
elements = [el for el in elements if el is not None]
if not elements:
return CodeGenerationResult(
host_program=None,
device_programs=[],
implemented_domains={},
implemented_data_info=codegen_state.implemented_data_info)
ast_els = []
new_device_programs = []
dev_program_names = set()
implemented_domains = {}
codegen_result = None
block_cls = codegen_state.ast_builder.ast_block_class
block_scope_cls = codegen_state.ast_builder.ast_block_scope_class
for el in elements:
if isinstance(el, CodeGenerationResult):
if codegen_result is None:
codegen_result = el
else:
assert (
el.current_program(codegen_state).name
== codegen_result.current_program(codegen_state).name)
for insn_id, idoms in six.iteritems(el.implemented_domains):
implemented_domains.setdefault(insn_id, []).extend(idoms)
if not codegen_state.is_generating_device_code:
for dp in el.device_programs:
if dp.name not in dev_program_names:
new_device_programs.append(dp)
dev_program_names.add(dp.name)
cur_ast = el.current_ast(codegen_state)
if (isinstance(cur_ast, block_cls)
and not isinstance(cur_ast, block_scope_cls)):
ast_els.extend(cur_ast.contents)
else:
ast_els.append(cur_ast)
else:
ast_els.append(el)
if collapse and len(ast_els) == 1:
ast, = ast_els
else:
ast = block_cls(ast_els)
kwargs = {}
if not codegen_state.is_generating_device_code:
kwargs["device_programs"] = new_device_programs
return (codegen_result
.with_new_ast(codegen_state, ast)
.copy(
implemented_domains=implemented_domains,
implemented_data_info=codegen_state.implemented_data_info,
**kwargs))
def wrap_in_if(codegen_state, condition_exprs, inner):
if condition_exprs:
from pymbolic.primitives import LogicalAnd
from pymbolic.mapper.stringifier import PREC_NONE
cur_ast = inner.current_ast(codegen_state)
return inner.with_new_ast(
codegen_state,
codegen_state.ast_builder.emit_if(
codegen_state.expression_to_code_mapper(
LogicalAnd(tuple(condition_exprs)), PREC_NONE),
cur_ast))
return inner
# }}}
# {{{ program generation top-level
def generate_host_or_device_program(codegen_state, schedule_index):
ast_builder = codegen_state.ast_builder
temp_decls = ast_builder.get_temporary_decls(codegen_state, schedule_index)
from functools import partial
from loopy.codegen.control import build_loop_nest
if codegen_state.is_generating_device_code:
from loopy.schedule import CallKernel
assert isinstance(codegen_state.kernel.schedule[schedule_index], CallKernel)
from loopy.codegen.loop import set_up_hw_parallel_loops
codegen_result = set_up_hw_parallel_loops(
codegen_state, schedule_index,
next_func=partial(build_loop_nest,
schedule_index=schedule_index + 1))
else:
codegen_result = build_loop_nest(codegen_state, schedule_index)
if (codegen_state.is_generating_device_code) or (
codegen_state.kernel.is_called_from_host):
codegen_result = merge_codegen_results(
codegen_state,
ast_builder.generate_top_of_body(codegen_state)
+ temp_decls
+ [codegen_result],
collapse=False)
cur_prog = codegen_result.current_program(codegen_state)
body_ast = cur_prog.ast
fdecl_ast = ast_builder.get_function_declaration(
codegen_state, codegen_result, schedule_index)
fdef_ast = ast_builder.get_function_definition(
codegen_state, codegen_result,
schedule_index, fdecl_ast, body_ast)
codegen_result = codegen_result.with_new_program(
codegen_state,
cur_prog.copy(
ast=ast_builder.process_ast(fdef_ast),
body_ast=ast_builder.process_ast(body_ast)))
else:
codegen_result = codegen_result.copy(
host_program=None)
return codegen_result
# }}}
| 33.489231
| 84
| 0.630099
|
7950c5d229922e5a3130d40ad166807b1b1e793e
| 25,099
|
py
|
Python
|
shed/tests/test_simple.py
|
st3107/shed-streaming
|
c632fc465d7e11fe0155fbc3e8add1965615dd51
|
[
"BSD-3-Clause"
] | 4
|
2017-09-20T16:26:34.000Z
|
2020-03-24T15:51:28.000Z
|
shed/tests/test_simple.py
|
st3107/shed-streaming
|
c632fc465d7e11fe0155fbc3e8add1965615dd51
|
[
"BSD-3-Clause"
] | 172
|
2017-07-25T21:36:12.000Z
|
2022-02-25T16:05:36.000Z
|
shed/tests/test_simple.py
|
st3107/shed-streaming
|
c632fc465d7e11fe0155fbc3e8add1965615dd51
|
[
"BSD-3-Clause"
] | 6
|
2017-08-08T12:39:18.000Z
|
2021-03-29T22:28:47.000Z
|
import operator as op
import time
import uuid
import networkx as nx
import numpy as np
import pytest
from bluesky.plan_stubs import checkpoint, abs_set, trigger_and_read
from bluesky.plans import scan, count
from shed import (
SimpleFromEventStream as FromEventStream,
SimpleToEventStream as ToEventStream,
walk_to_translation,
simple_to_event_stream_new_api,
)
from shed.simple import _hash_or_uid, build_upstream_node_set
from shed.tests.utils import y
from shed.utils import unstar
from rapidz import Stream, move_to_first
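# The tests below share one pattern: build a rapidz graph whose sources are
# SimpleFromEventStream nodes, feed it (name, document) tuples either from a
# bluesky RunEngine subscription or from a hand-written generator, and assert
# on the documents emitted by SimpleToEventStream.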
def test_from_event_model(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
L = t.sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll
def test_from_event_model_single(RE, hw):
source = Stream()
t = FromEventStream("event", "data", source, principle=True)
L = t.sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll["motor"]
def test_from_event_model_multi(RE, hw):
source = Stream()
t = FromEventStream(
"event", ("data", ("motor", "motor_setpoint")), source, principle=True
)
L = t.sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll[0]
assert i == ll[1]
def test_from_event_model_all(RE, hw):
source = Stream()
t = FromEventStream("event", (), source, principle=True)
L = t.sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll["data"]["motor"]
def test_from_event_model_stream_syntax(RE, hw):
source = Stream()
t = source.simple_from_event_stream(
"event", ("data", "motor"), principle=True
)
L = t.sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll
def test_from_event_model_stream_name():
def data():
suid = str(uuid.uuid4())
duid = str(uuid.uuid4())
yield "start", {"hi": "world", "uid": suid}
yield "descriptor", {
"name": "hi",
"data_keys": {"ct"},
"uid": duid,
"run_start": suid,
}
for i in range(10):
yield "event", {
"uid": str(uuid.uuid4()),
"data": {"ct": i},
"descriptor": duid,
}
duid = str(uuid.uuid4())
yield "descriptor", {
"name": "not hi",
"data_keys": {"ct"},
"uid": duid,
"run_start": suid,
}
for i in range(100, 110):
yield "event", {
"uid": str(uuid.uuid4()),
"data": {"ct": i},
"descriptor": duid,
}
yield "stop", {"uid": str(uuid.uuid4()), "run_start": suid}
g = data()
source = Stream()
t = FromEventStream(
"event", ("data", "ct"), source, event_stream_name="hi"
)
L = t.sink_to_list()
for gg in g:
source.emit(gg)
assert len(L) == 10
for i, ll in enumerate(L):
assert i == ll
def test_from_event_model_stream_name2():
def data():
suid = str(uuid.uuid4())
duid = str(uuid.uuid4())
yield "start", {"hi": "world", "uid": suid}
yield "descriptor", {
"name": "hi",
"data_keys": {"ct"},
"uid": duid,
"run_start": suid,
}
for i in range(10):
yield "event", {
"uid": str(uuid.uuid4()),
"data": {"ct": i},
"descriptor": duid,
}
duid = str(uuid.uuid4())
yield "descriptor", {
"name": "not hi",
"data_keys": {"ct"},
"uid": duid,
"run_start": suid,
}
for i in range(100, 110):
yield "event", {
"uid": str(uuid.uuid4()),
"data": {"ct": i},
"descriptor": duid,
}
yield "stop", {"uid": str(uuid.uuid4()), "run_start": suid}
g = data()
source = Stream()
t = FromEventStream(
"event", ("data", "ct"), source, event_stream_name="not hi"
)
L = t.sink_to_list()
for gg in g:
source.emit(gg)
assert len(L) == 10
for i, ll in enumerate(L):
assert i + 100 == ll
def test_walk_up():
raw = Stream()
a_translation = FromEventStream("start", ("time",), raw, principle=True)
b_translation = FromEventStream("event", ("data", "pe1_image"), raw)
d = b_translation.zip_latest(a_translation)
dd = d.map(op.truediv)
e = ToEventStream(dd, ("data",))
g = nx.DiGraph()
walk_to_translation(e, g)
att = []
for node, attrs in g.nodes.items():
att.append(attrs["stream"])
s = {a_translation, b_translation, d, dd, e}
assert s == set(att)
assert {_hash_or_uid(k) for k in s} == set(g.nodes)
def test_walk_up_partial():
raw = Stream()
a_translation = FromEventStream("start", ("time",), raw, principle=True)
b_translation = FromEventStream("event", ("data", "pe1_image"), raw)
d = b_translation.zip_latest(a_translation)
ddd = ToEventStream(d, ("data",))
dd = d.map(op.truediv)
e = ToEventStream(dd, ("data",))
g = nx.DiGraph()
walk_to_translation(e, g)
att = []
for node, attrs in g.nodes.items():
att.append(attrs["stream"])
s = {ddd, dd, e, d}
assert s == set(att)
assert {_hash_or_uid(k) for k in s} == set(g.nodes)
def test_to_event_model(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = ToEventStream(t, ("ct",), data_key_md={"ct": {"units": "arb"}})
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
assert d[-1]["run_start"]
def test_to_event_model_stream_syntax(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = t.simple_to_event_stream(("ct",), data_key_md={"ct": {"units": "arb"}})
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
assert d[-1]["run_start"]
def test_align():
a = Stream()
b = Stream()
z = a.AlignEventStreams(b)
sl = z.sink_to_list()
# TODO: use real run engine here
for n, d, dd in zip(
["start", "descriptor", "event", "stop"],
[
{"a": "hi", "b": {"hi": "world"}, "uid": "hi", "time": 123},
{"bla": "foo", "uid": "abc"},
{"data": "now", "descriptor": "abc"},
{"stop": "doc"},
],
[
{"a": "hi2", "b": {"hi2": "world"}},
{"bla": "foo", "uid": "123"},
{"data": "now", "descriptor": "123"},
{"stop": "doc"},
],
):
a.emit((n, d))
b.emit((n, dd))
assert len(sl) == 4
assert sl[0][1].get("b") == {"hi": "world", "hi2": "world"}
def test_align_stream_syntax():
a = Stream()
b = Stream()
z = a.align_event_streams(b)
sl = z.sink_to_list()
# TODO: use real run engine here
for n, d, dd in zip(
["start", "descriptor", "event", "stop"],
[
{"a": "hi", "b": {"hi": "world"}, "uid": "hi", "time": 123},
{"bla": "foo", "uid": "abc"},
{"data": "now", "descriptor": "abc"},
{"stop": "doc"},
],
[
{"a": "hi2", "b": {"hi2": "world"}},
{"bla": "foo", "uid": "123"},
{"data": "now", "descriptor": "123"},
{"stop": "doc"},
],
):
a.emit((n, d))
b.emit((n, dd))
assert len(sl) == 4
assert sl[0][1].get("b") == {"hi": "world", "hi2": "world"}
assert "original_start_time" in sl[0][1]
def test_align_interrupted(RE, hw):
a = Stream()
b = FromEventStream("event", ("data", "img"), a, principle=True).map(
op.add, 1
)
b.sink(print)
c = ToEventStream(b, ("out",))
z = move_to_first(a.AlignEventStreams(c))
sl = z.sink_to_list()
L = []
RE.subscribe(lambda *x: L.append(x))
RE(count([hw.img]))
for nd in L:
name, doc = nd
# cause an exception
if name == "event":
doc["data"]["img"] = "hi"
try:
a.emit((name, doc))
except TypeError:
pass
assert {"start", "stop"} == set(list(zip(*sl))[0])
# check that buffers are not cleared, yet
sl.clear()
# If there are elements in the buffer they need to be cleared when all
# start docs come in.
for nd in L:
name, doc = nd
# cause an exception
if name == "event":
doc["data"]["img"] = 1
a.emit((name, doc))
if name == "start":
# now buffers should be clear
assert not any(
[b for n, tb in z.true_buffers.items() for u, b in tb.items()]
)
assert {"start", "descriptor", "event", "stop"} == set(list(zip(*sl))[0])
# now buffers should be clear (as all docs were emitted)
assert not any(
[b for n, tb in z.true_buffers.items() for u, b in tb.items()]
)
def test_align_res_dat(RE, hw):
a = Stream()
b = FromEventStream("event", ("data", "motor"), a, principle=True).map(
op.add, 1
)
c = ToEventStream(b, ("out",))
z = a.AlignEventStreams(c)
sl = z.sink_to_list()
RE.subscribe(lambda *x: a.emit(x))
osu = RE(scan([hw.img], hw.motor, 0, 10, 10))
for n, d in sl:
if n == "start":
assert d["original_start_uid"] == osu[0]
if n == "event":
assert d["data"]["out"] == d["data"]["motor"] + 1
def test_align_buffering(RE, hw):
zz = {"data": False}
a = Stream()
b = FromEventStream(
"event",
("data", "motor"),
a.filter(lambda x: zz["data"]),
principle=True,
).map(op.add, 1)
c = ToEventStream(b, ("out",))
z = move_to_first(a.AlignEventStreams(c))
sl = z.sink_to_list()
RE.subscribe(lambda *x: a.emit(x))
RE(scan([hw.img], hw.motor, 0, 10, 10, md={"hello": "world"}))
zz["data"] = True
sl.clear()
RE(scan([hw.img], hw.motor, 0, 10, 10))
assert "hello" not in sl[0][1]
def test_align_buffering2(RE, hw):
a = Stream()
d = Stream()
b = FromEventStream(
"event", ("data", "motor"), principle=True, upstream=a
).map(op.add, 1)
c = ToEventStream(b, ("out",))
z = c.AlignEventStreams(d)
names = z.pluck(0).sink_to_list()
L = []
RE.subscribe(lambda *x: L.append(x))
RE(scan([hw.img], hw.motor, 0, 10, 10, md={"hello": "world"}))
for nd in L:
d.emit(nd)
print("hi")
for nd in L:
a.emit(nd)
assert all(k in names for k in ["start", "descriptor", "event", "stop"])
def test_align_multi_stream(RE, hw):
a = Stream()
b = FromEventStream(
"event",
("data", "motor"),
a,
principle=True,
event_stream_name="primary",
).map(op.add, 1)
c = ToEventStream(b, ("out",))
c.sink(print)
z = a.AlignEventStreams(c, event_stream_name="primary")
sl = z.sink_to_list()
RE.subscribe(lambda *x: a.emit(x))
def one_1d_step(detectors, motor, step):
"""
Inner loop of a 1D step scan
This is the default function for ``per_step`` param in 1D plans.
"""
yield from checkpoint()
yield from abs_set(motor, step, wait=True)
yield from trigger_and_read(list(detectors) + [motor], name="dark")
return (yield from trigger_and_read(list(detectors) + [motor]))
osu = RE(scan([hw.img], hw.motor, 0, 10, 10, per_step=one_1d_step))
assert len(sl) == 10 + 3
for n, d in sl:
if n == "start":
assert d["original_start_uid"] == osu[0]
if n == "event":
print(d)
assert d["data"]["out"] == d["data"]["motor"] + 1
def test_to_event_model_dict(RE, hw):
source = Stream()
t = FromEventStream("event", ("data",), source, principle=True)
n = ToEventStream(t)
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
n.sink(print)
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
print(d[1]["hints"])
# AAA
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {
"analyzer": {"fields": ["motor", "motor_setpoint"]}
}
assert d[2]["data"] == {"motor_setpoint": 0, "motor": 0}
assert d[-1]["run_start"]
def test_replay_export_test():
def y():
suid = str(uuid.uuid4())
yield ("start", {"uid": suid, "time": time.time()})
duid = str(uuid.uuid4())
yield (
"descriptor",
{
"uid": duid,
"run_start": suid,
"name": "primary",
"data_keys": {"det_image": {"dtype": "int", "units": "arb"}},
"time": time.time(),
},
)
for i in range(5):
yield (
"event",
{
"uid": str(uuid.uuid4()),
"data": {"det_image": i},
"timestamps": {"det_image": time.time()},
"seq_num": i + 1,
"time": time.time(),
"descriptor": duid,
},
)
yield (
"stop",
{"uid": str(uuid.uuid4()), "time": time.time(), "run_start": suid},
)
print("build graph")
g1 = FromEventStream(
"event", ("data", "det_image"), principle=True, stream_name="g1"
)
g11 = FromEventStream("event", ("data", "det_image"), stream_name="g11")
g11_1 = g1.zip(g11)
g2 = g11_1.starmap(op.mul).map(np.log)
g = g2.SimpleToEventStream(("img2",))
from pprint import pprint
g.sink(pprint)
L = g.sink_to_list()
print("run experiment")
for yy in y():
print(yy[0])
g11.update(yy)
g1.update(yy)
assert L[-1][1]["run_start"]
def test_no_stop(hw, RE):
source = Stream().filter(lambda x: x[0] != "stop")
t = FromEventStream("event", ("data",), source, principle=True)
n = ToEventStream(t)
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {
"analyzer": {"fields": ["motor", "motor_setpoint"]}
}
assert d[2]["data"] == {"motor_setpoint": 0, "motor": 0}
def test_parent_nodes():
# build the graph
g1 = FromEventStream(
"event",
("data", "det_image"),
principle=True,
stream_name="g1",
asynchronous=True,
)
g11 = FromEventStream(
"event", ("data", "det_image"), stream_name="g11", asynchronous=True
)
g2 = g1.zip(g11).starmap(op.mul, stream_name="mul")
g = g2.SimpleToEventStream(("img2",))
l1 = g.sink_to_list()
# g.sink(print)
assert len(g.translation_nodes) == 2
print("start experiment")
# run the experiment
l0 = []
for yy in y(5):
l0.append(yy)
g11.update(yy)
g1.update(yy)
print(g11.start_uid)
assert len(l1[0][1]["parent_node_map"]) == 2
@pytest.mark.xfail(raises=RuntimeError)
def test_no_parent_nodes():
# build the graph
g1 = FromEventStream(
"event", ("data", "det_image"), stream_name="g1", asynchronous=True
)
g11 = FromEventStream(
"event", ("data", "det_image"), stream_name="g11", asynchronous=True
)
g2 = g1.zip(g11).starmap(op.mul, stream_name="mul")
g2.SimpleToEventStream(("img2",))
def test_multi_path_principle(hw, RE):
source = Stream()
fes1 = FromEventStream("start", ("number",), source, principle=True)
fes2 = FromEventStream("event", ("data", "motor"), source, principle=True)
out1 = fes1.map(op.add, 1)
out2 = fes2.combine_latest(out1, emit_on=0).starmap(op.mul)
a = ToEventStream(out1, ("out1",))
b = ToEventStream(out2, ("out2",))
la = a.sink_to_list()
lb = b.sink_to_list()
RE.subscribe(lambda *x: source.emit(x))
for i in range(1, 3):
RE(count([hw.motor], md={"number": 5}))
for lst in [la, lb]:
o1 = [z[0] for z in lst]
o2 = ["start", "descriptor", "event", "stop"] * i
assert o1 == o2
def test_same_hdr_many_times(hw, RE):
source = Stream()
fes1 = FromEventStream("start", ("number",), source, principle=True)
fes2 = FromEventStream("event", ("data", "motor"), source, principle=True)
out1 = fes1.map(op.add, 1)
out2 = fes2.combine_latest(out1, emit_on=0).starmap(op.mul)
a = ToEventStream(out1, ("out1",))
b = ToEventStream(out2, ("out2",))
la = a.sink_to_list()
lb = b.sink_to_list()
L = []
RE.subscribe(lambda *x: L.append(x))
RE(count([hw.motor], md={"number": 5}))
for i in range(1, 3):
for ll in L:
source.emit(ll)
for lst in [la, lb]:
o1 = [z[0] for z in lst]
o2 = ["start", "descriptor", "event", "stop"] * i
assert o1 == o2
def test_last_cache(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = ToEventStream(
t, ("ct",), data_key_md={"ct": {"units": "arb"}}
).LastCache()
tt = t.sink_to_list()
names = n.pluck(0).sink_to_list()
docs = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert len(docs) == 10 + 3 + 2
assert names[-3] == "descriptor"
assert names[-2] == "event"
assert tt
assert set(names) == {"start", "stop", "event", "descriptor"}
assert docs[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert docs[1]["data_keys"]["ct"]["units"] == "arb"
assert docs[-1]["run_start"]
def test_build_upstream_node_set():
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = ToEventStream(
t, ("ct",), data_key_md={"ct": {"units": "arb"}}
).LastCache()
s = build_upstream_node_set(n)
assert len(s) == 3
def test_to_event_model_new_api(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = simple_to_event_stream_new_api(
{t: {"data_keys": {"ct": {"units": "arb", "precision": 2}}}}
)
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
assert d[-1]["run_start"]
def test_to_event_model_new_api_no_data_keys(RE, hw):
source = Stream()
t = FromEventStream("event", ("data",), source, principle=True)
assert t.principle
n = simple_to_event_stream_new_api({t: {}})
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {
"analyzer": {"fields": ["motor", "motor_setpoint"]}
}
assert d[1]["data_keys"]["motor"]
assert d[-1]["run_start"]
def test_to_event_model_new_api_clobber(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = simple_to_event_stream_new_api(
{t: {"data_keys": {"ct": {"units": "arb", "dtype": "array"}}}}
)
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["data_keys"]["ct"]["dtype"] == "array"
assert d[-1]["run_start"]
def test_to_event_model_new_api_multi(RE, hw):
source = Stream()
stop = FromEventStream("stop", (), source)
t = FromEventStream(
"event", ("data", "motor"), source, principle=True, stream_name="hi"
)
assert t.principle
tt = t.zip(stop)
n = simple_to_event_stream_new_api(
{
t: {"data_keys": {"ct": {"units": "arb", "precision": 2}}},
tt: {
"name": "final",
"data_keys": {"ct": {"units": "arb", "precision": 2}},
},
},
hello="world",
)
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[0]["hello"] == "world"
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
assert d[-3]["name"] == "final"
assert d[-1]["run_start"]
@pytest.mark.xfail(raises=RuntimeError)
def test_to_event_model_new_api_no_principle(RE, hw):
source = Stream()
stop = FromEventStream("stop", (), source)
t = FromEventStream("event", ("data", "motor"), source, stream_name="hi")
tt = t.zip(stop)
simple_to_event_stream_new_api(
{
t: {"data_keys": {"ct": {"units": "arb", "precision": 2}}},
tt: {
"name": "final",
"data_keys": {"ct": {"units": "arb", "precision": 2}},
},
},
hello="world",
)
def test_to_event_model_new_api_multi_parent(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
t2 = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = simple_to_event_stream_new_api(
{
t.zip(t2).pluck(0): {
"data_keys": {"ct": {"units": "arb", "precision": 2}}
}
}
)
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
RE.subscribe(unstar(source.emit))
RE.subscribe(print)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert tt
assert set(p) == {"start", "stop", "event", "descriptor"}
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
assert d[-1]["run_start"]
def test_to_event_model_new_api_e_stop(RE, hw):
source = Stream()
t = FromEventStream("event", ("data", "motor"), source, principle=True)
assert t.principle
n = simple_to_event_stream_new_api(
{t: {"data_keys": {"ct": {"units": "arb", "precision": 2}}}}
)
tt = t.sink_to_list()
p = n.pluck(0).sink_to_list()
d = n.pluck(1).sink_to_list()
def f(*x):
if x[0] == "stop":
return
source.emit(x)
RE.subscribe(f)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
rs = d[0]["uid"]
assert tt
assert set(p) == {"start", "event", "descriptor"}
assert d[1]["hints"] == {"analyzer": {"fields": ["ct"]}}
assert d[1]["data_keys"]["ct"]["units"] == "arb"
ll = len(d)
RE(scan([hw.motor], hw.motor, 0, 9, 10))
assert d[ll]["run_start"] == rs
assert set(p) == {"start", "stop", "event", "descriptor"}
| 27.611661
| 79
| 0.538229
|
7950c5ecbb2b96efd47c2c67c6807f7824ea1012
| 350
|
py
|
Python
|
src/data/save_npy.py
|
paristsai/jdata
|
bb8839d7f7a4a10f3ebcdd8f4d1ce2fa29562868
|
[
"BSD-3-Clause"
] | 3
|
2019-12-05T12:04:05.000Z
|
2020-09-27T15:54:02.000Z
|
src/data/save_npy.py
|
paristsai/jdata
|
bb8839d7f7a4a10f3ebcdd8f4d1ce2fa29562868
|
[
"BSD-3-Clause"
] | 3
|
2020-03-24T16:24:54.000Z
|
2021-04-30T20:38:32.000Z
|
src/data/save_npy.py
|
paristsai/jdata
|
bb8839d7f7a4a10f3ebcdd8f4d1ce2fa29562868
|
[
"BSD-3-Clause"
] | 1
|
2020-11-06T12:02:37.000Z
|
2020-11-06T12:02:37.000Z
|
import numpy as np
import pandas as pd
from .. import DATA_DIR
def main():
interim_path = DATA_DIR.joinpath("interim")
df = pd.read_csv("{0}/All_Action.csv".format(interim_path))
np.save("{0}/All_Action.npy".format(interim_path), df.values)
print("{0}/All_Action.npy".format(interim_path))
if __name__ == "__main__":
main()
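# Note (illustrative): the package-relative import above means this script is
# intended to be run as a module, e.g. `python -m src.data.save_npy`, with
# All_Action.csv already present under DATA_DIR/interim.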
| 20.588235
| 65
| 0.685714
|
7950c66d4ac999889e47b6a2d0dd10c1e6ab339f
| 988
|
py
|
Python
|
lib/procedures/__init__.py
|
shashank3959/NAS-Projects
|
2c0577231a52375de5ebd7a588750899a8c7bf1c
|
[
"MIT"
] | 923
|
2020-01-11T06:36:53.000Z
|
2022-03-31T00:26:57.000Z
|
lib/procedures/__init__.py
|
shashank3959/NAS-Projects
|
2c0577231a52375de5ebd7a588750899a8c7bf1c
|
[
"MIT"
] | 25
|
2020-02-27T08:35:46.000Z
|
2022-01-25T08:54:19.000Z
|
lib/procedures/__init__.py
|
shashank3959/NAS-Projects
|
2c0577231a52375de5ebd7a588750899a8c7bf1c
|
[
"MIT"
] | 262
|
2020-01-02T02:19:40.000Z
|
2022-03-23T04:56:16.000Z
|
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
from .starts import prepare_seed, prepare_logger, get_machine_info, save_checkpoint, copy_checkpoint
from .optimizers import get_optim_scheduler
def get_procedures(procedure):
from .basic_main import basic_train, basic_valid
from .search_main import search_train, search_valid
from .search_main_v2 import search_train_v2
from .simple_KD_main import simple_KD_train, simple_KD_valid
train_funcs = {'basic' : basic_train, \
'search': search_train,'Simple-KD': simple_KD_train, \
'search-v2': search_train_v2}
valid_funcs = {'basic' : basic_valid, \
'search': search_valid,'Simple-KD': simple_KD_valid, \
'search-v2': search_valid}
train_func = train_funcs[procedure]
valid_func = valid_funcs[procedure]
return train_func, valid_func
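# Small illustration of the dispatch above (hypothetical call site); the
# returned callables still expect the usual config/model/loader arguments.
def _example_lookup(procedure='basic'):
    train_func, valid_func = get_procedures(procedure)
    return train_func.__name__, valid_func.__name__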
| 42.956522
| 104
| 0.638664
|
7950c6a58edb9315a8cc2c844ca7c29e06741e17
| 3,468
|
py
|
Python
|
atomate/vasp/workflows/tests/test_insertion_workflow.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
atomate/vasp/workflows/tests/test_insertion_workflow.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
atomate/vasp/workflows/tests/test_insertion_workflow.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import os
from pathlib import Path
from fireworks.core.fworker import FWorker
from fireworks.core.rocket_launcher import rapidfire
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core import Structure
from atomate.utils.testing import AtomateTest
from atomate.vasp.powerups import add_modify_incar, use_fake_vasp, use_potcar_spec
from atomate.vasp.workflows.base.electrode import get_ion_insertion_wf
__author__ = "Jimmy Shen"
__email__ = "jmmshn@gmail.com"
module_dir = Path(__file__).resolve().parent
db_dir = module_dir / "../../../common/test_files"
ref_dir = module_dir / "../../test_files"
wf_dir = ref_dir / "insertion_wf"
class TestInsertionWorkflow(AtomateTest):
def setUp(self):
super().setUp()
input_output_dirs = ref_dir / "insertion_wf"
        names = next(os.walk(input_output_dirs))[1]
calc_dirs = {n_: input_output_dirs / n_ for n_ in names}
base_struct = Structure.from_file(wf_dir / "YPO4-static/inputs/POSCAR")
sm = StructureMatcher(ltol=0.6, stol=0.6, angle_tol=9)
# Run the workflow with fake VASP
wf = get_ion_insertion_wf(
structure=base_struct,
structure_matcher=sm,
working_ion="Mg",
volumetric_data_type="AECCAR",
db_file=db_dir / "db.json",
optimizefw_kwargs={"ediffg": -0.05},
)
wf = use_fake_vasp(
wf,
calc_dirs,
check_incar=False,
check_kpoints=False,
check_poscar=False,
check_potcar=False,
)
wf = add_modify_incar(wf, modify_incar_params={"incar_update": {"KPAR": 8}})
wf = use_potcar_spec(wf)
self.wf = wf
wf_stop_early = get_ion_insertion_wf(
structure=base_struct,
structure_matcher=sm,
working_ion="Mg",
volumetric_data_type="AECCAR",
db_file=db_dir / "db.json",
max_inserted_atoms=1,
optimizefw_kwargs={"ediffg": -0.05},
)
wf_stop_early = use_fake_vasp(
wf_stop_early,
calc_dirs,
check_incar=False,
check_kpoints=False,
check_poscar=False,
check_potcar=False,
)
        wf_stop_early = add_modify_incar(
            wf_stop_early, modify_incar_params={"incar_update": {"KPAR": 8}}
        )
wf_stop_early = use_potcar_spec(wf_stop_early)
self.wf_stop_early = wf_stop_early
def test_has_inserted(self):
self.lp.add_wf(self.wf_stop_early)
rapidfire(
self.lp,
fworker=FWorker(
env={
"db_file": os.path.join(db_dir, "db.json"),
"vasp_cmd": ["echo", "fake"],
}
),
)
formulas = self.get_task_collection(coll_name="tasks").distinct(
"formula_pretty"
)
self.assertEqual(set(formulas), {"YPO4"})
self.lp.add_wf(self.wf)
rapidfire(
self.lp,
fworker=FWorker(
env={
"db_file": os.path.join(db_dir, "db.json"),
"vasp_cmd": ["echo", "fake"],
}
),
)
# Check that all of the inserted pretty formulas are present
formulas = self.get_task_collection(coll_name="tasks").distinct(
"formula_pretty"
)
self.assertEqual(set(formulas), {"YPO4"})
| 32.716981
| 84
| 0.589677
|
7950c78654d47be41b7b2de9651955303c8e996a
| 6,082
|
py
|
Python
|
markdown_generator/pubsFromBib.py
|
yal054/yal054
|
9571edd48a08ba4eed2114fa2da5b8cfdd01cb6a
|
[
"MIT"
] | 4
|
2022-01-07T04:01:02.000Z
|
2022-01-17T06:04:59.000Z
|
markdown_generator/pubsFromBib.py
|
yal054/yal054.github.io
|
9571edd48a08ba4eed2114fa2da5b8cfdd01cb6a
|
[
"MIT"
] | null | null | null |
markdown_generator/pubsFromBib.py
|
yal054/yal054.github.io
|
9571edd48a08ba4eed2114fa2da5b8cfdd01cb6a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)).
#
# The core python code is also in `pubsFromBibs.py`.
# Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:
# * bib file names
# * specific venue keys based on your bib file preferences
# * any specific pre-text for specific files
# * Collection Name (future feature)
#
# TODO: Make this work with other databases of citations,
# TODO: Merge this with the existing TSV parsing solution
from pybtex.database.input import bibtex
from time import strptime
import string
import html
import os
import re
#todo: incorporate different collection types rather than a catch all publications, requires other changes to template
publist = {
# "proceeding": {
# "file" : "proceedings.bib",
# "venuekey": "booktitle",
# "venue-pretext": "In the proceedings of ",
# "collection" : {"name":"publications",
# "permalink":"/publication/"}
#
# },
"journal":{
"file": "pubs.bib",
"venuekey" : "journal",
"venue-pretext" : "",
"collection" : {"name":"publications",
"permalink":"/publication/"}
}
}
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
for pubsource in publist:
parser = bibtex.Parser()
bibdata = parser.parse_file(publist[pubsource]["file"])
#loop through the individual references in a given bibtex file
for bib_id in bibdata.entries:
#reset default date
pub_year = "1900"
pub_month = "01"
pub_day = "01"
b = bibdata.entries[bib_id].fields
try:
pub_year = f'{b["year"]}'
#todo: this hack for month and day needs some cleanup
if "month" in b.keys():
if(len(b["month"])<3):
pub_month = "0"+b["month"]
pub_month = pub_month[-2:]
elif(b["month"] not in range(12)):
tmnth = strptime(b["month"][:3],'%b').tm_mon
pub_month = "{:02d}".format(tmnth)
else:
pub_month = str(b["month"])
if "day" in b.keys():
pub_day = str(b["day"])
pub_date = pub_year+"-"+pub_month+"-"+pub_day
#strip out {} as needed (some bibtex entries that maintain formatting)
clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-")
url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title)
url_slug = url_slug.replace("--","-")
md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-")
html_filename = (str(pub_date) + "-" + url_slug).replace("--","-")
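            # For example (illustrative): an entry with year 2019, month "Jul" and
            # title "Deep {Learning} for X" yields "2019-07-01-Deep-Learning-for-X.md".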
#Build Citation from text
citation = ""
#citation authors - todo - add highlighting for primary author?
for author in bibdata.entries[bib_id].persons["author"]:
#citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", "
citation = citation+" "+str(author)+", "
#citation title
citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\""
#add venue logic depending on citation type
venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","")
citation = citation + " " + html_escape(venue)
citation = citation + ", " + pub_year + "."
## YAML variables
md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n'
md += """collection: """ + publist[pubsource]["collection"]["name"]
md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename
note = False
if "note" in b.keys():
if len(str(b["note"])) > 5:
md += "\nexcerpt: '" + html_escape(b["note"]) + "'"
note = True
md += "\ndate: " + str(pub_date)
md += "\nvenue: '" + html_escape(venue) + "'"
url = False
if "url" in b.keys():
if len(str(b["url"])) > 5:
md += "\npaperurl: '" + b["url"] + "'"
url = True
md += "\ncitation: '" + html_escape(citation) + "'"
md += "\n---"
## Markdown description for individual page
if note:
md += "\n" + html_escape(b["note"]) + "\n"
if url:
md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n"
else:
md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation"
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
            print(f'SUCCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"")
# field may not exist for a reference
except KeyError as e:
print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"")
continue
| 37.776398
| 273
| 0.522525
|
7950c8abffdf651641ee1e67d40958089d53c6ea
| 12,335
|
py
|
Python
|
opsgenie_swagger/models/pingdom_v2_integration.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/models/pingdom_v2_integration.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/models/pingdom_v2_integration.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | 1
|
2020-11-07T11:27:13.000Z
|
2020-11-07T11:27:13.000Z
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.integration import Integration # noqa: F401,E501
from opsgenie_swagger.models.recipient import Recipient # noqa: F401,E501
from opsgenie_swagger.models.team_meta import TeamMeta # noqa: F401,E501
from opsgenie_swagger.models.token_based_incoming_feature import TokenBasedIncomingFeature # noqa: F401,E501
class PingdomV2Integration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_write_access': 'bool'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_write_access': 'allowWriteAccess'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, feature_type=None, allow_configuration_access=None, allow_write_access=None): # noqa: E501
"""PingdomV2Integration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_write_access = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this PingdomV2Integration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this PingdomV2Integration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this PingdomV2Integration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this PingdomV2Integration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this PingdomV2Integration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this PingdomV2Integration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this PingdomV2Integration. # noqa: E501
Optional user, schedule, team or escalation names used to determine which users will receive the notifications of the alert. Recipients exceeding the limit are ignored # noqa: E501
:return: The recipients of this PingdomV2Integration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this PingdomV2Integration.
Optional user, schedule, team or escalation names used to determine which users will receive the notifications of the alert. Recipients exceeding the limit are ignored # noqa: E501
:param recipients: The recipients of this PingdomV2Integration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this PingdomV2Integration. # noqa: E501
:return: The is_advanced of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this PingdomV2Integration.
:param is_advanced: The is_advanced of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def feature_type(self):
"""Gets the feature_type of this PingdomV2Integration. # noqa: E501
:return: The feature_type of this PingdomV2Integration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this PingdomV2Integration.
:param feature_type: The feature_type of this PingdomV2Integration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this PingdomV2Integration. # noqa: E501
This parameter allows or restricts configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:return: The allow_configuration_access of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this PingdomV2Integration.
This parameter allows or restricts configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this PingdomV2Integration. # noqa: E501
This parameter configures the read-only access of the integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:return: The allow_write_access of this PingdomV2Integration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this PingdomV2Integration.
This parameter configures the read-only access of the integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:param allow_write_access: The allow_write_access of this PingdomV2Integration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PingdomV2Integration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
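A short, hypothetical usage sketch of the generated model above; the field values are illustrative, and only the constructor, the validated `feature_type` setter, and `to_dict()` defined in this file are exercised:
from opsgenie_swagger.models.pingdom_v2_integration import PingdomV2Integration

# Build an instance with illustrative values; every keyword argument is optional.
integration = PingdomV2Integration(
    suppress_notifications=False,
    is_advanced=True,
    feature_type="token-based",  # must be "email-based" or "token-based"
    allow_write_access=False,
)

print(integration.to_dict())  # plain dict keyed by attribute name

# The setter validates against the allowed values and raises ValueError otherwise.
try:
    integration.feature_type = "webhook-based"
except ValueError as err:
    print(err)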
| 38.789308
| 265
| 0.681232
|
7950c906c06a92f8d60f92c22ba73aecc73fdf56
| 4,704
|
py
|
Python
|
mantichora/main.py
|
alphatwirl/mantichora
|
f1d15090b5e5a55c3553cd1692d7097178e10599
|
[
"BSD-3-Clause"
] | 13
|
2019-07-30T08:30:44.000Z
|
2021-02-11T22:25:29.000Z
|
mantichora/main.py
|
alphatwirl/mantichora
|
f1d15090b5e5a55c3553cd1692d7097178e10599
|
[
"BSD-3-Clause"
] | 5
|
2019-03-13T11:11:32.000Z
|
2021-05-31T21:56:10.000Z
|
mantichora/main.py
|
alphatwirl/mantichora
|
f1d15090b5e5a55c3553cd1692d7097178e10599
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T11:09:07.000Z
|
2021-04-20T11:09:07.000Z
|
# Tai Sakuma <tai.sakuma@gmail.com>
import functools
from .hubmp import MultiprocessingHub, mp_start_method_default
from .hubthreading import ThreadingHub
##__________________________________________________________________||
class mantichora:
"""A simple interface to multiprocessing and threading
https://github.com/alphatwirl/mantichora
Parameters
----------
nworkers : int, optional
The number of workers. The default is 4.
mode : str, 'multiprocessing' or 'threading'
The mode of concurrency. The default is 'multiprocessing'.
New in version 0.10.0
mp_start_method : str, 'fork', 'spawn', or 'forkserver'
The start method of multiprocessing. The default is 'fork'.
Each method is described in
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
This option is only relevant for the 'multiprocessing' mode.
On Jupyter Notebook, the 'fork' method is typically the best
choice.
The 'spawn' and "forkserver" have extra restrictions, for
example, on how the main module is written. The restrictions
are described at
https://docs.python.org/3/library/multiprocessing.html#the-spawn-and-forkserver-start-methods
On macOS, with the 'fork' method, errors with the message "may
have been in progress in another thread when fork() was
called" might occur. This error might be resolved by setting the
environment variable 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY' to
'YES', as suggested in
https://stackoverflow.com/questions/50168647/multiprocessing-causes-python-to-crash-and-gives-an-error-may-have-been-in-progr
New in version 0.9.9
"""
def __init__(self, nworkers=4, mode='multiprocessing', mp_start_method=mp_start_method_default):
if mode == 'multiprocessing':
self.hub = MultiprocessingHub(
nworkers=nworkers, progressbar=True,
mp_start_method=mp_start_method)
elif mode == 'threading':
self.hub = ThreadingHub(nworkers=nworkers)
else:
raise ValueError(("'mode' must be "
"'multiprocessing' or 'threading': "
"'{}' is given").format(mode))
self.hub.open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.terminate()
self.end()
def run(self, func, *args, **kwargs):
"""run a task function in a background process
Parameters
----------
func : callable
A task function to be run in a background process
args : list
Positional parameters to `func`
kwargs: dict
Keyword parameters to `func`
Returns
-------
int
run ID
"""
task_func = functools.partial(func, *args, **kwargs)
return self.hub.put(task_func)
def returns(self):
"""return a list of return values of the task functions
The return values are sorted in the order of the task
functions which have been given to `run()`
This method waits until all task functions finish.
Returns
-------
list
return values of the task functions
"""
pairs = self.receive_all() # list of pairs (runid, result)
return [p[1] for p in pairs]
def receive_one(self):
"""return a pair of the run ID and return value of a task function
This method waits until one task function finishes.
Returns
-------
list or None
a pair of the run ID and return value of a task function.
`None` if no task functions are outstanding.
"""
return self.hub.receive_one()
def receive_finished(self):
"""return pairs of the run IDs and return values of finished task function
This method doesn't wait.
Returns
-------
list or None
pairs of the run IDs and return values of task functions.
`None` if no task functions are outstanding.
"""
return self.hub.poll()
def receive_all(self):
"""return pairs of the run IDs and return values of all tasks
This function is obsolete, to be deleted
"""
return self.hub.receive()
def terminate(self):
"""terminate all tasks if possible
"""
self.hub.terminate()
def end(self):
"""end
"""
self.hub.close()
##__________________________________________________________________||
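A short usage sketch of the interface above, assuming the package exposes the `mantichora` class at the top level (as the import below does) and that the task function is picklable; the task itself is illustrative:
import time
from mantichora import mantichora  # assumed top-level export of the class above

def task(name, seconds):
    # Toy task: sleep, then return a summary string.
    time.sleep(seconds)
    return '{} slept {} s'.format(name, seconds)

with mantichora(nworkers=3) as mcore:
    mcore.run(task, 'one', 0.2)
    mcore.run(task, 'two', 0.1)
    mcore.run(task, 'three', 0.3)
    # returns() blocks until all tasks finish and keeps submission order.
    results = mcore.returns()

print(results)  # ['one slept 0.2 s', 'two slept 0.1 s', 'three slept 0.3 s']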
| 30.348387
| 133
| 0.617772
|
7950ca26f64be74e307cae891bf9e5c8cfb8892b
| 1,817
|
py
|
Python
|
akaocr/tools/train_recog.py
|
qai-research/Efficient_Text_Detection
|
e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b
|
[
"MIT"
] | 2
|
2021-04-28T04:13:09.000Z
|
2021-06-05T04:11:11.000Z
|
akaocr/tools/train_recog.py
|
qai-research/Efficient_Text_Detection
|
e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b
|
[
"MIT"
] | 2
|
2021-05-06T13:49:52.000Z
|
2021-05-14T08:45:13.000Z
|
akaocr/tools/train_recog.py
|
qai-research/Efficient_Text_Detection
|
e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
Created By : Nguyen Huu Kim - Kimnh3
Created Date: Mar 31, 2021 6:31pm GMT+0700
Project : AkaOCR core
_____________________________________________________________________________
This file contains code for training the text recognition model
_____________________________________________________________________________
"""
import sys
import torch
sys.path.append("../")
from models.recog.atten import Atten
from engine import Trainer
from engine.config import setup, parse_base
from engine.trainer.loop import CustomLoopHeat, CustomLoopAtten
from engine.build import build_dataloader
from engine.metric.accuracy import RecogAccuracy, DetecAccuracy
from engine.metric.evaluation import DetecEvaluation, RecogEvaluation
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def test_recog(args):
cfg = setup("recog", args)
cfg.SOLVER.DATA_SOURCE = args.data_recog
model = Atten(cfg)
model.to(device=cfg.SOLVER.DEVICE)
evaluate = RecogEvaluation(cfg)
acc = RecogAccuracy(cfg)
lossc = CustomLoopAtten(cfg)
train_loader = build_dataloader(cfg, args.data_recog)
test_loader = build_dataloader(cfg, args.data_test_recog)
trainer = Trainer(cfg, model, train_loader=train_loader, test_loader=test_loader, custom_loop=lossc, accuracy=acc,
evaluation=evaluate, resume=True)
trainer.do_train()
def main():
parser = parse_base()
parser.add_argument('--data_recog', type=str, default="../data/data_recog/train", help='path to recog data')
parser.add_argument('--data_test_recog', type=str, default="../data/data_recog/val", help='path to test recog data')
args = parser.parse_args()
test_recog(args)
if __name__ == '__main__':
main()
| 37.081633
| 120
| 0.764997
|
7950cad0aba338a20fa6a2472a1de11213f1c4d7
| 1,031
|
py
|
Python
|
py/py_0733_ascending_subsequences.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0733_ascending_subsequences.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0733_ascending_subsequences.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
# Solution of;
# Project Euler Problem 733: Ascending subsequences
# https://projecteuler.net/problem=733
#
# Let $a_i$ be the sequence defined by $a_i=153^i \bmod 10\,000\,019$ for $i
# \ge 1$. The first terms of $a_i$ are:$153, 23409, 3581577, 7980255, 976697,
# 9434375, \dots$Consider the subsequences consisting of 4 terms in ascending
# order. For the part of the sequence shown above, these are:$153, 23409,
# 3581577, 7980255$$153, 23409, 3581577, 9434375$$153, 23409, 7980255,
# 9434375$$153, 23409, 976697, 9434375$$153, 3581577, 7980255, 9434375$
# and$23409, 3581577, 7980255, 9434375$. Define $S(n)$ to be the sum of the
# terms for all such subsequences within the first $n$ terms of $a_i$. Thus
# $S(6)=94513710$. You are given that $S(100)=4465488724217$. Find $S(10^6)$
# modulo $1\,000\,000\,007$.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 733
timed.caller(dummy, n, i, prob_id)
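The solution body above is a placeholder (`dummy`). A brute-force sketch that reproduces the small cases quoted in the problem statement, using only the definitions given there; it is far too slow for n = 10^6 and is meant as a correctness check, not the intended solution:
from itertools import combinations

def s_brute(n):
    # a_i = 153**i mod 10,000,019 for i = 1..n
    terms = [pow(153, i, 10_000_019) for i in range(1, n + 1)]
    total = 0
    # Sum every term of every strictly ascending 4-term subsequence.
    for quad in combinations(terms, 4):
        if quad[0] < quad[1] < quad[2] < quad[3]:
            total += sum(quad)
    return total

assert s_brute(6) == 94513710
assert s_brute(100) == 4465488724217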
| 35.551724
| 78
| 0.685742
|
7950ccac069280e3929efd1c7f46727119645f31
| 4,826
|
py
|
Python
|
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 5
|
2017-07-20T11:15:10.000Z
|
2020-04-16T15:42:55.000Z
|
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 8
|
2020-06-18T17:31:19.000Z
|
2022-03-02T08:32:03.000Z
|
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
|
Syndra/Ambari-source
|
717526b2bf3636622212b14de0d3d298a20c7370
|
[
"Apache-2.0"
] | 12
|
2017-05-17T09:48:01.000Z
|
2021-08-05T19:01:25.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from resource_management.core.source import Template
from resource_management.core.logger import Logger
from install_jars import install_tez_jars
from yarn import yarn
from service import service
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HistoryServer(Script):
def install(self, env):
self.install_packages(env)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service('historyserver', action='stop', serviceName='mapreduce')
def configure(self, env):
import params
env.set_params(params)
yarn(name="historyserver")
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HistoryserverWindows(HistoryServer):
def start(self, env):
import params
env.set_params(params)
self.configure(env)
service('historyserver', action='start', serviceName='mapreduce')
def status(self, env):
service('historyserver', action='status')
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HistoryServerDefault(HistoryServer):
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select_packages(params.version)
# MC Hammer said, "Can't touch this"
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
if check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.version_for_stack_feature_checks):
# MC Hammer said, "Can't touch this"
resource_created = copy_to_hdfs(
"mapreduce",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs)
resource_created = copy_to_hdfs(
"tez",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
resource_created = copy_to_hdfs(
"slider",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
else:
# In stack versions before copy_tarball_to_hdfs support, tez.tar.gz was copied to a different folder in HDFS.
install_tez_jars()
service('historyserver', action='start', serviceName='mapreduce')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.mapred_historyserver_pid_file)
def get_log_folder(self):
import params
return params.mapred_log_dir
def get_user(self):
import params
return params.mapred_user
if __name__ == "__main__":
HistoryServer().execute()
| 37.123077
| 114
| 0.779113
|