hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87d492ad79ab7c40cf3c50a41825cf7c13977632 | 720 | py | Python | test.py | glasses-n-contacts/imessage-analyzer | 9c3a933683c21fb4f1e52d0055d9cd89c886b46f | [
"MIT"
] | 5 | 2018-05-20T06:07:36.000Z | 2018-09-24T14:14:48.000Z | test.py | glasses-n-contacts/imessage-analyzer | 9c3a933683c21fb4f1e52d0055d9cd89c886b46f | [
"MIT"
] | 3 | 2018-05-18T05:39:27.000Z | 2018-06-14T14:51:12.000Z | test.py | glasses-n-contacts/imessage-analyzer | 9c3a933683c21fb4f1e52d0055d9cd89c886b46f | [
"MIT"
] | null | null | null | import nltk
import random
from nltk.corpus import movie_reviews
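# find_features: mark each feature word True/False depending on whether it appears in the document.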
def find_features(document):
words = set(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
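# Pair each movie review's word list with its sentiment category ('pos'/'neg').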
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
random.shuffle(documents)
all_words = []
for w in movie_reviews.words():
all_words.append(w.lower())
all_words = nltk.FreqDist(all_words)
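# Use the first 3000 vocabulary entries as feature words (FreqDist.keys() is not guaranteed to be frequency-sorted).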
word_features = list(all_words.keys())[:3000]
featuresets = [(find_features(rev), category) for (rev, category) in documents]
for feature in featuresets:
print(feature) | 22.5 | 79 | 0.705556 |
cbfd818f77967d7f959ffee11caf6978dfd8feea | 3,449 | py | Python | hdfs_namenode/datadog_checks/hdfs_namenode/config_models/instance.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | hdfs_namenode/datadog_checks/hdfs_namenode/config_models/instance.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | hdfs_namenode/datadog_checks/hdfs_namenode/config_models/instance.py | kjmadscience/integrations-core | 663bdf44730dd6c9f3565c121318b320bfcb4988 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class AuthToken(BaseModel):
class Config:
allow_mutation = False
reader: Optional[Mapping[str, Any]]
writer: Optional[Mapping[str, Any]]
class MetricPatterns(BaseModel):
class Config:
allow_mutation = False
exclude: Optional[Sequence[str]]
include: Optional[Sequence[str]]
class Proxy(BaseModel):
class Config:
allow_mutation = False
http: Optional[str]
https: Optional[str]
no_proxy: Optional[Sequence[str]]
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
allow_redirects: Optional[bool]
auth_token: Optional[AuthToken]
auth_type: Optional[str]
aws_host: Optional[str]
aws_region: Optional[str]
aws_service: Optional[str]
connect_timeout: Optional[float]
disable_generic_tags: Optional[bool]
empty_default_hostname: Optional[bool]
extra_headers: Optional[Mapping[str, Any]]
hdfs_namenode_jmx_uri: str
headers: Optional[Mapping[str, Any]]
kerberos_auth: Optional[str]
kerberos_cache: Optional[str]
kerberos_delegate: Optional[bool]
kerberos_force_initiate: Optional[bool]
kerberos_hostname: Optional[str]
kerberos_keytab: Optional[str]
kerberos_principal: Optional[str]
log_requests: Optional[bool]
metric_patterns: Optional[MetricPatterns]
min_collection_interval: Optional[float]
ntlm_domain: Optional[str]
password: Optional[str]
persist_connections: Optional[bool]
proxy: Optional[Proxy]
read_timeout: Optional[float]
request_size: Optional[float]
service: Optional[str]
skip_proxy: Optional[bool]
tags: Optional[Sequence[str]]
timeout: Optional[float]
tls_ca_cert: Optional[str]
tls_cert: Optional[str]
tls_ignore_warning: Optional[bool]
tls_private_key: Optional[str]
tls_protocols_allowed: Optional[Sequence[str]]
tls_use_host_header: Optional[bool]
tls_verify: Optional[bool]
use_legacy_auth_encoding: Optional[bool]
username: Optional[str]
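    # Pydantic hooks: initialize the raw config, fill in defaults for unset fields, run per-field validators, then finalize.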
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
| 30.254386 | 110 | 0.722818 |
7bd96baa1a57decf5a762c598ac47e12c23ee21b | 1,040 | py | Python | setup.py | RealCyGuy/random-mitype | 100a1da4fac63d9f4b8c6810fd7186f24db12c19 | [
"MIT"
] | null | null | null | setup.py | RealCyGuy/random-mitype | 100a1da4fac63d9f4b8c6810fd7186f24db12c19 | [
"MIT"
] | null | null | null | setup.py | RealCyGuy/random-mitype | 100a1da4fac63d9f4b8c6810fd7186f24db12c19 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
# exec(open("random_mitype/version.py").read())
setup(
name="random_mitype",
version="0.1.3",
packages=find_packages(),
include_package_data=True,
description="Generate random typing tests for mitype.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Cyrus Yip",
install_requires=["Click"],
entry_points={"console_scripts": ["random-mitype=random_mitype.random_mitype:cli", ], },
license="MIT",
url="https://github.com/RealCyGuy/random-mitype",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Natural Language :: English",
"Topic :: Terminals",
"Environment :: Console :: Curses",
"Development Status :: 4 - Beta",
],
)
| 32.5 | 92 | 0.650962 |
868188d60fd58d03b3ff27af06f52428d63f38c9 | 7,960 | py | Python | tensorflow_hub/keras_layer.py | park0866/hub | 54a242de6e148d0836c4a58308b07bbe79c274b6 | [
"Apache-2.0"
] | 1 | 2019-04-25T09:29:35.000Z | 2019-04-25T09:29:35.000Z | tensorflow_hub/keras_layer.py | AnalystSubranjit/hub | c2ff9d4ae06fe600633694f81b859aead189754c | [
"Apache-2.0"
] | null | null | null | tensorflow_hub/keras_layer.py | AnalystSubranjit/hub | c2ff9d4ae06fe600633694f81b859aead189754c | [
"Apache-2.0"
] | 1 | 2019-06-04T08:48:12.000Z | 2019-06-04T08:48:12.000Z | # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Keras Layer for using TF Hub modules in TF2 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import six
import tensorflow as tf
from tensorflow_hub import module_v2
# ATTENTION: This file uses private imports from TF2.
# __init__ may not import this file if tensorflow is too old.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import smart_cond
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.util import tf_inspect
# pylint: enable=g-direct-tensorflow-import
class KerasLayer(tf.keras.layers.Layer):
"""Wraps a Hub module (or a similar callable) for TF2 as a Keras Layer.
This layer wraps a callable object for use as a Keras layer. The callable
object can be passed directly, or be specified by a Python string with a
handle that gets passed to `hub.load()`.
The callable object is expected to follow the conventions detailed below.
(These are met by TF2-compatible modules loaded from TensorFlow Hub.)
The callable is invoked with a single positional argument set to one tensor
or a list of tensors containing the inputs to the layer. If the callable
accepts a `training` argument, a Python boolean is passed for it.
If present, the following attributes of callable are understood to have
special meanings:
variables: a list of all tf.Variable objects that the callable depends on.
trainable_variables: those elements of `variables` that are reported
as trainable variables of this Keras Layer.
regularization_losses: a list of callables to be added as losses of this
Keras Layer. Each one must accept zero arguments and return a scalar
tensor.
Note: to work-around missing shape inference functionalities from functions
created from FunctionDefs, in many cases one has to pass an 'output_shape'
and potentially 'input_shape' and 'dtype'. E.g. the following is a typical
work-around:
```
hub.KerasLayer(
"/tmp/text_embedding_model",
output_shape=[20], # Outputs a tensor with shape [batch_size, 20].
input_shape=[], # Expects a tensor of shape [batch_size] as input.
dtype=tf.string) # Expects a tf.string input tensor.
```
Args:
handle: a callable object (subject to the conventions above), or a
Python string for which hub.load() returns such a callable.
A string is required to save the Keras config of this Layer.
trainable: Boolean controlling whether the trainable variables of the
callable are reported as trainable variables of this layer.
arguments: optionally, a dict with additional keyword arguments passed
to the callable. These must be JSON-serializable to save the Keras config
of this layer.
**kwargs: 'output_shape': A tuple with the (possibly partial) output
shape of the callable *without* leading batch size. Other arguments
      are passed into the Layer constructor.
"""
def __init__(self, handle, trainable=False, arguments=None, **kwargs):
# Note: for compatibility with keras-model serialization this layer is
# json-serializable. If you add or change arguments here, please also update
# the `get_config` method.
self._handle = handle
# Resolve the handle to a callable `func`.
if callable(handle):
self._func = handle
else:
self._func = module_v2.load(handle)
if not callable(self._func):
raise ValueError("Non-callable result from hub.load('%s')" %
str(handle))
# TODO(b/124219898): We should do shape inference on the callable.
if "output_shape" in kwargs:
self._output_shape = tuple(kwargs.pop("output_shape"))
# Initialize an empty layer, then add_weight() etc. as needed.
super(KerasLayer, self).__init__(trainable=trainable, **kwargs)
# Add trainable and non-trainable weights from the callable.
if hasattr(self._func, "trainable_variables"):
for v in self._func.trainable_variables:
self._add_existing_weight(v, trainable=True)
trainable_variables = set(self._func.trainable_variables)
else:
trainable_variables = set()
if hasattr(self._func, "variables"):
for v in self._func.variables:
if v not in trainable_variables:
self._add_existing_weight(v, trainable=False)
# Forward the callable's regularization losses (if any).
if hasattr(self._func, "regularization_losses"):
for l in self._func.regularization_losses:
if not callable(l):
raise ValueError(
"hub.KerasLayer(obj) expects obj.regularization_losses to be an "
"iterable of callables, each returning a scalar loss term.")
self.add_loss(l) # Supports callables.
# Prepare to call `func`.
self._func_fullargspec = tf_inspect.getfullargspec(self._func.__call__)
self._func_wants_training = (
"training" in self._func_fullargspec.args or
"training" in self._func_fullargspec.kwonlyargs)
if arguments is not None:
self._arguments = arguments
def _add_existing_weight(self, weight, trainable=None):
"""Calls add_weight() to register but not create an existing weight."""
if trainable is None: trainable = weight.trainable
self.add_weight(name=weight.name, shape=weight.shape, dtype=weight.dtype,
trainable=trainable, getter=lambda *_, **__: weight)
def call(self, inputs, training=None):
# We basically want to call this...
kwargs = getattr(self, "_arguments", None)
if kwargs is None:
kwargs = {}
f = functools.partial(self._func, inputs, **kwargs)
# ...but we may also have to pass a Python boolean for `training`.
if not self._func_wants_training:
result = f()
else:
if training is None:
training = tf.keras.backend.learning_phase() # Could be a tensor.
result = smart_cond.smart_cond(training,
lambda: f(training=True),
lambda: f(training=False))
# TODO(b/124219898): Polymorphic function should return shaped tensor.
if hasattr(self, "_output_shape"):
result.set_shape((inputs.shape[0],) + self._output_shape)
return result
def get_config(self):
config = super(KerasLayer, self).get_config()
if not isinstance(self._handle, six.string_types):
raise ValueError(
"Can only generate a valid config for `hub.KerasLayer(handle, ...)`"
"that uses a string `handle`.\n\n"
"Got `type(handle)`: {}".format(type(self._handle)))
config.update({
"handle": self._handle,
})
if hasattr(self, "_output_shape"):
config["output_shape"] = self._output_shape
if hasattr(self, "_arguments"):
# Raise clear errors for non-serializable arguments.
for key, value in self._arguments.items():
try:
json.dumps(value)
except TypeError as e:
raise ValueError(
"`hub.KerasLayer(..., arguments)` contains non json-serializable"
"values in key: {}".format(key))
config["arguments"] = self._arguments
return config
| 41.675393 | 80 | 0.693844 |
9f22b270a1bbffd4ebc8ac3baf6d76324c08b284 | 982 | py | Python | python/custreamz/setup.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 239 | 2018-10-10T09:55:22.000Z | 2018-10-28T20:47:23.000Z | python/custreamz/setup.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 25 | 2018-10-10T14:46:32.000Z | 2018-10-28T22:16:14.000Z | python/custreamz/setup.py | Ahsantw/cudf | e099688d5ca7dd20104930485a829881a68c522a | [
"Apache-2.0"
] | 19 | 2018-10-10T12:42:51.000Z | 2018-10-26T16:33:22.000Z | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
from setuptools import find_packages, setup
import versioneer
install_requires = ["cudf_kafka", "cudf"]
setup(
name="custreamz",
version=versioneer.get_version(),
description="cuStreamz - GPU Accelerated Streaming",
url="https://github.com/rapidsai/cudf",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Streaming",
"Topic :: Scientific/Engineering",
"Topic :: Apache Kafka",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=find_packages(include=["custreamz", "custreamz.*"]),
cmdclass=versioneer.get_cmdclass(),
install_requires=install_requires,
zip_safe=False,
extras_require={"test": ["pytest", "pytest-xdist"]},
)
| 30.6875 | 65 | 0.653768 |
128b707f51ad8ce2e249a6747b2ebda1c2899e7e | 6,362 | py | Python | validate.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 6 | 2019-07-11T18:00:19.000Z | 2022-01-27T22:50:57.000Z | validate.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 1 | 2019-02-26T14:20:32.000Z | 2019-02-26T14:20:32.000Z | validate.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 7 | 2019-01-07T19:30:51.000Z | 2021-05-17T19:23:23.000Z | '''
Samuel Remedios
NIH CC CNRM
Predict contrast of an image.
'''
import os
import sys
from tqdm import tqdm
import time
import shutil
import json
from operator import itemgetter
from datetime import datetime
import numpy as np
from sklearn.utils import shuffle
from utils.load_data import load_data, load_image, load_slice_data
from utils.utils import now, parse_args, get_classes, record_results
from utils.preprocess import preprocess_dir
from utils.patch_ops import load_patch_data
from keras.models import load_model, model_from_json
from keras import backend as K
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if __name__ == '__main__':
############### DIRECTORIES ###############
results = parse_args("validate")
NUM_GPUS = 1
if results.GPUID == None:
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
elif results.GPUID == -1:
NUM_GPUS = 3
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)
VAL_DIR = os.path.abspath(os.path.expanduser(results.VAL_DIR))
CUR_DIR = os.path.abspath(
os.path.expanduser(
os.path.dirname(__file__)
)
)
PREPROCESSED_DIR = os.path.join(VAL_DIR, "preprocess")
if not os.path.exists(PREPROCESSED_DIR):
os.makedirs(PREPROCESSED_DIR)
############### MODEL SELECTION ###############
with open(results.model) as json_data:
model = model_from_json(json.load(json_data))
model.load_weights(results.weights)
############### PREPROCESSING ###############
classes = results.classes.replace(" ", "").split(',')
preprocess_dir(VAL_DIR,
PREPROCESSED_DIR,
classes,
results.numcores)
# get class encodings
class_encodings = get_classes(classes)
print(class_encodings)
############### DATA IMPORT ###############
patch_size = tuple([int(x) for x in results.patch_size.split('x')])
X, y, filenames, num_classes, img_shape = load_patch_data(PREPROCESSED_DIR,
patch_size=patch_size,
num_patches=results.num_patches,
classes=classes)
############### PREDICT ###############
PRED_DIR = results.OUT_DIR
if not os.path.exists(PRED_DIR):
os.makedirs(PRED_DIR)
BATCH_SIZE = 2**10
# make predictions with best weights and save results
preds = model.predict(X, batch_size=BATCH_SIZE, verbose=1)
# track overall accuracy
acc_count = len(set(filenames))
unsure_count = 0
total = len(set(filenames))
total_sure_only = len(set(filenames))
print("PREDICTION COMPLETE")
############### AGGREGATE PATCHES ###############
print("AGGREGATING RESULTS")
# initialize aggregate
final_pred_scores = {}
final_ground_truth = {}
pred_shape = preds[0].shape
for filename in tqdm(set(filenames)):
final_pred_scores[filename] = np.zeros(pred_shape)
    # possibly faster; needs a unit test
from itertools import groupby
TOTAL_ELEMENTS = len(set(filenames))
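    # Group the consecutive (filename, prediction) pairs by file and average each file's patch predictions into a single score vector.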
final_pred_scores = {k: v for k, v in
(tqdm(map(lambda pair: (pair[0],
np.mean([p[1] for p in pair[1]], axis=0)),
groupby(zip(filenames, preds), lambda i: i[0])),
total=TOTAL_ELEMENTS))}
print("Num filenames: {}".format(len(filenames)))
print("Num preds: {}".format(len(preds)))
print("Shape of y: {}".format(y.shape))
for i in tqdm(range(len(preds))):
final_ground_truth[filenames[i]] = y[i]
print("RECORDING RESULTS")
############### RECORD RESULTS ###############
    # the gap between the highest and lowest class score must be at least 0.80 to count as a confident prediction
surety_threshold = .80
with open(os.path.join(PRED_DIR, now()+"_results.txt"), 'w') as f:
with open(os.path.join(PRED_DIR, now()+"_results_errors.txt"), 'w') as e:
for filename, pred in final_pred_scores.items():
surety = np.max(pred) - np.min(pred)
# check for surety
if surety < surety_threshold:
pos = "??" # unknown
f.write("UNSURE for {:<10} with {:<50}".format(
pos, filename))
unsure_count += 1
total_sure_only -= 1
acc_count -= 1
f.write("{:<10}\t{:<50}".format(pos, filename))
confidences = ", ".join(
["{:>5.2f}".format(x*100) for x in pred])
f.write("Confidences: {}\n".format(confidences))
else:
# find class of prediction via max
max_idx, max_val = max(enumerate(pred), key=itemgetter(1))
max_true, val_true = max(
enumerate(final_ground_truth[filename]), key=itemgetter(1))
pos = class_encodings[max_idx]
# record confidences
confidences = ", ".join(
["{:>5.2f}".format(x*100) for x in pred])
if max_idx == max_true:
f.write("CORRECT for {:<10} with {:<50}".format(
pos, filename))
else:
f.write("INCRRCT for {:<10} {:<50}".format(
pos, filename))
e.write("{:<10}\t{:<50}".format(pos, filename))
e.write("Confidences: {}\n".format(confidences))
acc_count -= 1
f.write("Confidences: {}\n".format(confidences))
f.write("{} of {} images correctly classified.\nUnsure Number: {}\nAccuracy: {:.2f}\nAccuracy Excluding Unsure: {:.2f}".format(
str(acc_count),
str(total),
str(unsure_count),
acc_count/total * 100.,
acc_count/total_sure_only * 100.,))
print("{} of {} images correctly classified.\nAccuracy: {:.2f}\n".format(
str(acc_count),
str(total),
acc_count/total * 100.))
# prevent small crash from TensorFlow/Keras session close bug
K.clear_session()
| 34.576087 | 139 | 0.537881 |
42f8dcdc22cc1848bc6cbb3935aed936af95e074 | 5,750 | py | Python | qa/rpc-tests/test_framework/socks5.py | arthurgarcia/cruzeta | 53f1bf16704afbb2ad2e78bfb1c9ed0b722ba7ed | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/socks5.py | arthurgarcia/cruzeta | 53f1bf16704afbb2ad2e78bfb1c9ed0b722ba7ed | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/socks5.py | arthurgarcia/cruzeta | 53f1bf16704afbb2ad2e78bfb1c9ed0b722ba7ed | [
"MIT"
] | null | null | null | # Copyright (c) 2015 The Bitcoin Core developers
# Copyright (c) 2017-2018 The CruZeta developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
### Utility functions
def recvall(s, n):
'''Receive n bytes from a socket, or fail'''
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
### Implementation classes
class Socks5Configuration(object):
'''Proxy configuration'''
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command(object):
'''Information about an incoming socks5 command'''
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection(object):
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
'''
Handle socks5 request according to RFC1928
'''
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
(ver,cmd,rsv,atyp) = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = str(recvall(self.conn, n))
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
print('Proxy: ', cmdin)
# Fall through to disconnect
except Exception,e:
traceback.print_exc(file=sys.stderr)
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server(object):
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = Queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
| 35.493827 | 125 | 0.574435 |
90cbddb3ef2859242eadd416e33109a9f0d66b6d | 3,208 | py | Python | kpe/BertKPE/bertkpe/networks/Roberta2Tag.py | thunlp/COVID19IRQA | fe359ce12ce38fd74ccc004cc524ec6011580023 | [
"MIT"
] | 32 | 2020-03-26T17:03:54.000Z | 2021-09-10T08:30:48.000Z | kpe/BertKPE/bertkpe/networks/Roberta2Tag.py | thunlp/COVID19IRQA | fe359ce12ce38fd74ccc004cc524ec6011580023 | [
"MIT"
] | 1 | 2020-04-06T16:35:12.000Z | 2020-04-13T07:08:14.000Z | kpe/BertKPE/bertkpe/networks/Roberta2Tag.py | thunlp/COVID19IRQA | fe359ce12ce38fd74ccc004cc524ec6011580023 | [
"MIT"
] | 6 | 2020-03-28T05:07:22.000Z | 2021-03-04T01:46:00.000Z | import torch
import logging
import numpy as np
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from ..transformers import RobertaForTokenClassification
logger = logging.getLogger()
class RobertaForSeqTagging(RobertaForTokenClassification):
def forward(self, input_ids, attention_mask, valid_ids, active_mask, valid_output, labels=None):
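        # valid_ids marks the first sub-token of each original word; active_mask marks the positions that contribute to the loss.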
# --------------------------------------------------------------------------------
# Bert Embedding Outputs
outputs = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
sequence_output = outputs[0]
# --------------------------------------------------------------------------------
# Valid Outputs : get first token vector
batch_size = sequence_output.size(0)
for i in range(batch_size):
valid_num = sum(valid_ids[i]).item()
vectors = sequence_output[i][valid_ids[i] == 1]
valid_output[i, :valid_num].copy_(vectors)
# --------------------------------------------------------------------------------
# Dropout
sequence_output = self.dropout(valid_output)
logits = self.classifier(sequence_output)
# --------------------------------------------------------------------------------
# Active Logits
active_loss = active_mask.view(-1) == 1 # [False, True, ...]
active_logits = logits.view(-1, self.num_labels)[active_loss] # False
if labels is not None:
loss_fct = CrossEntropyLoss()
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
return loss
else:
return active_logits
# class BertForSeqTagging(BertForTokenClassification):
# def forward(self, input_ids, attention_mask, valid_ids, active_mask, labels=None):
# outputs = self.bert(input_ids=input_ids,
# attention_mask=attention_mask)
# sequence_output = outputs[0]
# batch_size, max_len, feature_dim = sequence_output.shape
# valid_output = torch.zeros(batch_size, max_len, feature_dim,
# dtype=torch.float32, device='cuda')
# # get valid outputs : first tokens
# for i in range(batch_size):
# k = -1
# for j in range(max_len):
# if valid_ids[i][j].item() == 1:
# k += 1
# valid_output[i][k] = sequence_output[i][j]
# sequence_output = self.dropout(valid_output)
# logits = self.classifier(sequence_output)
# active_loss = active_mask.view(-1) == 1 # [False, True, ...]
# active_logits = logits.view(-1, self.num_labels)[active_loss] # False
# if labels is not None:
# loss_fct = CrossEntropyLoss()
# active_labels = labels.view(-1)[active_loss]
# loss = loss_fct(active_logits, active_labels)
# return loss
# else:
# return active_logits | 39.604938 | 100 | 0.525873 |
c53de294624ca76064953734c3d429a4be9be00c | 4,021 | py | Python | mainwin.py | mmahnic/libreoffice-review | 658722cbb0e14e574e4867cfd0ea06a000bc4989 | [
"MIT"
] | 2 | 2018-12-11T19:02:38.000Z | 2019-09-12T06:21:58.000Z | mainwin.py | mmahnic/git-libreoffice-review | 658722cbb0e14e574e4867cfd0ea06a000bc4989 | [
"MIT"
] | null | null | null | mainwin.py | mmahnic/git-libreoffice-review | 658722cbb0e14e574e4867cfd0ea06a000bc4989 | [
"MIT"
] | null | null | null | import os, re
import gitjobs
import mainwin_ui_support as support
import textreport
from generator import DiffGeneratorSettings, DiffGenerator, OverviewGenerator
from odt import OdtGenerator
from tktext import TkTextGenerator
from settings import globalSettings, APPTITLE
def setupSupport():
"""Replace the globals in mainwin_ui_support with globals from this module
so that ui_support can be freely regenerated."""
support.generateDiffDocument = generateDiffDocumentCb
support.addBranchDiffFromCommonAncestor = addBranchDiffFromCommonAncestorCb
support.displayDiffPreview = displayDiffPreviewCb
support.init = onInit
def onInit(top, gui, *args, **kwargs):
# global w, top_level, root
support.w = gui
support.top_level = top
support.root = top
gui.top_level = top
prepareMainWindow(gui)
def prepareMainWindow(gui):
frame = gui.top_level
gitroot = globalSettings.gitRoot()
branches, curBranch = gitjobs.getBranches( gitroot )
title = "{} - {} ({})".format(
APPTITLE,
os.path.basename(gitroot),
os.path.dirname(gitroot))
frame.title(title)
gui.comboBaseBranch.configure(values=branches)
gui.comboToBranch.configure(values=branches)
if "develop" in branches:
gui.varBaseBranch.set( "develop" )
elif "master" in branches:
gui.varBaseBranch.set( "master" )
if curBranch not in ["develop", "master"]:
gui.varToBranch.set( curBranch )
# TODO: default ignore patterns should be read from a config file
ignored = [ "*.sln", "*.vcxproj", "*.filters", "*.svg", "*.rc", "**/autogen/**",
"*.odt", "*.fodt", "*.odg", "*.fodg" ]
gui.txtFilters.insert( 1.0, "\n".join(ignored) )
def _findCommitIdForName( lines ):
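    # Extract a commit id: take the text after ".." in the first range-style line, otherwise the second
    # whitespace-separated field of the first multi-field line, then sanitize it for use in a file name.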
commitId = ""
for l in lines:
if l.find("..") > 0:
commitId = l.split("..")[1].strip(". \t")
break
if len(commitId) < 1:
for l in lines:
if len(l.split()) > 1:
commitId = l.split()[1]
break
return re.sub( "[^a-zA-Z0-9]+", "_", commitId )
def updateDocumentNameCb(gui):
txtIds = gui.txtCommitIds
text = txtIds.get( "1.0", "end-1c" ).strip()
lines = [ l.strip() for l in text.split("\n") if len(l.strip()) > 0 ]
commitId = _findCommitIdForName( lines )
repo = os.path.basename(globalSettings.gitRoot())
repo = re.sub( "[^a-zA-Z0-9]+", "_", repo )
if len(commitId) > 0:
gui.varName.set( "{}-{}".format(repo, commitId) )
else:
gui.varName.set( "{}".format(repo) )
def generateDiffDocumentCb(gui):
settings = DiffGeneratorSettings.fromGuiFields(gui)
settings.rootDir = globalSettings.gitRoot()
diffcmd = DiffGenerator(settings)
overviewCmd = OverviewGenerator(settings)
diffgen = OdtGenerator(settings)
diffgen.writeDocument( diffcmd, overviewCmd )
def displayDiffPreviewCb(gui):
settings = DiffGeneratorSettings.fromGuiFields(gui)
settings.rootDir = globalSettings.gitRoot()
(reportFrame, reportGui) = textreport.getOrCreateTextReport(gui.top_level)
reportGui.top_level.title( "Diff Preview" )
diffcmd = DiffGenerator(settings)
overviewCmd = OverviewGenerator(settings)
tkText = reportGui.txtReport
tkText.delete( 0.0, "end" )
diffgen = TkTextGenerator(settings, tkText)
diffgen.writeDocument( diffcmd, overviewCmd )
def addBranchDiffFromCommonAncestorCb(gui):
def fixBranch( branch ):
return "HEAD" if len(branch.strip()) == 0 else branch.strip()
fromBranch = fixBranch(gui.varBaseBranch.get())
toBranch = fixBranch(gui.varToBranch.get())
txtIds = gui.txtCommitIds
text = txtIds.get( "1.0", "end-1c" ).strip()
lines = text.split( "\n" ) if len(text) > 0 else []
lines.append( "{}...{}".format( fromBranch, toBranch ) )
txtIds.delete( 0.0, "end" )
txtIds.insert( 0.0, "\n".join( lines ))
if len(gui.varName.get().strip()) < 1:
updateDocumentNameCb(gui)
| 30.462121 | 84 | 0.654812 |
2646b2c12ecfe4c10d1164ec6c74806e16e9413a | 907 | py | Python | rviz/src/test/send_polygon.py | romi2002/rviz | 8b2fcc1838e079d0e365894abd7cfd7b255b8d8b | [
"BSD-3-Clause-Clear"
] | 9 | 2017-12-17T07:43:15.000Z | 2021-10-10T15:03:39.000Z | rviz/src/test/send_polygon.py | romi2002/rviz | 8b2fcc1838e079d0e365894abd7cfd7b255b8d8b | [
"BSD-3-Clause-Clear"
] | 2 | 2019-02-12T21:55:08.000Z | 2019-02-20T01:01:24.000Z | rviz/src/test/send_polygon.py | romi2002/rviz | 8b2fcc1838e079d0e365894abd7cfd7b255b8d8b | [
"BSD-3-Clause-Clear"
] | 9 | 2018-09-09T20:48:17.000Z | 2021-03-11T11:22:52.000Z | #!/usr/bin/env python
import roslib; roslib.load_manifest('rviz')
from geometry_msgs.msg import PolygonStamped
from geometry_msgs.msg import Point32
import math
import rospy
topic = 'test_polygon'
publisher = rospy.Publisher(topic, PolygonStamped)
rospy.init_node('send_polygon')
t = 0
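# Publish a 10-vertex polygon that alternates between an inner and an outer radius (a star shape) and pulses as t advances.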
while not rospy.is_shutdown():
p = PolygonStamped()
p.header.frame_id = "/base_link"
p.header.stamp = rospy.Time.now()
dr = 0.5 * math.cos( t )
radii = [ 1-dr, 1+dr ]
radius_index = 0
num_points = 10
for i in range( 0, num_points ):
point = Point32()
radius = radii[ radius_index ]
radius_index = (radius_index + 1) % 2
point.x = radius * math.cos( i * 2 * math.pi / num_points )
point.y = radius * math.sin( i * 2 * math.pi / num_points )
point.z = 0
p.polygon.points.append( point )
publisher.publish( p )
t += .1
rospy.sleep(0.03)
| 23.25641 | 65 | 0.647189 |
b0aaf7380acacebcdfdc3606adbd3211bfa9837a | 28,459 | py | Python | BERT_NER_copy2.py | zjsuper/cancerber_ner | e84e70ea08c267310ebd7acdf127d5357bfd63a4 | [
"Apache-2.0"
] | null | null | null | BERT_NER_copy2.py | zjsuper/cancerber_ner | e84e70ea08c267310ebd7acdf127d5357bfd63a4 | [
"Apache-2.0"
] | null | null | null | BERT_NER_copy2.py | zjsuper/cancerber_ner | e84e70ea08c267310ebd7acdf127d5357bfd63a4 | [
"Apache-2.0"
] | null | null | null | #! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
# Copyright 2018 The Google AI Language Team Authors.
# Copyright 2019 The BioNLP-HZAU Kaiyin Zhou
# Time:2019/04/08
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
from absl import flags,logging
from bert import modeling
from bert import optimization
from bert import tokenization
import tensorflow as tf
import metrics
import numpy as np
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
# If you downloaded a cased checkpoint you should use "False"; if uncased, use "True".
# For biomedical text, not lower-casing the input usually works better.
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_string("middle_output", "middle_data", "Dir was used to store middle data!")
flags.DEFINE_bool("crf", True, "use crf!")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text = text
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
battches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
mask,
segment_ids,
label_ids,
is_real_example=True):
self.input_ids = input_ids
self.mask = mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_data(cls,input_file):
"""Read a BIO data!"""
rf = open(input_file,'r')
lines = [];words = [];labels = []
for line in rf:
word = line.strip().split(' ')[0]
label = line.strip().split(' ')[-1]
            # here we don't do a "DOCSTART" check
            if len(line.strip())==0 and words and words[-1] == '.':
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
lines.append((l,w))
words=[]
labels = []
words.append(word)
labels.append(label)
rf.close()
return lines
class NerProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_test_examples(self,data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "test.txt")), "test"
)
def get_labels(self):
"""
here "X" used to represent "##eer","##soo" and so on!
"[PAD]" for padding
:return:
"""
return ["[PAD]","B-stage","B-grade-value","I-htype-value", "B-laterality-value", "B-size-value","B-htype", "B-stage-value", "B-grade", "I-site-value","B-receptor","B-site","B-laterality","O","B-site-value","B-htype-value","B-size","B-receptor-status","I-size-value","I-receptor", "X","[CLS]","[SEP]"]
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
texts = tokenization.convert_to_unicode(line[1])
labels = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text=texts, label=labels))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, mode):
"""
:param ex_index: example num
:param example:
:param label_list: all labels
:param max_seq_length:
:param tokenizer: WordPiece tokenization
:param mode:
:return: feature
IN this part we should rebuild input sentences to the following format.
example:[Jim,Hen,##son,was,a,puppet,##eer]
labels: [I-PER,I-PER,X,O,O,O,X]
"""
label_map = {}
    # start from zero so that "[PAD]" maps to id 0
for (i,label) in enumerate(label_list):
label_map[label] = i
with open(FLAGS.middle_output+"/label2id.pkl",'wb') as w:
pickle.dump(label_map,w)
textlist = example.text.split(' ')
labellist = example.label.split(' ')
tokens = []
labels = []
for i,(word,label) in enumerate(zip(textlist,labellist)):
token = tokenizer.tokenize(word)
tokens.extend(token)
for i,_ in enumerate(token):
if i==0:
labels.append(label)
else:
labels.append("X")
    # only account for [CLS], hence the "- 1".
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 1)]
labels = labels[0:(max_seq_length - 1)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
    # After the tokens we don't append "[SEP]": the sentence doesn't need a stop tag,
    # and adding one can even cause problems, especially when the CRF layer is used.
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
mask = [1]*len(input_ids)
    # pad with zeros up to max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
mask.append(0)
segment_ids.append(0)
label_ids.append(0)
ntokens.append("[PAD]")
assert len(input_ids) == max_seq_length
assert len(mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(ntokens) == max_seq_length
if ex_index < 3:
logging.info("*** Example ***")
logging.info("guid: %s" % (example.guid))
logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s" % " ".join([str(x) for x in mask]))
logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
mask=mask,
segment_ids=segment_ids,
label_ids=label_ids,
)
    # we need ntokens so that during prediction we can map results back to the original tokens.
return feature,ntokens,label_ids
def filed_based_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_file,mode=None):
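    # Convert every example to features, serialize them to a TFRecord file, and also return the
    # flat token and label-id lists across all examples for decoding predictions later.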
writer = tf.python_io.TFRecordWriter(output_file)
batch_tokens = []
batch_labels = []
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature,ntokens,label_ids = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer, mode)
batch_tokens.extend(ntokens)
batch_labels.extend(label_ids)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["mask"] = create_int_feature(feature.mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
# sentence token in each batch
writer.close()
return batch_tokens,batch_labels
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
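    # Build an input_fn that reads the TFRecord file, decodes each record into int32 tensors,
    # and batches them (with repeat and shuffle when training).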
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder
))
return d
return input_fn
# Everything above is data preprocessing; what follows is the model.
#def hidden2tag(hiddenlayer,numclass):
# bilstm = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128,return_sequences = True))
# linear = tf.keras.layers.Dense(numclass,activation=None)
# hiddenlayer = bilstm(hiddenlayer)
# hiddenlayer = linear(hiddenlayer)
# return hiddenlayer
def hidden2tag(hiddenlayer,numclass):
linear = tf.keras.layers.Dense(numclass,activation=None)
return linear(hiddenlayer)
def crf_loss(logits,labels,mask,num_labels,mask2len):
"""
:param logits:
:param labels:
:param mask2len:each sample's length
:return:
"""
#TODO
with tf.variable_scope("crf_loss"):
trans = tf.get_variable(
"transition",
shape=[num_labels,num_labels],
initializer=tf.contrib.layers.xavier_initializer()
)
log_likelihood,transition = tf.contrib.crf.crf_log_likelihood(logits,labels,transition_params =trans ,sequence_lengths=mask2len)
loss = tf.math.reduce_mean(-log_likelihood)
return loss,transition
def softmax_layer(logits,labels,num_labels,mask):
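    # Token-level softmax cross-entropy with padded positions masked out; returns the masked average loss and argmax predictions.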
logits = tf.reshape(logits, [-1, num_labels])
labels = tf.reshape(labels, [-1])
mask = tf.cast(mask,dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
loss = tf.losses.softmax_cross_entropy(logits=logits,onehot_labels=one_hot_labels)
loss *= tf.reshape(mask, [-1])
loss = tf.reduce_sum(loss)
total_size = tf.reduce_sum(mask)
total_size += 1e-12 # to avoid division by 0 for all-0 weights
loss /= total_size
    # predictions are not masked here; padded positions can be filtered out in the prediction step.
probabilities = tf.math.softmax(logits, axis=-1)
predict = tf.math.argmax(probabilities, axis=-1)
return loss, predict
def create_model(bert_config, is_training, input_ids, mask,
segment_ids, labels, num_labels, use_one_hot_embeddings):
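    # Run BERT, project the sequence output to the tag space, then decode with either a CRF (FLAGS.crf) or a softmax layer.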
model = modeling.BertModel(
config = bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings
)
output_layer = model.get_sequence_output()
#output_layer shape is
if is_training:
output_layer = tf.keras.layers.Dropout(rate=0.1)(output_layer)
logits = hidden2tag(output_layer,num_labels)
# TODO test shape
logits = tf.reshape(logits,[-1,FLAGS.max_seq_length,num_labels])
if FLAGS.crf:
mask2len = tf.reduce_sum(mask,axis=1)
loss, trans = crf_loss(logits,labels,mask,num_labels,mask2len)
predict,viterbi_score = tf.contrib.crf.crf_decode(logits, trans, mask2len)
return (loss, logits,predict)
else:
loss,predict = softmax_layer(logits, labels, num_labels, mask)
return (loss, logits, predict)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
logging.info("*** Features ***")
for name in sorted(features.keys()):
logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
mask = features["mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if FLAGS.crf:
(total_loss, logits,predicts) = create_model(bert_config, is_training, input_ids,
mask, segment_ids, label_ids,num_labels,
use_one_hot_embeddings)
else:
(total_loss, logits, predicts) = create_model(bert_config, is_training, input_ids,
mask, segment_ids, label_ids,num_labels,
use_one_hot_embeddings)
tvars = tf.trainable_variables()
scaffold_fn = None
initialized_variable_names=None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(label_ids, logits,num_labels,mask):
predictions = tf.math.argmax(logits, axis=-1, output_type=tf.int32)
cm = metrics.streaming_confusion_matrix(label_ids, predictions, num_labels-1, weights=mask)
return {
"confusion_matrix":cm
}
#
eval_metrics = (metric_fn, [label_ids, logits, num_labels, mask])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predicts, scaffold_fn=scaffold_fn
)
return output_spec
return model_fn
def _write_base(batch_tokens,id2label,prediction,batch_labels,wf,i):
token = batch_tokens[i]
predict = id2label[prediction]
true_l = id2label[batch_labels[i]]
if token!="[PAD]" and token!="[CLS]" and true_l!="X":
        # map "X" predictions (WordPiece continuations) back to "O" for evaluation
        if predict=="X":
predict="O"
line = "{}\t{}\t{}\n".format(token,true_l,predict)
wf.write(line)
def Writer(output_predict_file,result,batch_tokens,batch_labels,id2label):
with open(output_predict_file,'w') as wf:
if FLAGS.crf:
predictions = []
for m,pred in enumerate(result):
predictions.extend(pred)
for i,prediction in enumerate(predictions):
_write_base(batch_tokens,id2label,prediction,batch_labels,wf,i)
else:
for i,prediction in enumerate(result):
_write_base(batch_tokens,id2label,prediction,batch_labels,wf,i)
def main(_):
logging.set_verbosity(logging.INFO)
processors = {"ner": NerProcessor}
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
_,_ = filed_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
logging.info("***** Running training *****")
logging.info(" Num examples = %d", len(train_examples))
logging.info(" Batch size = %d", FLAGS.train_batch_size)
logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
batch_tokens,batch_labels = filed_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
logging.info("***** Running evaluation *****")
logging.info(" Num examples = %d", len(eval_examples))
logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# if FLAGS.use_tpu:
# eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
# eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
result = estimator.evaluate(input_fn=eval_input_fn)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with open(output_eval_file,"w") as wf:
logging.info("***** Eval results *****")
confusion_matrix = result["confusion_matrix"]
p,r,f = metrics.calculate(confusion_matrix,len(label_list)-1)
logging.info("***********************************************")
logging.info("********************P = %s*********************", str(p))
logging.info("********************R = %s*********************", str(r))
logging.info("********************F = %s*********************", str(f))
logging.info("***********************************************")
if FLAGS.do_predict:
with open(FLAGS.middle_output+'/label2id.pkl', 'rb') as rf:
label2id = pickle.load(rf)
id2label = {value: key for key, value in label2id.items()}
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
batch_tokens,batch_labels = filed_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
logging.info("***** Running prediction*****")
logging.info(" Num examples = %d", len(predict_examples))
logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "label_test.txt")
        # If the tag is "X" it belongs to the preceding (sub)token; for convenient evaluation with
        # conlleval.pl we simply discard it here.
Writer(output_predict_file,result,batch_tokens,batch_labels,id2label)
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 40.539886 | 309 | 0.628167 |
0ae3c9a212fa13441a6eb86078deb45ac3a9be2e | 546 | py | Python | backend-services/Models/byteToAudio.py | Off-Top-App/off-top-python | af51a7da0e52d6ad978835cb05986d7c2a917861 | [
"bzip2-1.0.6"
] | 3 | 2019-12-01T23:09:12.000Z | 2020-12-22T03:02:37.000Z | backend-services/Models/byteToAudio.py | Off-Top-App/off-top-python | af51a7da0e52d6ad978835cb05986d7c2a917861 | [
"bzip2-1.0.6"
] | 5 | 2020-03-05T17:17:12.000Z | 2020-06-16T07:02:27.000Z | backend-services/Models/byteToAudio.py | Off-Top-App/off-top-python | af51a7da0e52d6ad978835cb05986d7c2a917861 | [
"bzip2-1.0.6"
] | 1 | 2020-05-18T12:57:14.000Z | 2020-05-18T12:57:14.000Z |
import scipy.io.wavfile
import numpy as np
import soundfile as sf
# scipy writes the bytes to an audio file via a numpy array; here 16 denotes the sample rate (samples/sec).
# The function opens a file containing byte strings, converts each line to a numpy array, and writes it out as a .wav file.
def convert_bytes_to_audio():
f = open("test.txt","r")
for line in f:
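        # Note: 'format.wav' is rewritten on every iteration, so only the last line of test.txt remains in the output.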
scipy.io.wavfile.write('format.wav',16, np.frombuffer(str.encode(line),dtype ='B'))
f.close()
def main():
convert_bytes_to_audio()
if __name__ == '__main__':
convert_bytes_to_audio()
| 27.3 | 98 | 0.716117 |
a8e57319beb6f7b2f1e471080bebb96136d420ad | 4,395 | py | Python | metrics_v2/handler/alerts_test.py | yeqingli/ml-testing-accelerators | 5bf6322e5eca258317dcdaef26c070d988b055a0 | [
"Apache-2.0"
] | 36 | 2020-03-04T15:22:44.000Z | 2022-03-31T19:00:58.000Z | metrics_v2/handler/alerts_test.py | yeqingli/ml-testing-accelerators | 5bf6322e5eca258317dcdaef26c070d988b055a0 | [
"Apache-2.0"
] | 144 | 2020-03-05T21:22:21.000Z | 2022-03-31T19:31:25.000Z | metrics_v2/handler/alerts_test.py | ultrons/ml-testing-accelerators | d33a18e1782875dfe647c149553d54cc0c5e88cd | [
"Apache-2.0"
] | 30 | 2020-03-10T17:56:40.000Z | 2022-03-31T19:10:25.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from datetime import datetime
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import pytz
import alerts
import metrics_pb2
LOGS_LINK = """https://console.cloud.google.com/logs?project=xl-ml-test&advancedFilter=resource.type%3Dk8s_container%0Aresource.labels.project_id%3Dxl-ml-test%0Aresource.labels.location=us-central1-b%0Aresource.labels.cluster_name=xl-ml-test%0Aresource.labels.namespace_name=automated%0Aresource.labels.pod_name:pt-1.5-fs-transformer-functional-v3-8-1585756800&extra_junk&dateRangeUnbound=backwardInTime"""
WORKLOAD_LINK = """https://console.cloud.google.com/kubernetes/job/us-central1-b/xl-ml-test/automated/pt-1.5-fs-transformer-functional-v3-8-1585756800?project=xl-ml-test&pt-1.5-fs-transformer-functional-v3-8-1585756800_events_tablesize=50&tab=events&duration=P30D&pod_summary_list_tablesize=20&service_list_datatablesize=20"""
class AlertHandlerTest(parameterized.TestCase):
def setUp(self):
self._logger = logging.get_absl_logger()
self._handler = alerts.AlertHandler(
project_id='my-project-id',
benchmark_id='benchmark-id',
debug_info=None,
)
self._logger.addHandler(self._handler)
def tearDown(self):
self._logger.removeHandler(self._handler)
@parameterized.named_parameters(
('debug', 'DEBUG', logging.debug),
('info', 'INFO', logging.info),
('warning', 'WARNING', logging.warning),
)
def test_log_no_email(self, log_level, log_method):
self._handler.setLevel('ERROR')
with self.assertLogs(level=log_level) as cm:
log_method('log message')
self.assertEqual(cm.output, [f'{log_level}:absl:log message'])
self.assertEmpty(self._handler._records)
@parameterized.named_parameters(
('error', 'ERROR', logging.error),
('fatal', 'CRITICAL', logging.fatal),
)
def test_log_with_email(self, log_level, log_method):
self._handler.setLevel('ERROR')
with self.assertLogs() as cm:
log_method('msg1')
log_method('msg2')
log_method('msg3')
self.assertEqual(cm.output, [
f'{log_level}:absl:msg1',
f'{log_level}:absl:msg2',
f'{log_level}:absl:msg3',
])
subject, body = self._handler.generate_email_content()
self.assertIn('msg1', body.content)
self.assertIn('msg2', body.content)
self.assertIn('msg3', body.content)
def test_has_errors_when_empty(self):
self.assertFalse(self._handler.has_errors)
self._handler.generate_email_content()
def test_generate_email_body_with_debug_info(self):
self._handler._debug_info = metrics_pb2.DebugInfo(
logs_link=LOGS_LINK,
details_link=WORKLOAD_LINK,
)
logging.error('error_message')
_, body = self._handler.generate_email_content()
self.assertIn('error_message', body.content)
self.assertIn(LOGS_LINK, body.content)
self.assertIn(WORKLOAD_LINK, body.content)
def test_generate_email_subject(self):
subject = self._handler.generate_email_content()[0].subject
date_str = subject[subject.find('202'):] # Grab substring from start of year onward.
tz = pytz.timezone('US/Pacific')
sj_date = tz.localize(datetime.strptime(date_str, '%Y/%m/%d %H:%M:%S'))
after_call = datetime.now(tz)
# This checks that the datetime string used in the email is 1. using
# current time and 2. is using US/Pacific tz and not e.g. UTC.
self.assertTrue((after_call - sj_date).total_seconds() < 2.0)
self.assertIn('benchmark-id', subject)
def test_no_html_injection(self):
injection_str = '<marquee>HTML injection is fun!</marquee>'
logging.error(injection_str)
_, body = self._handler.generate_email_content()
self.assertNotIn(injection_str, body.content)
if __name__ == '__main__':
absltest.main()
| 39.241071 | 406 | 0.732878 |
cc42e1bcd70bfe5fd717b0039e288701aad4dfcf | 2,184 | py | Python | src/utils/train_utils.py | RayWilliam46/FineTune-DistilBERT | 09cbed8f58ba65cb75610f72a2196e88e0dabc59 | [
"Apache-2.0"
] | 13 | 2021-05-06T17:31:26.000Z | 2022-03-28T04:55:37.000Z | src/utils/train_utils.py | RayWilliam46/FineTune-DistilBERT | 09cbed8f58ba65cb75610f72a2196e88e0dabc59 | [
"Apache-2.0"
] | null | null | null | src/utils/train_utils.py | RayWilliam46/FineTune-DistilBERT | 09cbed8f58ba65cb75610f72a2196e88e0dabc59 | [
"Apache-2.0"
] | 7 | 2021-07-06T18:46:54.000Z | 2022-03-28T04:55:47.000Z | def batch_encode(tokenizer, texts, batch_size=256, max_length=params['MAX_LENGTH']):
"""""""""
A function that encodes a batch of texts and returns the texts'
corresponding encodings and attention masks that are ready to be fed
into a pre-trained transformer model.
Input:
- tokenizer: Tokenizer object from the PreTrainedTokenizer Class
- texts: List of strings where each string represents a text
- batch_size: Integer controlling number of texts in a batch
- max_length: Integer controlling max number of words to tokenize in a given text
Output:
- input_ids: sequence of texts encoded as a tf.Tensor object
- attention_mask: the texts' attention mask encoded as a tf.Tensor object
"""""""""
input_ids = []
attention_mask = []
for i in range(0, len(texts), batch_size):
batch = texts[i:i+batch_size]
inputs = tokenizer.batch_encode_plus(batch,
max_length=max_length,
padding='longest', #implements dynamic padding
truncation=True,
return_attention_mask=True,
return_token_type_ids=False
)
input_ids.extend(inputs['input_ids'])
attention_mask.extend(inputs['attention_mask'])
return tf.convert_to_tensor(input_ids), tf.convert_to_tensor(attention_mask)
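# Illustrative usage (not part of the original file): assuming a HuggingFace tokenizer and a list of
# strings `train_texts`, the call below returns tensors ready to feed a DistilBERT-style encoder:
#   ids, masks = batch_encode(tokenizer, train_texts, batch_size=64, max_length=128)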
def focal_loss(gamma=params['FL_GAMMA'], alpha=params['FL_ALPHA']):
"""""""""
Function that computes the focal loss.
Code adapted from https://gist.github.com/mkocabas/62dcd2f14ad21f3b25eac2d39ec2cc95
"""""""""
def focal_loss_fixed(y_true, y_pred):
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
return focal_loss_fixed
| 45.5 | 131 | 0.592033 |
4dfaded092c5c406695432d03d27fad6f3a2d560 | 23,065 | py | Python | miseq_portal/miseq_viewer/models.py | BFSSI-Bioinformatics-Lab/miseq_portal | 79a0dea0e35a07d86e044acc39c9407cdc128428 | [
"MIT"
] | null | null | null | miseq_portal/miseq_viewer/models.py | BFSSI-Bioinformatics-Lab/miseq_portal | 79a0dea0e35a07d86e044acc39c9407cdc128428 | [
"MIT"
] | null | null | null | miseq_portal/miseq_viewer/models.py | BFSSI-Bioinformatics-Lab/miseq_portal | 79a0dea0e35a07d86e044acc39c9407cdc128428 | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
from typing import Optional
import pandas as pd
from dataclasses import dataclass
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from config.settings.base import MEDIA_ROOT
from miseq_portal.core.models import TimeStampedModel
from miseq_portal.users.models import User
logger = logging.getLogger('django')
def validate_sample_id(value: str, length: int = 15):
"""
Strict validation of BMH Sample ID
:param value: sample_id
:param length: expected length of string
"""
components = value.split("-")
if len(value) != length:
raise ValidationError(f"Sample ID '{value}' does not meet the expected length of 15 characters. "
f"Sample ID must be in the following format: 'BMH-2018-000001'")
if len(components) != 3:
raise ValidationError(f"Sample ID '{value}' does not appear to meet expected format. "
f"Sample ID must be in the following format: 'BMH-2018-000001'")
elif components[0] != 'BMH' and components[0] != 'MER' and components[0] != 'EXT':
raise ValidationError(
f"TEXT component of Sample ID ('{components[0]}') does not equal expected 'BMH', 'MER', or 'EXT'")
elif not components[1].isdigit() or len(components[1]) != 4:
raise ValidationError(f"YEAR component of Sample ID ('{components[1]}') does not equal expected 'YYYY' format")
elif not components[2].isdigit() or len(components[2]) != 6:
raise ValidationError(f"ID component of Sample ID ('{components[2]}') does not equal expected 'XXXXXX' format")
return True
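# Examples: 'BMH-2018-000001', 'MER-2019-000004' and 'EXT-2020-000123' pass validation; any value that
# does not match the XXX-YYYY-ZZZZZZ pattern raises a ValidationError.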
def upload_run_file(instance, filename: str):
"""instance must be Run"""
if instance.run_type == "BMH":
return f'uploads/runs/{instance.run_id}/{filename}'
elif instance.run_type == "EXT":
return f'external_samples/runs/{instance.run_id}/{filename}'
def upload_interop_file(instance, filename: str):
"""instance must be RunInterOpData"""
if instance.run_id.run_type == "BMH":
return f'uploads/runs/{instance.run_id}/InterOp/{filename}'
elif instance.run_id.run_type == "EXT":
return f'external_samples/runs/{instance.run_id}/InterOp/{filename}'
def upload_interop_dir(instance):
"""instance must be Run"""
if instance.run_type == "BMH":
return f'uploads/runs/{instance.run_id}/InterOp/'
elif instance.run_type == "EXT":
return f'external_samples/runs/{instance.run_id}/InterOp/'
def upload_reads(instance, filename: str):
"""instance must be Sample"""
if instance.sample_type == 'BMH':
return f'uploads/runs/{instance.run_id}/{instance.sample_id}/{filename}'
elif instance.sample_type == 'MER':
return f'merged_samples/{instance.sample_id}/{filename}'
elif instance.sample_type == 'EXT':
return f'external_samples/runs/{instance.run_id}/{instance.sample_id}/{filename}'
def upload_assembly(instance, filename: str):
"""instance must be SampleAssemblyData"""
if instance.sample_id.sample_type == 'BMH':
return f'uploads/runs/{instance.sample_id.run_id}/{instance.sample_id}/assembly/{filename}'
elif instance.sample_id.sample_type == 'MER':
return f'merged_samples/{instance.sample_id}/assembly/{filename}'
elif instance.sample_id.sample_type == 'EXT':
return f'external_samples/runs/{instance.sample_id.run_id}/{instance.sample_id}/assembly/{filename}'
def upload_merged_sample(instance, filename: str):
"""Deprecated, replaced by upload_reads, can't delete due to it being stuck in earlier migrations"""
return f'uploads/merged_samples/{instance.sample_id}/{filename}'
@dataclass
class SampleDataObject:
"""
Dataclass to store metadata for a sample which will eventually be passed to the Sample and SampleLogData models
"""
# Must be instantiated with these attributes
sample_id: str
run_id: str
project_id: str
sample_name: str
# Updated later in the lifecycle
sample_type: str = None # i.e. BMH, EXT
sequencing_type: str = None # i.e. META, WGS, RNA, AMP
fwd_read_path: Path = None
rev_read_path: Path = None
number_reads: int = None
sample_yield: int = None
r1_qualityscoresum: int = None
r2_qualityscoresum: int = None
r1_trimmedbases: int = None
r2_trimmedbases: int = None
r1_yield: int = None
r2_yield: int = None
r1_yieldq30: int = None
r2_yieldq30: int = None
@dataclass
class RunDataObject:
"""
Dataclass to store metadata for a run
"""
run_id: str
run_type: str = None
interop_dir: Path = None
sample_sheet: Path = None
json_stats_file: Path = None
runinfoxml: Path = None
runparametersxml: Path = None
control_metrics: Path = None
correctedintmetrics: Path = None
errormetrics: Path = None
extractionmetrics: Path = None
indexmetrics: Path = None
qmetrics2030: Path = None
qmetricsbylane: Path = None
qmetrics: Path = None
tilemetrics: Path = None
logfiles: list = None
@dataclass
class MinIONRunDataObject:
"""
Dataclass to store metadata for a run
"""
run_id: str
run_type: str = None
sample_sheet: Path = None
# Create your models here.
class Project(TimeStampedModel):
"""
Samples can belong to Projects. Used to help organize sample data.
"""
project_id = models.CharField(max_length=256, unique=True)
project_owner = models.ForeignKey(User, on_delete=models.CASCADE)
# TODO: Consider adding flags for viral, prokaryotic, eukaryotic, metagenomic, mixed sample types
def last_updated(self) -> str:
""" Finds the most recently created Sample object belonging to this project """
samples = Sample.objects.filter(project_id=self.id).order_by('-created')
return samples[0].modified
@property
def num_samples(self):
return len(Sample.objects.filter(project_id=self.pk))
def __str__(self):
return self.project_id
def __len__(self):
return self.num_samples
class Meta:
verbose_name = 'Project'
verbose_name_plural = 'Projects'
class UserProjectRelationship(TimeStampedModel):
"""
Stores relationship between a Project and User, as well as their access level for the project
"""
ACCESS_LEVELS = (
('MANAGER', 'Manager'), # Delete, modify
('ADMIN', 'Admin'), # Same as MANAGER
('USER', 'User'), # View
('NONE', 'None'), # No access
)
project_id = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='relationship_project', null=True)
user_id = models.ForeignKey(User, on_delete=models.CASCADE, related_name='relationship_user', null=True)
access_level = models.CharField(max_length=32, choices=ACCESS_LEVELS, default='NONE')
def __str__(self):
return str(self.project_id) + ' : ' + str(self.user_id) + ' : ' + str(self.access_level)
class Meta:
verbose_name = 'User Project Relationship'
verbose_name_plural = 'User Project Relationships'
class Run(TimeStampedModel):
"""
Stores information relating to a single sequencing run
"""
run_id = models.CharField(max_length=256, unique=True)
sample_sheet = models.FileField(upload_to=upload_run_file, blank=True, max_length=1000)
runinfoxml = models.FileField(upload_to=upload_run_file, blank=True, null=True, max_length=1000)
runparametersxml = models.FileField(upload_to=upload_run_file, blank=True, null=True, max_length=1000)
interop_directory_path = models.CharField(unique=True, blank=True, null=True, max_length=1000)
# run_type indicates whether the Sample was generated by the BMH sequencing lab or by an external lab
RUN_TYPES = (
('BMH', 'BMH'),
('EXT', 'EXT'),
)
run_type = models.CharField(max_length=3, choices=RUN_TYPES, default="BMH")
def get_interop_directory(self) -> Path:
return Path(self.interop_directory_path)
@property
def run_url(self) -> str:
return reverse('miseq_viewer:miseq_viewer_run_detail', args=(self.pk,))
@property
def num_samples(self) -> int:
return len(Sample.objects.filter(run_id=self.pk))
def __str__(self):
return str(self.run_id)
def __len__(self):
return self.num_samples
class Meta:
verbose_name = 'Run'
verbose_name_plural = 'Runs'
class RunSamplesheet(TimeStampedModel):
run_id = models.OneToOneField(Run, on_delete=models.CASCADE, primary_key=True)
# Attempts to capture all possible fields within the [Header] section of a MiSeq or iSeq samplesheet
iemfileversion = models.CharField(blank=True, max_length=1028)
local_run_manager_analysis_id = models.CharField(blank=True, max_length=1028)
investigator_name = models.CharField(blank=True, max_length=1028)
experiment_name = models.CharField(blank=True, max_length=1028)
samplesheet_date = models.CharField(blank=True, max_length=1028)
workflow = models.CharField(blank=True, max_length=1028)
date = models.CharField(blank=True, max_length=1028)
instrument_type = models.CharField(blank=True, max_length=1028)
module = models.CharField(blank=True, max_length=1028)
library_prep_kit = models.CharField(blank=True, max_length=1028)
application = models.CharField(blank=True, max_length=1028)
assay = models.CharField(blank=True, max_length=1028)
index_adapters = models.CharField(blank=True, max_length=1028)
description = models.CharField(blank=True, max_length=1028)
chemistry = models.CharField(blank=True, max_length=1028)
# Capture the [Settings] section
# TODO: Populate these values post-hoc
reversecomplement = models.CharField(blank=True, max_length=256)
adapter = models.CharField(blank=True, max_length=256)
def get_fields(self):
return [(field.name, field.value_to_string(self)) for field in RunSamplesheet._meta.fields]
class Meta:
verbose_name = 'Run Samplesheet'
verbose_name_plural = 'Run Samplesheets'
def __str__(self):
return str(self.run_id)
class RunInterOpData(TimeStampedModel):
"""
Stores metadata on a MiSeq run derived from the Illumnina InterOp files
"""
run_id = models.OneToOneField(Run, on_delete=models.CASCADE, primary_key=True)
control_metrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
correctedintmetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
errormetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
extractionmetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
indexmetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
qmetrics2030 = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
qmetricsbylane = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
qmetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
tilemetrics = models.FileField(upload_to=upload_interop_file, blank=True, null=True, max_length=1000)
def __str__(self):
return str(self.run_id) + "_InterOp"
class Meta:
verbose_name = 'Run InterOp Data'
verbose_name_plural = 'Run InterOp Data'
class MergedSampleComponentGroup(models.Model):
"""
Model for reference by MergedSample.
Maintains the linkage between a MergedSample and its constituent Sample objects via MergedSampleComponent
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
@property
def get_components(self):
components = MergedSampleComponent.objects.filter(group_id=self.pk)
return components
def __str__(self):
return f"{str(self.pk)} - {self.created.date()} - " + ", ".join(
[component.component_id.sample_id for component in self.get_components])
class Meta:
verbose_name = 'Merged Sample Component Group'
verbose_name_plural = 'Merged Sample Component Groups'
class Sample(TimeStampedModel):
"""
Stores basic information relating to a single BMH sample (i.e. R1, R2, corresponding assembly, etc.)
- Must follow the XXX-YYYY-ZZZZZZ format, e.g. "BMH-2018-000001", "MER-2019-000004"
- Each Sample can be associated with a Project and Run
- sample_type controls how the sample will be handled within the analysis/assembly pipelines + upload destination
- component_group is used to track the relationship between a merged sample and its constituent components
"""
sample_id = models.CharField(max_length=15, unique=True, validators=[validate_sample_id])
sample_name = models.TextField(blank=True)
# The sample_type and component_group fields exist to accommodate merged samples
sample_type_choices = (
('BMH', 'BMH'),
('MER', 'MERGED'),
('EXT', 'EXTERNAL')
)
sample_type = models.CharField(choices=sample_type_choices, max_length=3, default='BMH')
# sequencing_type is used to track the sample sequencing type which can be used to determine assembly method
SEQUENCING_TYPES = (
('WGS', 'Whole-Genome Sequence'),
('META', 'Metagenomic Sequence'),
('RNA', 'RNA-Seq'),
('AMP', 'Amplicon')
)
sequencing_type = models.CharField(max_length=32, choices=SEQUENCING_TYPES, default="WGS")
component_group = models.ForeignKey(MergedSampleComponentGroup, on_delete=models.CASCADE, blank=True, null=True)
# All BMH samples must be associated with a Project and Run
project_id = models.ForeignKey(Project, on_delete=models.CASCADE, blank=True, null=True)
run_id = models.ForeignKey(Run, on_delete=models.CASCADE, blank=True, null=True)
# Read upload location varies depending on sample_type
fwd_reads = models.FileField(upload_to=upload_reads, blank=True, max_length=1000)
rev_reads = models.FileField(upload_to=upload_reads, blank=True, max_length=1000)
hide_flag = models.BooleanField(default=False) # Activate this to hide the sample from view for regular users
additional_notes = models.TextField(blank=True)
def generate_sample_id(self):
"""
This method must be used for EXT or MER samples. First, instantiate the object, then call this method and assign
the generated value to Sample.sample_id
"""
return f'{self.sample_type}-{self.sample_year}-{self.pk:06}'
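    # Illustrative flow (assumed, not part of the original file): save the instance first so it has a pk,
    # then assign `sample.sample_id = sample.generate_sample_id()` (e.g. 'EXT-2021-000042') and save again.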
@property
def sample_year(self):
return str(self.created.year)
def __str__(self):
return self.sample_id
class Meta:
verbose_name = 'Sample'
verbose_name_plural = 'Samples'
class SampleSheetSampleData(TimeStampedModel):
"""
Model to store data from a SampleSheet for a Sample. Must be populated after the Sample has been instantiated.
This model was built with a different design pattern than the others, with a 'fat model' approach where much of the
logic is baked in. All new models going forward should follow this pattern.
- Note that all data is pulled from the SampleSheet.csv
- Columns for SampleSheets are not always the same - model attempts to accommodate all possibilities, but it
may need to be expanded
"""
sample_id = models.OneToOneField(Sample, on_delete=models.CASCADE, primary_key=True)
# SampleSheet data
sample_name = models.TextField(blank=True)
sample_plate = models.TextField(blank=True)
sample_well = models.TextField(blank=True)
i7_index_id = models.TextField(blank=True)
index = models.TextField(blank=True)
i5_index_id = models.TextField(blank=True)
index2 = models.TextField(blank=True)
sample_project = models.TextField(blank=True)
description = models.TextField(blank=True)
@staticmethod
def read_samplesheet(samplesheet: Path) -> pd.DataFrame:
"""
Reads SampleSheet.csv and returns dataframe (all header information will be stripped)
:param samplesheet: Path to SampleSheet.csv
:return: pandas df of SampleSheet.csv with head section stripped away
"""
counter = 1
with open(str(samplesheet)) as f:
for line in f:
if '[Data]' in line:
break
else:
counter += 1
df = pd.read_csv(samplesheet, sep=",", index_col=False, skiprows=counter)
# Force Sample_Name and Sample_Project into str types
df['Sample_Name'] = df['Sample_Name'].astype(str)
df['Sample_Project'] = df['Sample_Project'].astype(str)
# Fill in missing projects
df['Sample_Project'] = df['Sample_Project'].replace(r"\s+", "MISSING_PROJECT", regex=True)
df['Sample_Project'] = df['Sample_Project'].fillna(value="MISSING_PROJECT")
return df
def extract_sample_row_from_samplesheet(self, samplesheet: Path) -> Optional[pd.DataFrame]:
""" Given the [Data] section of a SampleSheet as a DataFrame, will filter to row for sample """
# Filter df to only our row of interest
df = self.read_samplesheet(samplesheet=samplesheet)
if self.sample_id.sample_type != 'BMH':
sample_id = self.sample_id.sample_name
else:
sample_id = str(self.sample_id)
df_ = df[df['Sample_ID'] == sample_id]
# If the df is empty, try to find the ID according to Sample_Name
if len(df_) == 0:
df_ = df[df['Sample_Name'] == sample_id]
if len(df_) == 0:
logger.warning(f"Could not detect '{sample_id}' in provided DataFrame")
return None
return df_
@property
def attribute_dict(self):
""" Stores relationship between Model field names and column names from SampleSheet """
attribute_dict = {
'sample_name': 'Sample_Name',
'sample_plate': 'Sample_Plate',
'sample_well': 'Sample_Well',
'i7_index_id': 'I7_Index_ID',
'index': 'index',
'i5_index_id': 'I5_Index_ID',
'index2': 'index2',
'sample_project': 'Sample_Project',
'description': 'Description',
}
return attribute_dict
def samplesheet_row_to_dict(self, row: pd.DataFrame) -> dict:
# These keys should correspond 1:1 with this model's CharFields
value_dict = {attr: '' for attr in self.attribute_dict.keys()}
# Populate value_dict which stores the values from the row in a dictionary with relevant model fields as keys
for attr, col in self.attribute_dict.items():
value_dict[attr] = row[col].values[0]
return value_dict
def __str__(self):
return str(self.sample_id)
class Meta:
verbose_name = 'SampleSheet Sample Data'
verbose_name_plural = 'SampleSheet Sample Data'
class MergedSampleComponent(models.Model):
"""
Model to store the relationship between a Sample (i.e. a "component") and a MergedSampleComponentGroup
"""
component_id = models.ForeignKey(Sample, on_delete=models.CASCADE)
group_id = models.ForeignKey(MergedSampleComponentGroup, on_delete=models.CASCADE)
def __str__(self):
return f"{self.component_id} ({self.group_id})"
class Meta:
verbose_name = 'Merged Sample Component'
verbose_name_plural = 'Merged Sample Components'
class SampleLogData(TimeStampedModel):
"""
Stores Sample metadata derived from Stats.json, a file generated by BaseSpace for a MiSeq run
"""
sample_id = models.OneToOneField(Sample, on_delete=models.CASCADE, primary_key=True)
number_reads = models.BigIntegerField(blank=True, null=True)
sample_yield = models.BigIntegerField(blank=True, null=True)
# R1
r1_qualityscoresum = models.BigIntegerField(blank=True, null=True)
r1_trimmedbases = models.BigIntegerField(blank=True, null=True)
r1_yield = models.BigIntegerField(blank=True, null=True)
r1_yieldq30 = models.BigIntegerField(blank=True, null=True)
# R2
r2_qualityscoresum = models.BigIntegerField(blank=True, null=True)
r2_trimmedbases = models.BigIntegerField(blank=True, null=True)
r2_yield = models.BigIntegerField(blank=True, null=True)
r2_yieldq30 = models.BigIntegerField(blank=True, null=True)
@property
def sample_yield_mbp(self):
if self.sample_yield is not None:
return float(self.sample_yield / 1000000)
def __str__(self):
return str(self.sample_id)
class Meta:
verbose_name = 'Sample Log Data'
verbose_name_plural = 'Sample Log Data'
class SampleAssemblyData(TimeStampedModel):
"""
Stores metadata on a Sample assembly.
"""
sample_id = models.OneToOneField(Sample, on_delete=models.CASCADE, primary_key=True)
assembly = models.FileField(blank=True, max_length=512)
# Assembly metrics
num_contigs = models.IntegerField(blank=True, null=True)
largest_contig = models.BigIntegerField(blank=True, null=True)
total_length = models.BigIntegerField(blank=True, null=True)
gc_percent = models.FloatField(blank=True, null=True)
n50 = models.BigIntegerField(blank=True, null=True)
num_predicted_genes = models.BigIntegerField(blank=True, null=True)
mean_coverage = models.TextField(blank=True, null=True)
std_coverage = models.TextField(blank=True, null=True)
# Pipeline versioning
bbduk_version = models.TextField(blank=True)
bbmap_version = models.TextField(blank=True)
tadpole_version = models.TextField(blank=True)
skesa_version = models.TextField(blank=True)
pilon_version = models.TextField(blank=True)
quast_version = models.TextField(blank=True)
def get_assembly_path(self) -> Path:
"""
Returns the expected assembly path, if it exists
"""
assembly_path = MEDIA_ROOT / Path(str(self.assembly))
if self.assembly_exists():
return assembly_path
else:
raise FileNotFoundError(f"Assembly at {self.assembly} for {self.sample_id} does not exist!")
def assembly_exists(self) -> bool:
"""
Returns True if assembly file exists, False if not
"""
assembly_path = MEDIA_ROOT / Path(str(self.assembly))
if not assembly_path.exists() or self.assembly is None or str(self.assembly) == "":
return False
elif assembly_path.exists():
return True
def __str__(self):
return str(self.sample_id)
class Meta:
verbose_name = 'Sample Assembly Data'
verbose_name_plural = 'Sample Assembly Data'
| 39.027073 | 120 | 0.693041 |
9b999268e54b2b40f8961017c9663bc71bd01c02 | 1,240 | py | Python | django_project/virtualornithology/timelines/views.py | carnby/aurora | f31e3de28694fd5763643d90ee91e99a55d4ec97 | [
"MIT"
] | 8 | 2016-02-25T13:48:10.000Z | 2020-06-06T07:23:30.000Z | django_project/virtualornithology/timelines/views.py | carnby/aurora | f31e3de28694fd5763643d90ee91e99a55d4ec97 | [
"MIT"
] | null | null | null | django_project/virtualornithology/timelines/views.py | carnby/aurora | f31e3de28694fd5763643d90ee91e99a55d4ec97 | [
"MIT"
] | 2 | 2016-03-07T11:54:59.000Z | 2020-06-06T07:23:31.000Z | from django.shortcuts import render, render_to_response, redirect
from django.template import RequestContext
from user_agents import parse as agentparse
from virtualornithology.birds.views import check_referer
from virtualornithology.interactions.views import prepare_session
from .models import Timeline
def last_timeline_json(request):
check_referer(request)
latest = Timeline.objects.all().order_by('-datetime')[0]
return render_to_response('json.html', {'json': latest.json}, context_instance=RequestContext(request), content_type='application/json')
def timeline_home(request, timeline_id=None):
ua = agentparse(request.META.get('HTTP_USER_AGENT', ''))
print(ua)
print(request.GET)
if ua.is_bot:
return render_to_response('timelines/bots.html', context_instance=RequestContext(request))
session = request.session
print(session, session.session_key, session.items())
prepare_session(request, 'all', 'aurora')
return render_to_response('timelines/timeline-baseline.html', {
'timeline_home_tweets': 10,
'record_interactions': True,
'current_app': 'aurora',
'client_datetime_var': 'client_datetime'
}, context_instance=RequestContext(request))
| 37.575758 | 140 | 0.754839 |
7e3b56e040b826b250ca9f3d052f9474f3b23628 | 6,089 | py | Python | modules.py | ifoyooo/ml-network | 15028fdfdf74540ae96ede4bb7c0f6fd62feb5e2 | [
"MIT"
] | null | null | null | modules.py | ifoyooo/ml-network | 15028fdfdf74540ae96ede4bb7c0f6fd62feb5e2 | [
"MIT"
] | null | null | null | modules.py | ifoyooo/ml-network | 15028fdfdf74540ae96ede4bb7c0f6fd62feb5e2 | [
"MIT"
] | null | null | null | '''
Implements the network layers.
'''
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np
# Identity layer: passes its input through unchanged.
__all__ = ['Identity','Embedding', 'LSTMEncoder', 'MLAttention', 'AttentionWeights', 'FastMLAttention', 'MLLinear']
class Identity(nn.Module):
def forward(self, x):
return x
class Embedding(nn.Module):
"""
"""
def __init__(self, vocab_size=None, emb_size=None, emb_init=None, emb_trainable=True, padding_idx=0, dropout=0.2):
super(Embedding, self).__init__()
if emb_init is not None:
if vocab_size is not None:
assert vocab_size == emb_init.shape[0]
if emb_size is not None:
assert emb_size == emb_init.shape[1]
vocab_size, emb_size = emb_init.shape
self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx, sparse=True,
_weight=torch.from_numpy(emb_init).float() if emb_init is not None else None)
self.emb.weight.requires_grad = emb_trainable
self.dropout = nn.Dropout(dropout)
self.padding_idx = padding_idx
def forward(self, inputs):
emb_out = self.dropout(self.emb(inputs))
lengths, masks = (inputs != self.padding_idx).sum(dim=-1), inputs != self.padding_idx
return emb_out[:, :lengths.max()], lengths, masks[:, :lengths.max()]
class LSTMEncoder(nn.Module):
"""
"""
def __init__(self, input_size, hidden_size, layers_num, dropout):
super(LSTMEncoder, self).__init__()
self.lstm = nn.LSTM(input_size, hidden_size, layers_num, batch_first=True, bidirectional=True)
self.init_state = nn.Parameter(torch.zeros(2*2*layers_num, 1, hidden_size))
self.dropout = nn.Dropout(dropout)
def forward(self, inputs, lengths, **kwargs):
self.lstm.flatten_parameters()
init_state = self.init_state.repeat([1, inputs.size(0), 1])
cell_init, hidden_init = init_state[:init_state.size(0)//2], init_state[init_state.size(0)//2:]
idx = torch.argsort(lengths, descending=True)
packed_inputs = nn.utils.rnn.pack_padded_sequence(inputs[idx], lengths[idx], batch_first=True)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
self.lstm(packed_inputs, (hidden_init, cell_init))[0], batch_first=True)
return self.dropout(outputs[torch.argsort(idx)])
###
class MLAttention(nn.Module):
"""
"""
def __init__(self, labels_num, hidden_size):
super(MLAttention, self).__init__()
self.attention = nn.Linear(hidden_size, labels_num, bias=False)
nn.init.xavier_uniform_(self.attention.weight)
# input:N,l,h->N,h,l
def forward(self, inputs, masks):
masks = torch.unsqueeze(masks, 1) # N, 1, L
attention = self.attention(inputs).transpose(1, 2).masked_fill(1.0 - masks, -np.inf) # N, labels_num, L
attention = F.softmax(attention, -1)
return attention @ inputs # N, labels_num, hidden_size
class AttentionWeights(nn.Module):
"""
"""
def __init__(self, labels_num, hidden_size, device_ids=None):
super(AttentionWeights, self).__init__()
if device_ids is None:
device_ids = list(range(1, torch.cuda.device_count()))
assert labels_num >= len(device_ids)
group_size, plus_num = labels_num // len(device_ids), labels_num % len(device_ids)
self.group = [group_size + 1] * plus_num + [group_size] * (len(device_ids) - plus_num)
assert sum(self.group) == labels_num
self.emb = nn.ModuleList(nn.Embedding(size, hidden_size, sparse=True).cuda(device_ids[i])
for i, size in enumerate(self.group))
std = (6.0 / (labels_num + hidden_size)) ** 0.5
with torch.no_grad():
for emb in self.emb:
emb.weight.data.uniform_(-std, std)
self.group_offset, self.hidden_size = np.cumsum([0] + self.group), hidden_size
def forward(self, inputs: torch.Tensor):
outputs = torch.zeros(*inputs.size(), self.hidden_size, device=inputs.device)
for left, right, emb in zip(self.group_offset[:-1], self.group_offset[1:], self.emb):
index = (left <= inputs) & (inputs < right)
group_inputs = (inputs[index] - left).to(emb.weight.device)
outputs[index] = emb(group_inputs).to(inputs.device)
return outputs
class FastMLAttention(nn.Module):
"""
"""
def __init__(self, labels_num, hidden_size, parallel_attn=False):
super(FastMLAttention, self).__init__()
if parallel_attn:
self.attention = nn.Embedding(labels_num + 1, hidden_size, sparse=True)
nn.init.xavier_uniform_(self.attention.weight)
def forward(self, inputs, masks, candidates, attn_weights: nn.Module):
masks = torch.unsqueeze(masks, 1) # N, 1, L
attn_inputs = inputs.transpose(1, 2) # N, hidden, L
attn_weights = self.attention(candidates) if hasattr(self, 'attention') else attn_weights(candidates)
attention = (attn_weights @ attn_inputs).masked_fill(1.0 - masks, -np.inf) # N, sampled_size, L
attention = F.softmax(attention, -1) # N, sampled_size, L
return attention @ inputs # N, sampled_size, hidden_size
# Multi-layer linear perceptron; does not perform well.
class MLLinear(nn.Module):
def __init__(self, linear_size :list, output_size:int):
super(MLLinear, self).__init__()
self.linear = nn.ModuleList(nn.Linear(in_s, out_s)
for in_s, out_s in zip(linear_size[:-1], linear_size[1:]))
for linear in self.linear:
nn.init.xavier_uniform_(linear.weight)
self.output = nn.Linear(linear_size[-1], output_size)
nn.init.xavier_uniform_(self.output.weight)
def forward(self, inputs):
linear_out = torch.flatten(inputs,start_dim=1)
for linear in self.linear:
linear_out = F.relu(linear(linear_out))
return torch.squeeze(self.output(linear_out), -1)
| 41.141892 | 118 | 0.645262 |
6b6660922a612ff2150e02971c2f1863a23dc52f | 84 | py | Python | ejercicios_python/Clase08/practica8-6.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | ejercicios_python/Clase08/practica8-6.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | ejercicios_python/Clase08/practica8-6.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 18:12:59 2021
@author: User
"""
| 10.5 | 35 | 0.559524 |
5e3475e829103f36719c3d5b86748ae52b34c9b6 | 93 | py | Python | Katna/__init__.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 125 | 2019-08-22T06:53:55.000Z | 2022-03-24T05:53:41.000Z | Katna/__init__.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 19 | 2020-02-13T07:14:59.000Z | 2021-12-01T15:13:33.000Z | Katna/__init__.py | viddik13/katna | 12256602a5fd24368ffffe2c1a82a46a49215c15 | [
"MIT"
] | 28 | 2019-09-03T07:00:29.000Z | 2021-12-30T04:20:14.000Z | from Katna.video import Video
from Katna.image import Image
from .version import __version__
| 23.25 | 32 | 0.83871 |
183dc74c777b7056734435310b1269d124c0e995 | 346 | py | Python | src/sentry/analytics/events/sentry_app_token_exchanged.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/analytics/events/sentry_app_token_exchanged.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/analytics/events/sentry_app_token_exchanged.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry import analytics
class SentryAppTokenExchangedEvent(analytics.Event):
type = "sentry_app.token_exchanged"
attributes = (
analytics.Attribute("sentry_app_installation_id"),
analytics.Attribute("exchange_type"),
)
analytics.register(SentryAppTokenExchangedEvent)
| 21.625 | 58 | 0.768786 |
7b43ffee87d181efff419e361a959387756b244b | 3,662 | py | Python | adapters/actuators/roboteq_command_wrapper_examples.py | diydsp/thirtybirds3.0 | 8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8 | [
"MIT"
] | 2 | 2020-05-13T02:53:02.000Z | 2021-03-21T05:54:53.000Z | adapters/actuators/roboteq_command_wrapper_examples.py | diydsp/thirtybirds3.0 | 8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8 | [
"MIT"
] | null | null | null | adapters/actuators/roboteq_command_wrapper_examples.py | diydsp/thirtybirds3.0 | 8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8 | [
"MIT"
] | 1 | 2021-05-06T18:42:41.000Z | 2021-05-06T18:42:41.000Z |
import queue
import threading
import roboteq_macro_functions as roboteq
config = {
"boards":{
"300:1058:3014688:1429493507:540422710":{},
"300:1058:2031663:1429493506:540422710":{},
},
"motors":{
"pitch_slider":{
"mcu_id":"300:1058:2031663:1429493506:540422710",
"channel":"1",
},
"bow_position_slider":{
"mcu_id":"300:1058:2031663:1429493506:540422710",
"channel":"2",
},
"bow_height":{
"mcu_id":"300:1058: 3014688:1429493507:540422710",
"channel":"1",
},
"bow_rotation":{
"mcu_id":"300:1058:3014688:1429493507:540422710",
"channel":"2",
}
}
}
class Status_Receiver(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.queue = queue.Queue()
def add_to_queue(self, name, value):
self.queue.put((name, value))
def run(self):
while True:
name, value = self.queue.get(True)
print("status",name, value)
status_receiver = Status_Receiver()
status_receiver.start()
class Roboteq_Data_Receiver(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.queue = queue.Queue()
self.start()
def add_to_queue(self, message):
self.queue.put(message)
def run(self):
while True:
message = self.queue.get(True)
print("data",message)
if "internal_event" in message:
do_tests()
data_receiver = Roboteq_Data_Receiver()  # thread starts itself in __init__, so no extra start() call is needed
class Exception_Receiver(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.queue = queue.Queue()
def add_to_queue(self, *args):
print(1, args)
self.queue.put(args)
def run(self):
while True:
message = self.queue.get(True)
print("exception",message)
exception_receiver = Exception_Receiver()
exception_receiver.start()
controllers = roboteq.init(
data_receiver.add_to_queue,
status_receiver.add_to_queue,
exception_receiver.add_to_queue,
config
)
def do_tests():
for board_name in controllers.boards:
controllers.boards[board_name].set_serial_data_watchdog(0)
controllers.motors["pitch_slider"].go_to_speed_or_relative_position(200)
controllers.motors["bow_position_slider"].go_to_speed_or_relative_position(200)
controllers.motors["bow_height"].go_to_speed_or_relative_position(200)
controllers.motors["bow_rotation"].go_to_speed_or_relative_position(200)
time.sleep(5)
controllers.motors["pitch_slider"].go_to_speed_or_relative_position(00)
controllers.motors["bow_position_slider"].go_to_speed_or_relative_position(00)
controllers.motors["bow_height"].go_to_speed_or_relative_position(00)
controllers.motors["bow_rotation"].go_to_speed_or_relative_position(00)
time.sleep(5)
controllers.motors["pitch_slider"].go_to_speed_or_relative_position(-200)
controllers.motors["bow_position_slider"].go_to_speed_or_relative_position(-200)
controllers.motors["bow_height"].go_to_speed_or_relative_position(-200)
controllers.motors["bow_rotation"].go_to_speed_or_relative_position(-200)
time.sleep(5)
controllers.motors["pitch_slider"].go_to_speed_or_relative_position(00)
controllers.motors["bow_position_slider"].go_to_speed_or_relative_position(00)
controllers.motors["bow_height"].go_to_speed_or_relative_position(0)
controllers.motors["bow_rotation"].go_to_speed_or_relative_position(0)
time.sleep(5)
| 32.122807 | 84 | 0.67941 |
09688f44b7a3643d913c596d88fae688a06c9e46 | 3,600 | py | Python | tests/test_rand_spatial_crop_samplesd.py | charliebudd/MONAI | 9f4da6acded249bba24c85eaee4ece256ed45815 | [
"Apache-2.0"
] | 1 | 2021-04-13T08:10:28.000Z | 2021-04-13T08:10:28.000Z | tests/test_rand_spatial_crop_samplesd.py | mxochicale/MONAI | d72f61ea9f73821602f507b5e80a97c3c6937e05 | [
"Apache-2.0"
] | null | null | null | tests/test_rand_spatial_crop_samplesd.py | mxochicale/MONAI | d72f61ea9f73821602f507b5e80a97c3c6937e05 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCropSamplesd
TEST_CASE_1 = [
{"keys": ["img", "seg"], "num_samples": 4, "roi_size": [2, 2, 2], "random_center": True},
{"img": np.arange(81).reshape(3, 3, 3, 3), "seg": np.arange(81, 0, -1).reshape(3, 3, 3, 3)},
[(3, 3, 3, 2), (3, 2, 2, 2), (3, 3, 3, 2), (3, 3, 2, 2)],
{
"img": np.array(
[
[[[0, 1], [3, 4]], [[9, 10], [12, 13]], [[18, 19], [21, 22]]],
[[[27, 28], [30, 31]], [[36, 37], [39, 40]], [[45, 46], [48, 49]]],
[[[54, 55], [57, 58]], [[63, 64], [66, 67]], [[72, 73], [75, 76]]],
]
),
"seg": np.array(
[
[[[81, 80], [78, 77]], [[72, 71], [69, 68]], [[63, 62], [60, 59]]],
[[[54, 53], [51, 50]], [[45, 44], [42, 41]], [[36, 35], [33, 32]]],
[[[27, 26], [24, 23]], [[18, 17], [15, 14]], [[9, 8], [6, 5]]],
]
),
},
]
TEST_CASE_2 = [
{"keys": ["img", "seg"], "num_samples": 8, "roi_size": [2, 2, 3], "random_center": False},
{"img": np.arange(81).reshape(3, 3, 3, 3), "seg": np.arange(81, 0, -1).reshape(3, 3, 3, 3)},
[(3, 3, 3, 3), (3, 2, 3, 3), (3, 2, 2, 3), (3, 2, 3, 3), (3, 3, 3, 3), (3, 3, 3, 3), (3, 2, 2, 3), (3, 3, 2, 3)],
{
"img": np.array(
[
[[[0, 1, 2], [3, 4, 5]], [[9, 10, 11], [12, 13, 14]], [[18, 19, 20], [21, 22, 23]]],
[[[27, 28, 29], [30, 31, 32]], [[36, 37, 38], [39, 40, 41]], [[45, 46, 47], [48, 49, 50]]],
[[[54, 55, 56], [57, 58, 59]], [[63, 64, 65], [66, 67, 68]], [[72, 73, 74], [75, 76, 77]]],
]
),
"seg": np.array(
[
[[[81, 80, 79], [78, 77, 76]], [[72, 71, 70], [69, 68, 67]], [[63, 62, 61], [60, 59, 58]]],
[[[54, 53, 52], [51, 50, 49]], [[45, 44, 43], [42, 41, 40]], [[36, 35, 34], [33, 32, 31]]],
[[[27, 26, 25], [24, 23, 22]], [[18, 17, 16], [15, 14, 13]], [[9, 8, 7], [6, 5, 4]]],
]
),
},
]
class TestRandSpatialCropSamplesd(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_shape(self, input_param, input_data, expected_shape, expected_last):
xform = RandSpatialCropSamplesd(**input_param)
xform.set_random_state(1234)
result = xform(input_data)
for item, expected in zip(result, expected_shape):
self.assertTupleEqual(item["img"].shape, expected)
self.assertTupleEqual(item["seg"].shape, expected)
for i, item in enumerate(result):
self.assertEqual(item["img_meta_dict"]["patch_index"], i)
self.assertEqual(item["seg_meta_dict"]["patch_index"], i)
np.testing.assert_allclose(item["img"], expected_last["img"])
np.testing.assert_allclose(item["seg"], expected_last["seg"])
if __name__ == "__main__":
unittest.main()
| 43.902439 | 117 | 0.501667 |
5e87f94accd112fb3e23987a2637352462addc61 | 416 | py | Python | Python/BasicPracticeProblems/Simple_Questions/Area.py | smainc/ProgrammingConcepts | f45fe38f2c9c5b95c3f17f26d19a1059cac6063c | [
"MIT"
] | null | null | null | Python/BasicPracticeProblems/Simple_Questions/Area.py | smainc/ProgrammingConcepts | f45fe38f2c9c5b95c3f17f26d19a1059cac6063c | [
"MIT"
] | null | null | null | Python/BasicPracticeProblems/Simple_Questions/Area.py | smainc/ProgrammingConcepts | f45fe38f2c9c5b95c3f17f26d19a1059cac6063c | [
"MIT"
] | null | null | null | def areaOfCircle(radius): # area of circle = pi * r^2
return 3.14 * radius ** 2 # ** is the power operator
def circumferenceOfCircle(radius): # circumference of circle = 2 * pi * r
return 2 * 3.14 * radius
radius = float(input('Enter the radius of the circle: '))
print('The area of the circle is:', areaOfCircle(radius))
print('The circumference of the circle is:', circumferenceOfCircle(radius)) | 46.222222 | 75 | 0.689904 |
187ebbec9349e86c84d45a1c0257448fe0cdb201 | 7,990 | py | Python | sdk/python/pulumi_azure_nextgen/labservices/v20181015/get_user.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/labservices/v20181015/get_user.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/labservices/v20181015/get_user.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetUserResult',
'AwaitableGetUserResult',
'get_user',
]
@pulumi.output_type
class GetUserResult:
"""
The User registered to a lab
"""
def __init__(__self__, email=None, family_name=None, given_name=None, latest_operation_result=None, location=None, name=None, provisioning_state=None, tags=None, tenant_id=None, total_usage=None, type=None, unique_identifier=None):
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
pulumi.set(__self__, "email", email)
if family_name and not isinstance(family_name, str):
raise TypeError("Expected argument 'family_name' to be a str")
pulumi.set(__self__, "family_name", family_name)
if given_name and not isinstance(given_name, str):
raise TypeError("Expected argument 'given_name' to be a str")
pulumi.set(__self__, "given_name", given_name)
if latest_operation_result and not isinstance(latest_operation_result, dict):
raise TypeError("Expected argument 'latest_operation_result' to be a dict")
pulumi.set(__self__, "latest_operation_result", latest_operation_result)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if total_usage and not isinstance(total_usage, str):
raise TypeError("Expected argument 'total_usage' to be a str")
pulumi.set(__self__, "total_usage", total_usage)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_identifier and not isinstance(unique_identifier, str):
raise TypeError("Expected argument 'unique_identifier' to be a str")
pulumi.set(__self__, "unique_identifier", unique_identifier)
@property
@pulumi.getter
def email(self) -> str:
"""
The user email address, as it was specified during registration.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter(name="familyName")
def family_name(self) -> str:
"""
The user family name, as it was specified during registration.
"""
return pulumi.get(self, "family_name")
@property
@pulumi.getter(name="givenName")
def given_name(self) -> str:
"""
The user given name, as it was specified during registration.
"""
return pulumi.get(self, "given_name")
@property
@pulumi.getter(name="latestOperationResult")
def latest_operation_result(self) -> 'outputs.LatestOperationResultResponse':
"""
The details of the latest operation. ex: status, error
"""
return pulumi.get(self, "latest_operation_result")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The user tenant ID, as it was specified during registration.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="totalUsage")
def total_usage(self) -> str:
"""
How long the user has used his VMs in this lab
"""
return pulumi.get(self, "total_usage")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> Optional[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
class AwaitableGetUserResult(GetUserResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUserResult(
email=self.email,
family_name=self.family_name,
given_name=self.given_name,
latest_operation_result=self.latest_operation_result,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
tenant_id=self.tenant_id,
total_usage=self.total_usage,
type=self.type,
unique_identifier=self.unique_identifier)
def get_user(expand: Optional[str] = None,
lab_account_name: Optional[str] = None,
lab_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
user_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Specify the $expand query. Example: 'properties($select=email)'
:param str lab_account_name: The name of the lab Account.
:param str lab_name: The name of the lab.
:param str resource_group_name: The name of the resource group.
:param str user_name: The name of the user.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['labAccountName'] = lab_account_name
__args__['labName'] = lab_name
__args__['resourceGroupName'] = resource_group_name
__args__['userName'] = user_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:labservices/v20181015:getUser', __args__, opts=opts, typ=GetUserResult).value
return AwaitableGetUserResult(
email=__ret__.email,
family_name=__ret__.family_name,
given_name=__ret__.given_name,
latest_operation_result=__ret__.latest_operation_result,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
tenant_id=__ret__.tenant_id,
total_usage=__ret__.total_usage,
type=__ret__.type,
unique_identifier=__ret__.unique_identifier)
| 36.651376 | 235 | 0.647184 |
fc857d018095843ba15b6d03216227c9d0c381af | 2,578 | py | Python | resolwe_bio/tests/workflows/test_shrna.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/workflows/test_shrna.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | resolwe_bio/tests/workflows/test_shrna.py | HudoGriz/resolwe-bio | 4f7363cfa7d9d5a43f1a70ef36c69be3faed7fea | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
from resolwe.test import tag_process
from resolwe.flow.models import Data
from resolwe_bio.utils.test import KBBioProcessTestCase
class SHRNATestCase(KBBioProcessTestCase):
@tag_process('workflow-trim-align-quant')
def test_run_trim_align_quant(self):
with self.preparation_stage():
pf_in = './shrna_diffexp/input/'
pf_out = './shrna_diffexp/output/'
species = 'Homo sapiens'
build = 'customshRNA'
reads = self.prepare_reads([pf_in + 'SM31_ss.fastq.gz'])
# Larger data ran on this reduced genome yield similar alignment statistics (~2 % aligned reads).
genome = self.run_process('upload-genome', {'src': pf_in + 'SM31_library.fasta.gz',
'species': species,
'build': build})
input_workflow = {
'reads': reads.id,
'trimming_options': {
'up_primers_seq': ['TAGTGAAGCCACAGAT'],
'down_primers_seq': ['TACTGCCTCGGA'],
'error_rate_5end': 0.2,
'error_rate_3end': 0.2
},
'alignment_options': {
'genome': genome.id,
'mode': '--end-to-end', # as default
'N': 1,
'L': 9,
'gbar': 1,
'mp': '2',
'rdg': '0,2',
'rfg': '0,2',
'score_min': 'C,-20,0'
},
'quant_options': {
'readlengths': 26,
'alignscores': -6
}
}
self.run_process('workflow-trim-align-quant', input_workflow)
workflow = Data.objects.last()
self.assertFile(workflow, 'rc', pf_out + 'SM31_ss_trimmed_trimmed_count_matrix.txt.gz', compression='gzip')
self.assertFile(workflow, 'exp', pf_out + 'SM31_ss_trimmed_trimmed_count_matrix.txt.gz', compression='gzip')
self.assertFields(workflow, 'exp_type', 'RC')
self.assertJSON(workflow, workflow.output['exp_json'], '', pf_out + 'SM31_ss_json.txt.gz')
self.assertFields(workflow, 'source', 'shRNA-gene-sequences')
self.assertFields(workflow, 'species', species)
self.assertFields(workflow, 'build', build)
self.assertFields(workflow, 'feature_type', 'shRNA')
self.assertFile(workflow, 'mapped_species', pf_out + 'SM31_ss_trimmed_trimmed_mapped_species.txt.gz',
compression='gzip')
| 42.966667 | 116 | 0.551202 |
569959ae1b7bf2a0ce23e9e86eb44d65f783e697 | 3,437 | py | Python | machine-learning/ml-projects/stereo-scan-v2/models/double-scan-v3/unet_normal.py | olaals/masteroppgave2 | 9fc181325b6e3ef74d81cdb323d3e47a79bb889e | [
"MIT"
] | null | null | null | machine-learning/ml-projects/stereo-scan-v2/models/double-scan-v3/unet_normal.py | olaals/masteroppgave2 | 9fc181325b6e3ef74d81cdb323d3e47a79bb889e | [
"MIT"
] | null | null | null | machine-learning/ml-projects/stereo-scan-v2/models/double-scan-v3/unet_normal.py | olaals/masteroppgave2 | 9fc181325b6e3ef74d81cdb323d3e47a79bb889e | [
"MIT"
] | 2 | 2021-09-17T12:26:04.000Z | 2021-09-27T12:59:55.000Z | import torch
from torch import nn
import numpy as np
class Unet2D(nn.Module):
def __init__(self, in_channels, out_channels, channel_ratio=1):
super().__init__()
ch = np.array([32, 64, 128, 256, 512])
ch = channel_ratio*ch
ch = ch.round().astype(int)
self.ch = ch
self.conv1 = self.double_conv(in_channels, ch[0], 7, 3)
self.conv2 = self.double_conv(ch[0], ch[1], 3, 1)
self.conv3 = self.double_conv(ch[1], ch[2], 3, 1)
self.conv4 = self.double_conv(ch[2], ch[3], 3, 1)
self.conv5 = self.double_conv(ch[3], ch[4], 3, 1)
self.upconv4 = self.double_conv(ch[3]*2, ch[3], 3, 1)
self.upconv3 = self.double_conv(ch[2]*2, ch[2], 3, 1)
self.upconv2 = self.double_conv(ch[1]*2, ch[1], 3, 1)
self.upconv1 = self.double_conv(ch[0]*2, ch[0], 3, 1)
self.last_conv = self.single_conv(ch[0], out_channels, 3, 1)
self.conv_transp54 =torch.nn.ConvTranspose2d(self.ch[4], self.ch[3], kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv_transp43 =torch.nn.ConvTranspose2d(self.ch[3], self.ch[2], kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv_transp32 =torch.nn.ConvTranspose2d(self.ch[2], self.ch[1], kernel_size=3, stride=2, padding=1, output_padding=1)
self.conv_transp21 =torch.nn.ConvTranspose2d(self.ch[1], self.ch[0], kernel_size=3, stride=2, padding=1, output_padding=1)
def __call__(self, x):
# downsampling part
out1 = self.conv1(x)
in2 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(out1)
out2 = self.conv2(in2)
in3 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(out2)
out3 = self.conv3(in3)
in4 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(out3)
out4 = self.conv4(in4)
in5 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(out4)
out5 = self.conv5(in5)
up4_in = self.conv_transp54(out5)
cat4 = torch.cat([up4_in,out4], 1)
up4_out = self.upconv4(cat4)
up3_in = self.conv_transp43(up4_out)
cat3 = torch.cat([up3_in,out3], 1)
up3_out = self.upconv3(cat3)
up2_in = self.conv_transp32(up3_out)
cat2 = torch.cat([up2_in, out2], 1)
up2_out = self.upconv2(cat2)
up1_in = self.conv_transp21(up2_out)
cat1 = torch.cat([up1_in, out1], 1)
out = self.upconv1(cat1)
out = self.last_conv(out)
return out
def double_conv(self, in_channels, out_channels, kernel_size, padding):
seq = nn.Sequential(
torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
torch.nn.BatchNorm2d(out_channels),
torch.nn.ReLU(),
torch.nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
torch.nn.BatchNorm2d(out_channels),
torch.nn.ReLU(),
)
return seq
def single_conv(self, in_channels, out_channels, kernel_size, padding):
seq = nn.Sequential(
torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=padding),
torch.nn.BatchNorm2d(out_channels),
torch.nn.ReLU()
)
return seq
| 34.717172 | 131 | 0.606052 |
4a37e769b72f3424982a7c3b313779f523250b9d | 501 | py | Python | beautiful_fields/validators.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | beautiful_fields/validators.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | beautiful_fields/validators.py | Excentrics/beautiful-fields | 7117cde6ac5646a5d65e166706b94898946a92b7 | [
"BSD-3-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from phonenumbers import parse
from phonenumbers.phonenumberutil import NumberParseException
from beautiful_fields.phonenumber import PhoneNumber, to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_(u'The phone number entered is not valid.'))
| 38.538462 | 75 | 0.808383 |
a6fc2919689a7d758b431220895f212e3de49cb9 | 1,534 | py | Python | airflow/providers/redis/sensors/redis_key.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 4 | 2020-02-16T18:13:54.000Z | 2021-01-01T03:22:19.000Z | airflow/providers/redis/sensors/redis_key.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 14 | 2019-11-22T09:24:20.000Z | 2021-07-09T06:06:59.000Z | airflow/providers/redis/sensors/redis_key.py | gtossou/airflow | 0314a3a218f864f78ec260cc66134e7acae34bc5 | [
"Apache-2.0"
] | 2 | 2020-10-23T18:55:05.000Z | 2022-02-16T21:53:10.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict
from airflow.providers.redis.hooks.redis import RedisHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class RedisKeySensor(BaseSensorOperator):
"""Checks for the existence of a key in a Redis"""
template_fields = ('key',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self, *, key: str, redis_conn_id: str, **kwargs) -> None:
super().__init__(**kwargs)
self.redis_conn_id = redis_conn_id
self.key = key
def poke(self, context: Dict) -> bool:
self.log.info('Sensor checks for existence of key: %s', self.key)
return RedisHook(self.redis_conn_id).get_conn().exists(self.key)
| 38.35 | 74 | 0.740548 |
6c13e302cc54cacfb97a59024624c088e7ec3337 | 21,261 | py | Python | bleak/backends/dotnet/client.py | kupiakos/bleak | 11e7adf1ae1774b22667fcc4e6533ad69597951a | [
"MIT"
] | null | null | null | bleak/backends/dotnet/client.py | kupiakos/bleak | 11e7adf1ae1774b22667fcc4e6533ad69597951a | [
"MIT"
] | 4 | 2019-12-06T13:36:16.000Z | 2022-02-14T09:34:48.000Z | bleak/backends/dotnet/client.py | kupiakos/bleak | 11e7adf1ae1774b22667fcc4e6533ad69597951a | [
"MIT"
] | 4 | 2020-01-15T10:45:55.000Z | 2022-02-07T09:07:53.000Z | # -*- coding: utf-8 -*-
"""
BLE Client for Windows 10 systems.
Created on 2017-12-05 by hbldh <henrik.blidh@nedomkull.com>
"""
import logging
import asyncio
from asyncio.events import AbstractEventLoop
from functools import wraps
from typing import Callable, Any
from bleak.exc import BleakError, BleakDotNetTaskError
from bleak.backends.client import BaseBleakClient
from bleak.backends.dotnet.discovery import discover
from bleak.backends.dotnet.utils import (
wrap_Task,
wrap_IAsyncOperation,
IAsyncOperationAwaitable,
)
from bleak.backends.service import BleakGATTServiceCollection
from bleak.backends.dotnet.service import BleakGATTServiceDotNet
from bleak.backends.dotnet.characteristic import BleakGATTCharacteristicDotNet
from bleak.backends.dotnet.descriptor import BleakGATTDescriptorDotNet
# CLR imports
# Import of Bleak CLR->UWP Bridge.
from BleakBridge import Bridge
# Import of other CLR components needed.
from System import Array, Byte, UInt64
from Windows.Foundation import IAsyncOperation, TypedEventHandler
from Windows.Storage.Streams import DataReader, DataWriter, IBuffer
from Windows.Devices.Bluetooth import (
BluetoothLEDevice,
BluetoothConnectionStatus,
BluetoothCacheMode,
BluetoothAddressType,
)
from Windows.Devices.Bluetooth.GenericAttributeProfile import (
GattDeviceService,
GattDeviceServicesResult,
GattCharacteristic,
GattCharacteristicsResult,
GattDescriptor,
GattDescriptorsResult,
GattCommunicationStatus,
GattReadResult,
GattWriteOption,
GattWriteResult,
GattValueChangedEventArgs,
GattCharacteristicProperties,
GattClientCharacteristicConfigurationDescriptorValue,
)
logger = logging.getLogger(__name__)
class BleakClientDotNet(BaseBleakClient):
"""The native Windows Bleak Client.
Implemented using `pythonnet <https://pythonnet.github.io/>`_, a package that provides an integration to the .NET
Common Language Runtime (CLR). Therefore, much of the code below has a distinct C# feel.
Args:
address (str): The MAC address of the BLE peripheral to connect to.
loop (asyncio.events.AbstractEventLoop): The event loop to use.
Keyword Args:
timeout (float): Timeout for required ``discover`` call. Defaults to 2.0.
"""
def __init__(self, address: str, loop: AbstractEventLoop = None, **kwargs):
super(BleakClientDotNet, self).__init__(address, loop, **kwargs)
# Backend specific. Python.NET objects.
self._device_info = None
self._requester = None
self._bridge = Bridge()
self._callbacks = {}
self._address_type = (
kwargs["address_type"]
if "address_type" in kwargs
and kwargs["address_type"] in ("public", "random")
else None
)
def __str__(self):
return "BleakClientDotNet ({0})".format(self.address)
# Connectivity methods
async def connect(self, **kwargs) -> bool:
"""Connect to the specified GATT server.
Keyword Args:
timeout (float): Timeout for required ``discover`` call. Defaults to 2.0.
Returns:
Boolean representing connection status.
"""
# Try to find the desired device.
timeout = kwargs.get("timeout", self._timeout)
devices = await discover(timeout=timeout, loop=self.loop)
sought_device = list(
filter(lambda x: x.address.upper() == self.address.upper(), devices)
)
if len(sought_device):
self._device_info = sought_device[0].details
else:
raise BleakError(
"Device with address {0} was " "not found.".format(self.address)
)
logger.debug("Connecting to BLE device @ {0}".format(self.address))
args = [UInt64(self._device_info.BluetoothAddress)]
if self._address_type is not None:
args.append(
BluetoothAddressType.Public
if self._address_type == "public"
else BluetoothAddressType.Random
)
self._requester = await wrap_IAsyncOperation(
IAsyncOperation[BluetoothLEDevice](
BluetoothLEDevice.FromBluetoothAddressAsync(*args)
),
return_type=BluetoothLEDevice,
loop=self.loop,
)
def _ConnectionStatusChanged_Handler(sender, args):
logger.debug("_ConnectionStatusChanged_Handler: " + args.ToString())
self._requester.ConnectionStatusChanged += _ConnectionStatusChanged_Handler
# Obtain services, which also leads to connection being established.
services = await self.get_services()
connected = False
if self._services_resolved:
            # If the services have been resolved, then we assume that we are connected. This is due to
            # some issues with getting `is_connected` to give a correct response here.
connected = True
else:
for _ in range(5):
await asyncio.sleep(0.2, loop=self.loop)
connected = await self.is_connected()
if connected:
break
if connected:
logger.debug("Connection successful.")
else:
raise BleakError(
"Connection to {0} was not successful!".format(self.address)
)
return connected
async def disconnect(self) -> bool:
"""Disconnect from the specified GATT server.
Returns:
Boolean representing connection status.
"""
logger.debug("Disconnecting from BLE device...")
# Remove notifications
# TODO: Make sure all notifications are removed prior to Dispose.
# Dispose all components that we have requested and created.
for service in self.services:
service.obj.Dispose()
self.services = BleakGATTServiceCollection()
self._requester.Dispose()
self._requester = None
return not await self.is_connected()
async def is_connected(self) -> bool:
"""Check connection status between this client and the server.
Returns:
Boolean representing connection status.
"""
if self._requester:
return (
self._requester.ConnectionStatus == BluetoothConnectionStatus.Connected
)
else:
return False
def set_disconnected_callback(
self, callback: Callable[[BaseBleakClient], None], **kwargs
) -> None:
"""Set the disconnected callback.
N.B. This is not implemented in the .NET backend yet.
Args:
callback: callback to be called on disconnection.
"""
raise NotImplementedError("This is not implemented in the .NET backend yet")
# GATT services methods
async def get_services(self) -> BleakGATTServiceCollection:
"""Get all services registered for this GATT server.
Returns:
A :py:class:`bleak.backends.service.BleakGATTServiceCollection` with this device's services tree.
"""
# Return the Service Collection.
if self._services_resolved:
return self.services
else:
logger.debug("Get Services...")
services_result = await wrap_IAsyncOperation(
IAsyncOperation[GattDeviceServicesResult](
self._requester.GetGattServicesAsync()
),
return_type=GattDeviceServicesResult,
loop=self.loop,
)
if services_result.Status != GattCommunicationStatus.Success:
raise BleakDotNetTaskError("Could not get GATT services.")
            # TODO: Check if fetching yields failures...
for service in services_result.Services:
characteristics_result = await wrap_IAsyncOperation(
IAsyncOperation[GattCharacteristicsResult](
service.GetCharacteristicsAsync()
),
return_type=GattCharacteristicsResult,
loop=self.loop,
)
self.services.add_service(BleakGATTServiceDotNet(service))
if characteristics_result.Status != GattCommunicationStatus.Success:
raise BleakDotNetTaskError(
"Could not get GATT characteristics for {0}.".format(service)
)
for characteristic in characteristics_result.Characteristics:
descriptors_result = await wrap_IAsyncOperation(
IAsyncOperation[GattDescriptorsResult](
characteristic.GetDescriptorsAsync()
),
return_type=GattDescriptorsResult,
loop=self.loop,
)
self.services.add_characteristic(
BleakGATTCharacteristicDotNet(characteristic)
)
if descriptors_result.Status != GattCommunicationStatus.Success:
raise BleakDotNetTaskError(
"Could not get GATT descriptors for {0}.".format(
characteristic
)
)
for descriptor in list(descriptors_result.Descriptors):
self.services.add_descriptor(
BleakGATTDescriptorDotNet(
descriptor, characteristic.Uuid.ToString()
)
)
self._services_resolved = True
return self.services
# I/O methods
async def read_gatt_char(self, _uuid: str, use_cached=False, **kwargs) -> bytearray:
"""Perform read operation on the specified GATT characteristic.
Args:
_uuid (str or UUID): The uuid of the characteristics to read from.
use_cached (bool): `False` forces Windows to read the value from the
device again and not use its own cached value. Defaults to `False`.
Returns:
(bytearray) The read data.
"""
characteristic = self.services.get_characteristic(str(_uuid))
if not characteristic:
raise BleakError("Characteristic {0} was not found!".format(_uuid))
read_result = await wrap_IAsyncOperation(
IAsyncOperation[GattReadResult](
characteristic.obj.ReadValueAsync(
BluetoothCacheMode.Cached
if use_cached
else BluetoothCacheMode.Uncached
)
),
return_type=GattReadResult,
loop=self.loop,
)
if read_result.Status == GattCommunicationStatus.Success:
reader = DataReader.FromBuffer(IBuffer(read_result.Value))
output = Array.CreateInstance(Byte, reader.UnconsumedBufferLength)
reader.ReadBytes(output)
value = bytearray(output)
logger.debug("Read Characteristic {0} : {1}".format(_uuid, value))
else:
raise BleakError(
"Could not read characteristic value for {0}: {1}".format(
characteristic.uuid, read_result.Status
)
)
return value
async def read_gatt_descriptor(
self, handle: int, use_cached=False, **kwargs
) -> bytearray:
"""Perform read operation on the specified GATT descriptor.
Args:
handle (int): The handle of the descriptor to read from.
use_cached (bool): `False` forces Windows to read the value from the
device again and not use its own cached value. Defaults to `False`.
Returns:
(bytearray) The read data.
"""
descriptor = self.services.get_descriptor(handle)
if not descriptor:
raise BleakError("Descriptor with handle {0} was not found!".format(handle))
read_result = await wrap_IAsyncOperation(
IAsyncOperation[GattReadResult](
descriptor.obj.ReadValueAsync(
BluetoothCacheMode.Cached
if use_cached
else BluetoothCacheMode.Uncached
)
),
return_type=GattReadResult,
loop=self.loop,
)
if read_result.Status == GattCommunicationStatus.Success:
reader = DataReader.FromBuffer(IBuffer(read_result.Value))
output = Array.CreateInstance(Byte, reader.UnconsumedBufferLength)
reader.ReadBytes(output)
value = bytearray(output)
logger.debug("Read Descriptor {0} : {1}".format(handle, value))
else:
raise BleakError(
"Could not read Descriptor value for {0}: {1}".format(
descriptor.uuid, read_result.Status
)
)
return value
async def write_gatt_char(
self, _uuid: str, data: bytearray, response: bool = False
) -> None:
"""Perform a write operation of the specified GATT characteristic.
Args:
_uuid (str or UUID): The uuid of the characteristics to write to.
data (bytes or bytearray): The data to send.
response (bool): If write-with-response operation should be done. Defaults to `False`.
"""
characteristic = self.services.get_characteristic(str(_uuid))
if not characteristic:
raise BleakError("Characteristic {0} was not found!".format(_uuid))
writer = DataWriter()
writer.WriteBytes(Array[Byte](data))
response = (
GattWriteOption.WriteWithResponse
if response
else GattWriteOption.WriteWithoutResponse
)
write_result = await wrap_IAsyncOperation(
IAsyncOperation[GattWriteResult](
characteristic.obj.WriteValueWithResultAsync(
writer.DetachBuffer(), response
)
),
return_type=GattWriteResult,
loop=self.loop,
)
if write_result.Status == GattCommunicationStatus.Success:
logger.debug("Write Characteristic {0} : {1}".format(_uuid, data))
else:
raise BleakError(
"Could not write value {0} to characteristic {1}: {2}".format(
data, characteristic.uuid, write_result.Status
)
)
async def write_gatt_descriptor(self, handle: int, data: bytearray) -> None:
"""Perform a write operation on the specified GATT descriptor.
Args:
            handle (int): The handle of the descriptor to write to.
data (bytes or bytearray): The data to send.
"""
descriptor = self.services.get_descriptor(handle)
if not descriptor:
raise BleakError("Descriptor {0} was not found!".format(handle))
writer = DataWriter()
writer.WriteBytes(Array[Byte](data))
write_result = await wrap_IAsyncOperation(
IAsyncOperation[GattWriteResult](
descriptor.obj.WriteValueAsync(writer.DetachBuffer())
),
return_type=GattWriteResult,
loop=self.loop,
)
if write_result.Status == GattCommunicationStatus.Success:
logger.debug("Write Descriptor {0} : {1}".format(handle, data))
else:
raise BleakError(
"Could not write value {0} to descriptor {1}: {2}".format(
data, descriptor.uuid, write_result.Status
)
)
async def start_notify(
self, _uuid: str, callback: Callable[[str, Any], Any], **kwargs
) -> None:
"""Activate notifications/indications on a characteristic.
Callbacks must accept two inputs. The first will be a uuid string
object and the second will be a bytearray.
.. code-block:: python
def callback(sender, data):
print(f"{sender}: {data}")
client.start_notify(char_uuid, callback)
Args:
_uuid (str or UUID): The uuid of the characteristics to start notification/indication on.
callback (function): The function to be called on notification.
"""
characteristic = self.services.get_characteristic(str(_uuid))
if self._notification_callbacks.get(str(_uuid)):
await self.stop_notify(_uuid)
status = await self._start_notify(characteristic.obj, callback)
if status != GattCommunicationStatus.Success:
raise BleakError(
"Could not start notify on {0}: {1}".format(characteristic.uuid, status)
)
async def _start_notify(
self,
characteristic_obj: GattCharacteristic,
callback: Callable[[str, Any], Any],
):
"""Internal method performing call to BleakUWPBridge method.
Args:
characteristic_obj: The Managed Windows.Devices.Bluetooth.GenericAttributeProfile.GattCharacteristic Object
callback: The function to be called on notification.
Returns:
(int) The GattCommunicationStatus of the operation.
"""
if (
characteristic_obj.CharacteristicProperties
& GattCharacteristicProperties.Indicate
):
cccd = GattClientCharacteristicConfigurationDescriptorValue.Indicate
elif (
characteristic_obj.CharacteristicProperties
& GattCharacteristicProperties.Notify
):
cccd = GattClientCharacteristicConfigurationDescriptorValue.Notify
else:
cccd = getattr(GattClientCharacteristicConfigurationDescriptorValue, "None")
try:
# TODO: Enable adding multiple handlers!
self._callbacks[characteristic_obj.Uuid.ToString()] = TypedEventHandler[
GattCharacteristic, GattValueChangedEventArgs
](_notification_wrapper(callback))
self._bridge.AddValueChangedCallback(
characteristic_obj, self._callbacks[characteristic_obj.Uuid.ToString()]
)
except Exception as e:
logger.debug("Start Notify problem: {0}".format(e))
if characteristic_obj.Uuid.ToString() in self._callbacks:
callback = self._callbacks.pop(characteristic_obj.Uuid.ToString())
self._bridge.RemoveValueChangedCallback(characteristic_obj, callback)
return GattCommunicationStatus.AccessDenied
status = await wrap_IAsyncOperation(
IAsyncOperation[GattCommunicationStatus](
characteristic_obj.WriteClientCharacteristicConfigurationDescriptorAsync(
cccd
)
),
return_type=GattCommunicationStatus,
loop=self.loop,
)
if status != GattCommunicationStatus.Success:
            # This usually happens when a device reports that it supports indicate, but it actually doesn't.
if characteristic_obj.Uuid.ToString() in self._callbacks:
callback = self._callbacks.pop(characteristic_obj.Uuid.ToString())
self._bridge.RemoveValueChangedCallback(characteristic_obj, callback)
return GattCommunicationStatus.AccessDenied
return status
async def stop_notify(self, _uuid: str) -> None:
"""Deactivate notification/indication on a specified characteristic.
Args:
_uuid: The characteristic to stop notifying/indicating on.
"""
characteristic = self.services.get_characteristic(str(_uuid))
status = await wrap_IAsyncOperation(
IAsyncOperation[GattCommunicationStatus](
characteristic.obj.WriteClientCharacteristicConfigurationDescriptorAsync(
getattr(
GattClientCharacteristicConfigurationDescriptorValue, "None"
)
)
),
return_type=GattCommunicationStatus,
loop=self.loop,
)
if status != GattCommunicationStatus.Success:
raise BleakError(
"Could not stop notify on {0}: {1}".format(characteristic.uuid, status)
)
else:
callback = self._callbacks.pop(characteristic.uuid)
self._bridge.RemoveValueChangedCallback(characteristic.obj, callback)
def _notification_wrapper(func: Callable):
@wraps(func)
def dotnet_notification_parser(sender: Any, args: Any):
# Return only the UUID string representation as sender.
# Also do a conversion from System.Bytes[] to bytearray.
reader = DataReader.FromBuffer(args.CharacteristicValue)
output = Array.CreateInstance(Byte, reader.UnconsumedBufferLength)
reader.ReadBytes(output)
return func(sender.Uuid.ToString(), bytearray(output))
return dotnet_notification_parser
| 37.16958 | 119 | 0.615493 |
5df0dd5e295594e022a6e36b6fb433dc353513fe | 4,895 | py | Python | portfolio/Python/scrapy/shoemetro/shoesteal.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/shoemetro/shoesteal.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/shoemetro/shoesteal.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | import csv
import os
import copy
import shutil
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from scrapy import log
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class shoestealSpider(BaseSpider):
name = 'shoesteal.com'
allowed_domains = ['shoesteal.com','www.shoesteal.com']
def start_requests(self):
shutil.copy(os.path.join(HERE, 'shoemetroall.csv'),os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur'))
with open(os.path.join(HERE, 'shoemetroall.csv.' + self.name + '.cur')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku']
"""
brand = row['brand']
style = row['style']
query = (brand + ' ' + style).replace(' ', '+')
"""
brand = ""
style = ""
query = row['name'].replace(' ', '+')
url = 'http://www.shoesteal.com/Shopping/Results.aspx?Ntt=' + query + '&x=0&y=0&N=0&Ntk=SearchInterface&Nty=1&Ntx=mode%2Bmatchallany'
yield Request(url, meta={'sku': sku, 'name': row['name'], 'brand': brand, 'style': style})
def parse(self, response):
hxs = HtmlXPathSelector(response)
base_url = get_base_url(response)
products = hxs.select('//div[@class="productCellWrapper"]')
if not products:
return
for product in products:
loader = ProductLoader(item=Product(), selector=product)
brand = "".join(product.select('.//div[@class="productBrandTitleColor"]/a/span[@class="brand"]/text()').extract()).strip()
style = "".join(product.select('.//div[@class="productBrandTitleColor"]/a/span[@class="styleName color"]/text()').extract()).strip()
name = "".join(product.select('.//div[@class="productBrandTitleColor"]/a/span[@class="styleName name"]/text()').extract()).strip()
name = brand + ' ' + name + ' ' + style
product_words = name.lower().split(' ')
search_words = response.meta['name'].lower().split()
diff = [w for w in search_words if not w in product_words]
if not diff:
url = product.select('.//div[@class="productBrandTitleColor"]/a/@href').extract()[0]
price = "".join(product.select('.//div[@class="price"]/span[@class="salePrice"]/text()').re(r'([0-9\,\. ]+)')).strip()
if not price:
price = "".join(product.select('.//div[@class="price"]/text()').re(r'([0-9\,\. ]+)')).strip()
loader.add_value('name', name)
loader.add_value('url', urljoin_rfc(base_url,url))
loader.add_value('price', price)
loader.add_value('sku', response.meta['sku'])
if not 'apparelsave' in loader.get_output_value('name').lower():
yield loader.load_item()
break
"""
name = "".join(product.select('.//div[@class="productBrandTitleColor"]/a/span[@class="brandName"]/text()').extract()).strip()
if name and response.meta['brand'].lower() in name.lower():
name2 = "".join(product.select('.//div[@class="productBrandTitleColor"]/a/span[@class="styleName"]/text()').extract()).strip()
#log.msg('NAME2: ' + self.words_replace(name2) + ' ---- ORIG: ' + response.meta['style'].lower())
if name2 and response.meta['style'].lower() == self.words_replace(name2):
url = product.select('.//div[@class="productBrandTitleColor"]/a/@href').extract()[0]
price = "".join(product.select('.//div[@class="productPrice"]/span[@class="variantSalePrice"]/text()').re(r'([0-9\,\. ]+)')).strip()
if not price:
price = "".join(product.select('.//div[@class="productPrice"]/text()').re(r'([0-9\,\. ]+)')).strip()
loader.add_value('name', name + ' ' + name2)
loader.add_value('url', urljoin_rfc(base_url,url))
loader.add_value('price', price)
loader.add_value('sku', response.meta['sku'])
if not 'apparelsave' in loader.get_output_value('name').lower():
yield loader.load_item()
break"""
def words_replace(self, s):
patterns = ["Men's", "Women's", "Kids'", "Girl's", "Boy's", 'Unisex']
for pattern in patterns:
s = s.replace(pattern,'');
return s.strip().lower()
| 53.206522 | 152 | 0.561798 |
4ca0fdc0281f2aa15086db9c56ef3cf1040ffbc4 | 1,956 | py | Python | pycmo/bin/agent.py | duyminh1998/pycmo | d863794e207414b4356c8c538db586176e45b91f | [
"MIT"
] | 7 | 2021-09-29T04:08:02.000Z | 2022-03-11T01:48:12.000Z | pycmo/bin/agent.py | duyminh1998/pycmo | d863794e207414b4356c8c538db586176e45b91f | [
"MIT"
] | 5 | 2021-08-19T19:25:10.000Z | 2022-03-03T13:39:26.000Z | pycmo/bin/agent.py | duyminh1998/pycmo | d863794e207414b4356c8c538db586176e45b91f | [
"MIT"
] | 4 | 2022-02-26T07:59:00.000Z | 2022-03-11T01:48:18.000Z | # Author: Minh Hua
# Date: 08/22/2021
# Purpose: Run an agent.
from pycmo.lib.run_loop import *
from pycmo.agents.random_agent import RandomAgent
from pycmo.agents.rule_based_agent import RuleBasedAgent
from pycmo.agents.scripted_agent import ScriptedAgent
from pycmo.configs import config
import argparse
def main(player_side: str, step_size: list, config, agent=None, max_steps=None, server=False, scen_file=None):
"""Run an agent."""
run_loop(player_side, step_size, config=config, agent=agent)
if __name__ == "__main__":
# open config
config = config.get_config()
# add args parser
parser = argparse.ArgumentParser(description='Arguments for choosing agent and timestep size.')
parser.add_argument('-agent', help='Select an agent. 0 for RandomAgent, 1 for ScriptedAgent, 2 for RuleBasedAgent.')
parser.add_argument('-size', help='Size of a timestep, must be in "hh:mm:ss" format.')
parser.add_argument('-scenario', help='The name of the scenario, used for ScriptedAgent and RuleBasedAgent. Usually the literal name of the .scen file.')
parser.add_argument('-player', help="The name of player's side.")
args = parser.parse_args()
# scenario file and player side
if args.scenario == None:
scen_short_name = 'Wooden Leg, 1985'
else:
scen_short_name = args.scenario
if args.player == None:
player_side = "Israel"
else:
player_side = args.player
# get step size
if args.size == None:
step_size = ["00", "01", "00"]
else:
step_size = args.size.split(":")
# initalize agent, RandomAgent by default
if args.agent == None:
player_agent = RandomAgent(scen_short_name, player_side)
else:
agents = [RandomAgent, ScriptedAgent, RuleBasedAgent]
player_agent = agents[int(args.agent)](scen_short_name, player_side)
# run main loop
main(player_side, step_size, config=config, agent=player_agent) | 39.918367 | 157 | 0.701943 |
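# Example invocations (illustrative; scenario and side names are placeholders):
#   python agent.py -agent 0 -size 00:01:00
#   python agent.py -agent 2 -size 00:00:30 -scenario "Wooden Leg, 1985" -player Israel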
9b8c2285ebd9452e2eda21e8f133b2d8a1e2c4ab | 991 | py | Python | lib/surface/network_services/tls_routes/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/network_services/tls_routes/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/network_services/tls_routes/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for Network Services Tls Routes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class TlsRoutes(base.Group):
"""Manage Network Services TlsRoutes."""
category = base.MANAGEMENT_TOOLS_CATEGORY
| 35.392857 | 74 | 0.773966 |
6e9338281b0a1bb50df028dc4c8412e1bd9781cb | 1,263 | py | Python | allauth/socialaccount/providers/other/hubic/provider.py | Cairnica/django-allauth | 43ddfc81f7fd06fc6502d425bf78833d5dbf49d0 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/other/hubic/provider.py | Cairnica/django-allauth | 43ddfc81f7fd06fc6502d425bf78833d5dbf49d0 | [
"MIT"
] | null | null | null | allauth/socialaccount/providers/other/hubic/provider.py | Cairnica/django-allauth | 43ddfc81f7fd06fc6502d425bf78833d5dbf49d0 | [
"MIT"
] | null | null | null | import requests
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.core.oauth2.provider import OAuth2Provider
class HubicAccount(ProviderAccount):
pass
class HubicProvider(OAuth2Provider):
id = 'hubic'
name = 'Hubic'
account_class = HubicAccount
access_token_url = 'https://api.hubic.com/oauth/token'
authorize_url = 'https://api.hubic.com/oauth/auth'
profile_url = 'https://api.hubic.com/1.0/account'
redirect_uri_protocol = 'https'
def complete_login(self, request, app, token, **kwargs):
token_type = kwargs['response']['token_type']
resp = requests.get(self.get_profile_url(request), headers={'Authorization': '%s %s' % (token_type, token.token)})
extra_data = resp.json()
return self.sociallogin_from_response(request, extra_data)
def extract_uid(self, data):
return str(data['email'])
def extract_common_fields(self, data):
return dict(
email=data.get('email'),
username=data.get('firstname').lower() + data.get(
'lastname').lower(),
first_name=data.get('firstname'),
last_name=data.get('lastname'))
provider_classes = [HubicProvider]
| 31.575 | 122 | 0.673001 |
cfa2fde71703b900ec93f9a863075934a2c1a83b | 2,056 | py | Python | Bugscan_exploits-master/exp_list/exp-1985.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-1985.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-1985.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:小光
#refer:http://www.wooyun.org/bugs/wooyun-2015-0143430
import re
def assign(service, arg):
if service == "hanweb":
return True, arg
def audit(arg):
getdata1 = 'vipchat/VerifyCodeServlet?var=clusterid'
code, head, res, errcode, _ = curl.curl2(arg+getdata1)
m1 = re.search('JSESSIONID=(.*?);',head)
	if code != 200:
		return False
raw = """
POST /vipchat/servlet/upfile.do HTTP/1.1
Host: www.notedyy.com
Proxy-Connection: keep-alive
Content-Length: 404
Cache-Control: max-age=0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Origin: null
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 Safari/537.36 SE 2.X MetaSr 1.0
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryUfIZSnIoUZx9mHpA
Accept-Encoding: gzip,deflate
Accept-Language: zh-CN,zh;q=0.8
Cookie: JSESSIONID="""+m1.group(1)+"""
------WebKitFormBoundaryUfIZSnIoUZx9mHpA
Content-Disposition: form-data; name="isdefault"
true
------WebKitFormBoundaryUfIZSnIoUZx9mHpA
Content-Disposition: form-data; name="allowtype"
jsp
------WebKitFormBoundaryUfIZSnIoUZx9mHpA
Content-Disposition: form-data; name="picfile"; filename="1.jsp"
Content-Type: application/octet-stream
just test c4ca4238a0b923820dcc509a6f75849b
------WebKitFormBoundaryUfIZSnIoUZx9mHpA--
"""
getdata2 = 'vipchat/servlet/upfile.do'
url = arg + getdata2
code, head, res, errcode, _ = curl.curl2(url,raw=raw)
m = re.search('/vipchat/home/info/(.*?).jsp',res)
if m :
url = arg+ m.group(0)
code, head, res, errcode, _ = curl.curl2(url)
if code ==200 and 'c4ca4238a0b923820dcc509a6f75849b' in res:
security_hole(arg+getdata2+' :file upload Vulnerable:')
if __name__ == '__main__':
from dummy import *
audit(assign('hanweb', 'http://www.notedyy.com/')[1]) | 32.634921 | 141 | 0.660992 |
93641abec37afd128cb255528447afb6eb9d2aae | 1,432 | py | Python | setup.py | mgesoftware/mge-graphql | 25fb74ef29b5c792a51793ab66b0f4175fb5b324 | [
"BSD-3-Clause"
] | 2 | 2022-01-26T02:22:50.000Z | 2022-02-03T22:38:52.000Z | setup.py | mgesoftware/mge-graphql | 25fb74ef29b5c792a51793ab66b0f4175fb5b324 | [
"BSD-3-Clause"
] | null | null | null | setup.py | mgesoftware/mge-graphql | 25fb74ef29b5c792a51793ab66b0f4175fb5b324 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='mge_graphql',
version='1.1.0',
license='BSD 3-Clause "New" or "Revised"',
author="Alexandru Plesoiu",
author_email='alexandru@mgesoftware.com',
    description="GraphQL support with data validations, error handling and permission support built on top of graphene.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude=["examples*"]),
url='https://github.com/mgesoftware/mge-graphql',
keywords='api graphene mongodb flask graphql mge_graphql mge-graphql rest relay mgesoftware mge',
install_requires=[
'graphene',
],
project_urls= {
"Documentation": "https://mge-graphql.readthedocs.io/en/latest/",
"Source": "https://github.com/mgesoftware/mge-graphql",
"Tracker": "https://github.com/mgesoftware/mge-graphql/issues",
},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
) | 39.777778 | 119 | 0.653631 |
8f7351e35c901f78b267fed6a9e1782ab6f3c95e | 1,008 | py | Python | optalg/line_search/bisection_wolfe.py | ShkalikovOleh/OptAlg | 03399eee50203dcba834a4d9ab48751142a6de2b | [
"MIT"
] | null | null | null | optalg/line_search/bisection_wolfe.py | ShkalikovOleh/OptAlg | 03399eee50203dcba834a4d9ab48751142a6de2b | [
"MIT"
] | 3 | 2021-01-31T09:34:50.000Z | 2021-02-21T09:01:42.000Z | optalg/line_search/bisection_wolfe.py | ShkalikovOleh/OptAlg | 03399eee50203dcba834a4d9ab48751142a6de2b | [
"MIT"
] | 1 | 2022-03-31T14:02:20.000Z | 2022-03-31T14:02:20.000Z | import numpy as np
from typing import Callable
from autograd import grad as agrad
from .line_searcher import LineSearcher
class BisectionWolfe(LineSearcher):
"""
Bisection method that either computes a step size
satisfying the weak Wolfe conditions or sends
the function values to -inf
"""
def __init__(self, a=1, c1=10**-4, c2=0.9) -> None:
self.__a = a
self.__c1 = c1
self.__c2 = c2
def optimize(self, f: Callable, xk: np.ndarray, pk: np.ndarray):
def gdk(x):
return np.dot(pk.T, agrad(f)(x))
ak = self.__a
d = 0
b = None
while True:
if f(xk - ak * pk) - f(xk) > - self.__c1 * ak * gdk(xk):
b = ak
ak = (d+b)/2
elif gdk(xk - ak * pk) > self.__c2 * gdk(xk):
d = ak
if b is None:
ak = 2 * d
else:
ak = (d+b)/2
else:
return ak
| 25.846154 | 68 | 0.488095 |
561966b11e493fcd88de8d147d3eb87d50b1c979 | 12,886 | py | Python | otp/namepanel/NameTumbler.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | otp/namepanel/NameTumbler.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | otp/namepanel/NameTumbler.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | from pandac.PandaModules import *
from direct.showbase import DirectObject
import random
from direct.task import Task
from direct.gui.DirectGui import *
import string
from direct.gui import OnscreenText
class NameTumbler(DirectFrame):
def __init__(self, nameList, category):
DirectFrame.__init__(self, parent=aspect2d, relief='flat', scale=(1, 1, 1), state='disabled', frameColor=(1,
1,
1,
0))
self.initialiseoptions(NameTumbler)
self.nameList = nameList
self.nameList.sort()
self.category = category
self.tumblerColor = Vec4(1, 1, 1, 1)
self.displayList = [
' '] + [' '] + self.nameList + [' '] + [' ']
self.nameIndex = -1
self.isActive = 1
self.loadTumblerGUI()
def loadTumblerGUI(self):
self.circle = 'src/maps/NameTumblerCheck.tif'
self.background = 'src/maps/NameTumbler.tif'
self.upArrow = 'src/maps/NameTumblerUpArrow.tif'
self.downArrow = 'src/maps/NameTumblerDownArrow.tif'
self.tumblerscrollList = self.makeScrollList(self.displayList, self.makeLabel, [
TextNode.ACenter, 'title'])
self.tumblerscrollList['command'] = self.listsChanged
self.tumblerscrollList.reparentTo(self)
self.hilight = self.makeHighlight((0, 0, -0.15))
self.hilight.reparentTo(self.tumblerscrollList)
if self.category != '':
self.check = self.makeCheckBox((-0.617, 0, 0.374), self.category, (0, 0.25,
0.5,
1), self.toggleTumbler)
self.check.reparentTo(self)
self.getRandomResult()
def unloadTumblerGUI(self):
if self.category != '':
self.check.destroy()
del self.check
self.tumblerscrollList.destroy()
del self.tumblerscrollList
self.hilight.destroy()
del self.hilight
def toggleTumbler(self, value):
if self.isActive:
if self.priority == 1:
messenger.send('CheckTumblerPriority', [self.category])
else:
self.deactivateTumbler()
else:
self.activateTumbler()
if self.linkage > 0:
messenger.send('CheckTumblerLinkage', [self.category])
self.listsChanged()
if self.isActive:
self.tumblerscrollList.refresh()
self.updateCheckBoxes()
def listsChanged(self):
newname = ''
self.nameIndex = self.tumblerscrollList.index + 2
messenger.send('updateNameResult')
def updateLists(self):
self.tumblerscrollList.scrollTo(self.nameIndex - 2)
messenger.send('updateNameResult')
def updateCheckBoxes(self):
if self.category != '':
if self.isActive:
self.check['indicatorValue'] = self.isActive
else:
self.check['indicatorValue'] = -1
self.check.setIndicatorValue()
def nameClickedOn(self, index):
self.nameIndex = index
self.updateLists()
self.listsChanged()
def activateTumbler(self):
self.hilight.show()
self.isActive = 1
self.tumblerscrollList.itemFrame['frameColor'] = self.tumblerColor
def deactivateTumbler(self):
self.hilight.hide()
self.isActive = 0
self.tumblerscrollList.itemFrame['frameColor'] = (0.7, 0.7, 0.7, 1)
def getName(self):
if self.isActive:
name = self.nameList[self.nameIndex - 2]
else:
name = ''
return name
def makeLabel(self, te, index, others):
alig = others[0]
if alig == TextNode.ARight:
newpos = (0.44, 0, 0)
elif alig == TextNode.ALeft:
newpos = (0, 0, 0)
else:
newpos = (0.2, 0, 0)
df = DirectFrame(state='normal', relief=None, text=te, text_scale=0.1, text_pos=newpos, text_align=alig, textMayChange=0)
df.bind(DGG.B1PRESS, lambda x, df=df: self.nameClickedOn(index))
return df
def makeScrollList(self, nitems, nitemMakeFunction, nitemMakeExtraArgs):
it = nitems[:]
ds = DirectScrolledList(items=it, itemMakeFunction=nitemMakeFunction, itemMakeExtraArgs=nitemMakeExtraArgs, parent=aspect2d, relief=None, command=None, scale=0.6, pad=(0.1,
0.1), incButton_image=(self.downArrow, self.upArrow, self.circle, self.downArrow), incButton_relief=None, incButton_scale=(0.2,
0.05,
0.05), incButton_pos=(0, 0, -0.58), decButton_image=(self.upArrow, self.downArrow, self.circle, self.upArrow), decButton_relief=None, decButton_scale=(0.2,
0.05,
0.05), decButton_pos=(0,
0,
0.23), itemFrame_pos=(-0.2,
0,
0.028), itemFrame_scale=1.0, itemFrame_relief=None, itemFrame_image=self.background, itemFrame_image_scale=(0.38,
0,
0.33), itemFrame_image_pos=(0.2, 0, -0.2), itemFrame_frameSize=(-0.05, 0.48, -0.5, 0.1), itemFrame_borderWidth=(0.01,
0.01), numItemsVisible=5)
ds.setTransparency(1)
return ds
def makeCheckBox(self, npos, ntex, ntexcolor, comm):
dcf = DirectCheckButton(parent=aspect2d, relief=None, scale=0.1, boxBorder=0.08, boxImage=self.circle, boxImageScale=(0.4,
0.4,
0.4), boxRelief=None, pos=npos, text=ntex, text_fg=ntexcolor, text_scale=0.8, text_pos=(0.2,
0), indicator_pos=(-0.566667, 0, -0.045), indicator_image_pos=(-0.26, 0, 0.075), command=comm, text_align=TextNode.ALeft)
dcf.setTransparency(1)
return dcf
def makeHighlight(self, npos):
return DirectFrame(parent=aspect2d, relief='flat', state='disabled', frameSize=(-0.25, 0.26, -0.05, 0.05), borderWidth=(0.01,
0.01), pos=npos, frameColor=(1,
0,
1,
0.4))
def getRandomResult(self):
randomName = random.choice(self.nameList)
self.nameIndex = self.displayList.index(randomName)
self.updateCheckBoxes()
self.updateLists() | 82.076433 | 739 | 0.287211 |
491fb72d1bd76826e7f1d45841bef0b9517e09e7 | 3,568 | py | Python | flask_monitoringdashboard/core/config/parser.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | null | null | null | flask_monitoringdashboard/core/config/parser.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | null | null | null | flask_monitoringdashboard/core/config/parser.py | timgates42/Flask-MonitoringDashboard | 0404b05b9a8f1917796e0f314a77a53a754a0b15 | [
"MIT"
] | 1 | 2020-11-21T01:25:51.000Z | 2020-11-21T01:25:51.000Z | """
Helper functions for parsing the arguments from the config file
"""
import ast
import os
from flask_monitoringdashboard.core.logger import log
def parse_version(parser, header, version):
"""
Parse the version given in the config-file.
If both GIT and VERSION are used, the GIT argument is used.
:param parser: the parser to be used for parsing
:param header: name of the header in the configuration file
:param version: the default version
"""
version = parse_string(parser, header, 'APP_VERSION', version)
if parser.has_option(header, 'GIT'):
git = parser.get(header, 'GIT')
try:
# current hash can be found in the link in HEAD-file in git-folder
# The file is specified by: 'ref: <location>'
git_file = (open(os.path.join(git, 'HEAD')).read().rsplit(': ', 1)[1]).rstrip()
# read the git-version
version_file = os.path.join(git , git_file)
if os.path.exists(version_file):
version = open(version_file).read()
# cut version to at most 6 chars
return version[:6]
else:
# Return "dummy" version in case of no git version file found
return version
except IOError:
log("Error reading one of the files to retrieve the current git-version.")
raise
return version
def parse_string(parser, header, arg_name, arg_value):
"""
Parse an argument from the given parser. If the argument is not specified, return the default
value
:param parser: the parser to be used for parsing
:param header: name of the header in the configuration file
:param arg_name: name in the configuration file
:param arg_value: default value, the the value is not found
"""
env = get_environment_var(arg_name)
arg_value = env if env else arg_value
if parser.has_option(header, arg_name):
return parser.get(header, arg_name)
return arg_value
def parse_bool(parser, header, arg_name, arg_value):
"""
Parse an argument from the given parser. If the argument is not specified, return the default
value
:param parser: the parser to be used for parsing
:param header: name of the header in the configuration file
:param arg_name: name in the configuration file
    :param arg_value: default value, used if the value is not found
"""
env = get_environment_var(arg_name)
arg_value = env if env else arg_value
if parser.has_option(header, arg_name):
return parser.get(header, arg_name) == 'True'
return arg_value
def parse_literal(parser, header, arg_name, arg_value):
"""
Parse an argument from the given parser. If the argument is not specified, return the default
value
:param parser: the parser to be used for parsing
:param header: name of the header in the configuration file
:param arg_name: name in the configuration file
    :param arg_value: default value, used if the value is not found
"""
env = get_environment_var(arg_name)
arg_value = ast.literal_eval(env) if env else arg_value
if parser.has_option(header, arg_name):
return ast.literal_eval(parser.get(header, arg_name))
return arg_value
def get_environment_var(environment_var):
"""
Retrieve the arg_value from the environment variable
:param environment_var: name of the environment variable
:return: either the value of the environment_var or None
"""
return os.environ.get(environment_var, None)
| 37.557895 | 97 | 0.677971 |
4c935e9c346140de9b820e43097e9d12ccb38216 | 38,444 | py | Python | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | import os
import time
from dagster_graphql.client.query import (
LAUNCH_PIPELINE_EXECUTION_MUTATION,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
)
from dagster_graphql.test.utils import (
execute_dagster_graphql,
infer_job_or_pipeline_selector,
infer_pipeline_selector,
infer_repository_selector,
)
from dagster import AssetKey, DagsterEventType
from dagster.utils import safe_tempfile_path
# from .graphql_context_test_suite import GraphQLContextVariant, make_graphql_context_test_suite
from .graphql_context_test_suite import (
AllRepositoryGraphQLContextTestMatrix,
ExecutingGraphQLContextTestMatrix,
)
GET_ASSET_KEY_QUERY = """
query AssetKeyQuery {
assetsOrError {
__typename
...on AssetConnection {
nodes {
key {
path
}
}
}
}
}
"""
GET_ASSET_MATERIALIZATION = """
query AssetQuery($assetKey: AssetKeyInput!) {
assetOrError(assetKey: $assetKey) {
... on Asset {
assetMaterializations(limit: 1) {
label
assetLineage {
assetKey {
path
}
partitions
}
}
}
... on AssetNotFoundError {
__typename
}
}
}
"""
GET_ASSET_MATERIALIZATION_WITH_PARTITION = """
query AssetQuery($assetKey: AssetKeyInput!) {
assetOrError(assetKey: $assetKey) {
... on Asset {
assetMaterializations(limit: 1) {
partition
label
}
}
}
}
"""
WIPE_ASSETS = """
mutation AssetKeyWipe($assetKeys: [AssetKeyInput!]!) {
wipeAssets(assetKeys: $assetKeys) {
__typename
}
}
"""
GET_ASSET_MATERIALIZATION_TIMESTAMP = """
query AssetQuery($assetKey: AssetKeyInput!, $asOf: String) {
assetOrError(assetKey: $assetKey) {
... on Asset {
assetMaterializations(beforeTimestampMillis: $asOf) {
timestamp
}
}
}
}
"""
GET_ASSET_IN_PROGRESS_RUNS = """
query AssetGraphLiveQuery($assetKeys: [AssetKeyInput!]) {
assetsLatestInfo(assetKeys: $assetKeys) {
assetKey {
path
}
latestMaterialization {
timestamp
runId
}
unstartedRunIds
inProgressRunIds
}
}
"""
GET_ASSET_LATEST_RUN_STATS = """
query AssetGraphQuery($repositorySelector: RepositorySelector!) {
repositoryOrError(repositorySelector: $repositorySelector) {
... on Repository {
latestRunByStep{
... on LatestRun {
stepKey
run {
runId
}
}
}
}
}
}
"""
GET_ASSET_NODES_FROM_KEYS = """
query AssetNodeQuery($pipelineSelector: PipelineSelector!, $assetKeys: [AssetKeyInput!]) {
assetNodes(pipeline: $pipelineSelector, assetKeys: $assetKeys) {
id
}
}
"""
GET_ASSET_PARTITIONS = """
query AssetNodeQuery($pipelineSelector: PipelineSelector!) {
assetNodes(pipeline: $pipelineSelector) {
id
partitionKeys
}
}
"""
GET_LATEST_MATERIALIZATION_PER_PARTITION = """
query AssetNodeQuery($pipelineSelector: PipelineSelector!, $partitions: [String!]) {
assetNodes(pipeline: $pipelineSelector) {
id
partitionKeys
latestMaterializationByPartition(partitions: $partitions) {
partition
stepStats {
startTime
}
}
}
}
"""
GET_ASSET_OBSERVATIONS = """
query AssetGraphQuery($assetKey: AssetKeyInput!) {
assetOrError(assetKey: $assetKey) {
... on Asset {
assetObservations {
label
description
runOrError {
... on Run {
jobName
}
}
assetKey {
path
}
metadataEntries {
label
description
... on TextMetadataEntry {
text
}
}
}
}
}
}
"""
GET_MATERIALIZATION_COUNT_BY_PARTITION = """
query AssetNodeQuery($pipelineSelector: PipelineSelector!) {
assetNodes(pipeline: $pipelineSelector) {
id
materializationCountByPartition {
... on MaterializationCountByPartition {
partition
materializationCount
}
}
}
}
"""
GET_ASSET_MATERIALIZATION_AFTER_TIMESTAMP = """
query AssetQuery($assetKey: AssetKeyInput!, $afterTimestamp: String) {
assetOrError(assetKey: $assetKey) {
... on Asset {
assetMaterializations(afterTimestampMillis: $afterTimestamp) {
timestamp
}
}
}
}
"""
GET_ASSET_OP = """
query AssetQuery($assetKey: AssetKeyInput!) {
assetOrError(assetKey: $assetKey) {
... on Asset {
definition {
op {
name
description
inputDefinitions {
name
}
outputDefinitions {
name
}
}
}
}
}
}
"""
GET_OP_ASSETS = """
query OpQuery($repositorySelector: RepositorySelector!, $opName: String!) {
repositoryOrError(repositorySelector: $repositorySelector) {
... on Repository {
usedSolid(name: $opName) {
definition {
assetNodes {
assetKey {
path
}
}
}
}
}
}
}
"""
CROSS_REPO_ASSET_GRAPH = """
query AssetNodeQuery {
assetNodes {
id
dependencyKeys {
path
}
dependedByKeys {
path
}
}
}
"""
GET_RUN_MATERIALIZATIONS = """
query RunAssetsQuery {
runsOrError {
... on Runs {
results {
assetMaterializations {
assetKey {
path
}
}
}
}
}
}
"""
def _create_run(
graphql_context, pipeline_name, mode="default", step_keys=None, asset_selection=None
):
if asset_selection:
selector = infer_job_or_pipeline_selector(
graphql_context, pipeline_name, asset_selection=asset_selection
)
else:
selector = infer_pipeline_selector(
graphql_context,
pipeline_name,
)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={"executionParams": {"selector": selector, "mode": mode, "stepKeys": step_keys}},
)
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
graphql_context.instance.run_launcher.join()
return result.data["launchPipelineExecution"]["run"]["runId"]
def _get_sorted_materialization_events(graphql_context, run_id):
return sorted(
[
event
for event in graphql_context.instance.all_logs(run_id=run_id)
if event.dagster_event_type == DagsterEventType.ASSET_MATERIALIZATION
],
key=lambda event: event.get_dagster_event().asset_key,
)
class TestAssetAwareEventLog(ExecutingGraphQLContextTestMatrix):
def test_all_asset_keys(self, graphql_context, snapshot):
_create_run(graphql_context, "multi_asset_pipeline")
result = execute_dagster_graphql(graphql_context, GET_ASSET_KEY_QUERY)
assert result.data
assert result.data["assetsOrError"]
assert result.data["assetsOrError"]["nodes"]
# sort by materialization asset key to keep list order is consistent for snapshot
result.data["assetsOrError"]["nodes"].sort(key=lambda e: e["key"]["path"][0])
snapshot.assert_match(result.data)
def test_get_asset_key_materialization(self, graphql_context, snapshot):
_create_run(graphql_context, "single_asset_pipeline")
result = execute_dagster_graphql(
graphql_context, GET_ASSET_MATERIALIZATION, variables={"assetKey": {"path": ["a"]}}
)
assert result.data
snapshot.assert_match(result.data)
def test_get_asset_key_not_found(self, graphql_context, snapshot):
_create_run(graphql_context, "single_asset_pipeline")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION,
variables={"assetKey": {"path": ["bogus", "asset"]}},
)
assert result.data
snapshot.assert_match(result.data)
def test_get_partitioned_asset_key_materialization(self, graphql_context, snapshot):
_create_run(graphql_context, "partitioned_asset_pipeline")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_WITH_PARTITION,
variables={"assetKey": {"path": ["a"]}},
)
assert result.data
snapshot.assert_match(result.data)
def test_get_asset_key_lineage(self, graphql_context, snapshot):
selector = infer_pipeline_selector(graphql_context, "asset_lineage_pipeline")
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
graphql_context.instance.run_launcher.join()
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION,
variables={"assetKey": {"path": ["b"]}},
)
assert result.data
snapshot.assert_match(result.data)
def test_get_partitioned_asset_key_lineage(self, graphql_context, snapshot):
selector = infer_pipeline_selector(graphql_context, "partitioned_asset_lineage_pipeline")
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={"executionParams": {"selector": selector, "mode": "default"}},
)
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
graphql_context.instance.run_launcher.join()
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION,
variables={"assetKey": {"path": ["b"]}},
)
assert result.data
snapshot.assert_match(result.data)
def test_asset_wipe(self, graphql_context):
_create_run(graphql_context, "single_asset_pipeline")
_create_run(graphql_context, "multi_asset_pipeline")
asset_keys = graphql_context.instance.all_asset_keys()
assert AssetKey("a") in asset_keys
result = execute_dagster_graphql(
graphql_context, WIPE_ASSETS, variables={"assetKeys": [{"path": ["a"]}]}
)
assert result.data
assert result.data["wipeAssets"]
assert result.data["wipeAssets"]["__typename"] == "AssetWipeSuccess"
asset_keys = graphql_context.instance.all_asset_keys()
assert AssetKey("a") not in asset_keys
def test_asset_asof_timestamp(self, graphql_context):
_create_run(graphql_context, "asset_tag_pipeline")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_TIMESTAMP,
variables={"assetKey": {"path": ["a"]}},
)
assert result.data
assert result.data["assetOrError"]
materializations = result.data["assetOrError"]["assetMaterializations"]
assert len(materializations) == 1
first_timestamp = int(materializations[0]["timestamp"])
as_of_timestamp = first_timestamp + 1
time.sleep(1.1)
_create_run(graphql_context, "asset_tag_pipeline")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_TIMESTAMP,
variables={"assetKey": {"path": ["a"]}},
)
assert result.data
assert result.data["assetOrError"]
materializations = result.data["assetOrError"]["assetMaterializations"]
assert len(materializations) == 2
second_timestamp = int(materializations[0]["timestamp"])
assert second_timestamp > as_of_timestamp
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_TIMESTAMP,
variables={"assetKey": {"path": ["a"]}, "asOf": as_of_timestamp},
)
assert result.data
assert result.data["assetOrError"]
materializations = result.data["assetOrError"]["assetMaterializations"]
assert len(materializations) == 1
assert first_timestamp == int(materializations[0]["timestamp"])
# Test afterTimestamp before the first timestamp, which should return both results
after_timestamp = first_timestamp - 1
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_AFTER_TIMESTAMP,
variables={"assetKey": {"path": ["a"]}, "afterTimestamp": after_timestamp},
)
assert result.data
assert result.data["assetOrError"]
materializations = result.data["assetOrError"]["assetMaterializations"]
assert len(materializations) == 2
# Test afterTimestamp between the two timestamps, which should only return the first result
after_timestamp = first_timestamp + 1
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_AFTER_TIMESTAMP,
variables={"assetKey": {"path": ["a"]}, "afterTimestamp": after_timestamp},
)
assert result.data
assert result.data["assetOrError"]
materializations = result.data["assetOrError"]["assetMaterializations"]
assert len(materializations) == 1
assert second_timestamp == int(materializations[0]["timestamp"])
def test_asset_node_in_pipeline(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "two_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_NODES_FROM_KEYS,
variables={"pipelineSelector": selector, "assetKeys": [{"path": ["asset_one"]}]},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 1
asset_node = result.data["assetNodes"][0]
assert asset_node["id"] == '["asset_one"]'
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_NODES_FROM_KEYS,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 2
asset_node = result.data["assetNodes"][0]
assert asset_node["id"] == '["asset_one"]'
def test_asset_partitions_in_pipeline(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "two_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_PARTITIONS,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 2
asset_node = result.data["assetNodes"][0]
assert asset_node["partitionKeys"] == []
selector = infer_pipeline_selector(graphql_context, "static_partitioned_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_PARTITIONS,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 2
asset_node = result.data["assetNodes"][0]
assert asset_node["partitionKeys"] and asset_node["partitionKeys"] == [
"a",
"b",
"c",
"d",
]
asset_node = result.data["assetNodes"][1]
assert asset_node["partitionKeys"] and asset_node["partitionKeys"] == [
"a",
"b",
"c",
"d",
]
selector = infer_pipeline_selector(graphql_context, "time_partitioned_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_PARTITIONS,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
assert len(result.data["assetNodes"]) == 2
asset_node = result.data["assetNodes"][0]
# test partition starts at "2021-05-05-01:00". Should be > 100 partition keys
# since partition is hourly
assert asset_node["partitionKeys"] and len(asset_node["partitionKeys"]) > 100
assert asset_node["partitionKeys"][0] == "2021-05-05-01:00"
assert asset_node["partitionKeys"][1] == "2021-05-05-02:00"
def test_latest_materialization_per_partition(self, graphql_context):
_create_run(graphql_context, "partition_materialization_job")
selector = infer_pipeline_selector(graphql_context, "partition_materialization_job")
result = execute_dagster_graphql(
graphql_context,
GET_LATEST_MATERIALIZATION_PER_PARTITION,
variables={"pipelineSelector": selector, "partitions": ["a"]},
)
assert result.data
assert result.data["assetNodes"]
asset_node = result.data["assetNodes"][0]
assert len(asset_node["latestMaterializationByPartition"]) == 1
assert asset_node["latestMaterializationByPartition"][0] == None
result = execute_dagster_graphql(
graphql_context,
GET_LATEST_MATERIALIZATION_PER_PARTITION,
variables={"pipelineSelector": selector, "partitions": ["c"]},
)
assert result.data
assert result.data["assetNodes"]
asset_node = result.data["assetNodes"][0]
assert len(asset_node["latestMaterializationByPartition"]) == 1
materialization = asset_node["latestMaterializationByPartition"][0]
start_time = materialization["stepStats"]["startTime"]
assert materialization["partition"] == "c"
_create_run(graphql_context, "partition_materialization_job")
result = execute_dagster_graphql(
graphql_context,
GET_LATEST_MATERIALIZATION_PER_PARTITION,
variables={"pipelineSelector": selector, "partitions": ["c", "a"]},
)
assert result.data and result.data["assetNodes"]
asset_node = result.data["assetNodes"][0]
assert len(asset_node["latestMaterializationByPartition"]) == 2
materialization = asset_node["latestMaterializationByPartition"][0]
new_start_time = materialization["stepStats"]["startTime"]
assert new_start_time > start_time
assert asset_node["latestMaterializationByPartition"][1] == None
def test_materialization_count_by_partition(self, graphql_context):
# test for unpartitioned asset
selector = infer_pipeline_selector(graphql_context, "two_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_MATERIALIZATION_COUNT_BY_PARTITION,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
materialization_count = result.data["assetNodes"][0]["materializationCountByPartition"]
assert len(materialization_count) == 0
# test for partitioned asset with no materializations
selector = infer_pipeline_selector(graphql_context, "partition_materialization_job")
result = execute_dagster_graphql(
graphql_context,
GET_MATERIALIZATION_COUNT_BY_PARTITION,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
materialization_count_result = result.data["assetNodes"][0][
"materializationCountByPartition"
]
assert len(materialization_count_result) == 4
for materialization_count in materialization_count_result:
assert materialization_count["materializationCount"] == 0
# test for partitioned asset with 1 materialization in 1 partition
_create_run(graphql_context, "partition_materialization_job")
selector = infer_pipeline_selector(graphql_context, "partition_materialization_job")
result = execute_dagster_graphql(
graphql_context,
GET_MATERIALIZATION_COUNT_BY_PARTITION,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
asset_node = result.data["assetNodes"][0]
materialization_count = asset_node["materializationCountByPartition"]
assert len(materialization_count) == 4
assert materialization_count[0]["partition"] == "a"
assert materialization_count[0]["materializationCount"] == 0
assert materialization_count[2]["partition"] == "c"
assert materialization_count[2]["materializationCount"] == 1
# test for partitioned asset with 2 materializations in 1 partition
_create_run(graphql_context, "partition_materialization_job")
result = execute_dagster_graphql(
graphql_context,
GET_MATERIALIZATION_COUNT_BY_PARTITION,
variables={"pipelineSelector": selector},
)
assert result.data
assert result.data["assetNodes"]
asset_node = result.data["assetNodes"][0]
materialization_count = asset_node["materializationCountByPartition"]
assert len(materialization_count) == 4
assert materialization_count[0]["partition"] == "a"
assert materialization_count[0]["materializationCount"] == 0
assert materialization_count[2]["partition"] == "c"
assert materialization_count[2]["materializationCount"] == 2
def test_asset_observations(self, graphql_context):
_create_run(graphql_context, "observation_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_OBSERVATIONS,
variables={"assetKey": {"path": ["asset_yields_observation"]}},
)
assert result.data
assert result.data["assetOrError"]
observations = result.data["assetOrError"]["assetObservations"]
assert observations
assert observations[0]["runOrError"]["jobName"] == "observation_job"
asset_key_path = observations[0]["assetKey"]["path"]
assert asset_key_path
assert asset_key_path == ["asset_yields_observation"]
metadata = observations[0]["metadataEntries"]
assert metadata
assert metadata[0]["text"] == "FOO"
assert observations[0]["label"] == "asset_yields_observation"
def test_asset_op(self, graphql_context, snapshot):
_create_run(graphql_context, "two_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_OP,
variables={"assetKey": {"path": ["asset_two"]}},
)
assert result.data
snapshot.assert_match(result.data)
def test_op_assets(self, graphql_context, snapshot):
_create_run(graphql_context, "two_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_OP_ASSETS,
variables={
"repositorySelector": infer_repository_selector(graphql_context),
"opName": "asset_two",
},
)
assert result.data
snapshot.assert_match(result.data)
def test_latest_run_by_asset(self, graphql_context):
def get_response_by_step(response):
return {stat["stepKey"]: stat for stat in response}
selector = infer_repository_selector(graphql_context)
# Confirm that when no runs are present, run returned is None
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_LATEST_RUN_STATS,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["repositoryOrError"]
assert result.data["repositoryOrError"]["latestRunByStep"]
result = get_response_by_step(result.data["repositoryOrError"]["latestRunByStep"])
assert result["asset_1"]["stepKey"] == "asset_1"
assert result["asset_1"]["run"] == None
# Test with 1 run on all assets
first_run_id = _create_run(graphql_context, "failure_assets_job")
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_LATEST_RUN_STATS,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["repositoryOrError"]
result = get_response_by_step(result.data["repositoryOrError"]["latestRunByStep"])
assert result["asset_1"]["run"]["runId"] == first_run_id
assert result["asset_2"]["run"]["runId"] == first_run_id
assert result["asset_3"]["run"]["runId"] == first_run_id
# Confirm that step selection is respected among 5 latest runs
run_id = _create_run(graphql_context, "failure_assets_job", step_keys=["asset_3"])
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_LATEST_RUN_STATS,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["repositoryOrError"]
assert result.data["repositoryOrError"]["latestRunByStep"]
result = get_response_by_step(result.data["repositoryOrError"]["latestRunByStep"])
assert result["asset_1"]["run"]["runId"] == first_run_id
assert result["asset_2"]["run"]["runId"] == first_run_id
assert result["asset_3"]["run"]["runId"] == run_id
def test_get_run_materialization(self, graphql_context, snapshot):
_create_run(graphql_context, "single_asset_pipeline")
result = execute_dagster_graphql(graphql_context, GET_RUN_MATERIALIZATIONS)
assert result.data
assert result.data["runsOrError"]
assert result.data["runsOrError"]["results"]
assert len(result.data["runsOrError"]["results"]) == 1
assert len(result.data["runsOrError"]["results"][0]["assetMaterializations"]) == 1
snapshot.assert_match(result.data)
def test_asset_selection_in_run(self, graphql_context):
# Generate materializations for bar asset
run_id = _create_run(graphql_context, "foo_job", asset_selection=[{"path": ["bar"]}])
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
assert run.asset_selection == {AssetKey("bar")}
def test_execute_pipeline_subset(self, graphql_context):
# Assets foo and bar are upstream dependencies of asset foo_bar
# Execute subselection with asset bar
run_id = _create_run(graphql_context, "foo_job", asset_selection=[{"path": ["bar"]}])
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 1
assert events[0].get_dagster_event().asset_key == AssetKey("bar")
# Execute subselection with assets foo and foo_bar
run_id = _create_run(
graphql_context, "foo_job", asset_selection=[{"path": ["foo"]}, {"path": ["foo_bar"]}]
)
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 2
assert events[0].get_dagster_event().asset_key == AssetKey("foo")
assert events[1].get_dagster_event().asset_key == AssetKey("foo_bar")
def test_execute_dependent_subset(self, graphql_context):
# Asset foo is upstream of baz but not directly connected
# Generate materializations for all assets upstream of baz
run_id = _create_run(
graphql_context,
"foo_job",
asset_selection=[{"path": ["foo"]}, {"path": ["bar"]}, {"path": ["foo_bar"]}],
)
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
# Generate materializations with subselection of foo and baz
run_id = _create_run(
graphql_context, "foo_job", asset_selection=[{"path": ["foo"]}, {"path": ["baz"]}]
)
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 2
assert events[0].get_dagster_event().asset_key == AssetKey("baz")
assert events[1].get_dagster_event().asset_key == AssetKey("foo")
def test_execute_unconnected_subset(self, graphql_context):
# Assets "foo" and "unconnected" are disconnected assets
run_id = _create_run(
graphql_context,
"foo_job",
asset_selection=[{"path": ["foo"]}, {"path": ["unconnected"]}],
)
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 2
assert events[0].get_dagster_event().asset_key == AssetKey("foo")
assert events[1].get_dagster_event().asset_key == AssetKey("unconnected")
def test_reexecute_subset(self, graphql_context):
run_id = _create_run(graphql_context, "foo_job", asset_selection=[{"path": ["bar"]}])
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 1
assert events[0].get_dagster_event().asset_key == AssetKey("bar")
assert run.asset_selection == {AssetKey("bar")}
selector = infer_job_or_pipeline_selector(
graphql_context, "foo_job", asset_selection=[{"path": ["bar"]}]
)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_REEXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"executionMetadata": {"parentRunId": run_id, "rootRunId": run_id},
},
},
)
graphql_context.instance.run_launcher.join()
run_id = result.data["launchPipelineReexecution"]["run"]["runId"]
run = graphql_context.instance.get_run_by_id(run_id)
assert run.is_finished
events = _get_sorted_materialization_events(graphql_context, run_id)
assert len(events) == 1
assert events[0].get_dagster_event().asset_key == AssetKey("bar")
assert run.asset_selection == {AssetKey("bar")}
class TestPersistentInstanceAssetInProgress(ExecutingGraphQLContextTestMatrix):
def test_asset_in_progress(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "hanging_job")
run_id = "foo"
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {"runId": run_id},
}
},
)
assert not result.errors
assert result.data
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_IN_PROGRESS_RUNS,
variables={
"assetKeys": [
{"path": "first_asset"},
{"path": "hanging_asset"},
{"path": "never_runs_asset"},
]
},
)
graphql_context.instance.run_launcher.terminate(run_id)
assert result.data
assert result.data["assetsLatestInfo"]
assets_live_info = result.data["assetsLatestInfo"]
assets_live_info = sorted(assets_live_info, key=lambda res: res["assetKey"]["path"])
assert len(assets_live_info) == 3
assert assets_live_info[0]["assetKey"]["path"] == ["first_asset"]
assert assets_live_info[0]["latestMaterialization"]["runId"] == "foo"
assert assets_live_info[0]["unstartedRunIds"] == []
assert assets_live_info[0]["inProgressRunIds"] == []
assert assets_live_info[1]["assetKey"]["path"] == ["hanging_asset"]
assert assets_live_info[1]["latestMaterialization"] == None
assert assets_live_info[1]["unstartedRunIds"] == []
assert assets_live_info[1]["inProgressRunIds"] == ["foo"]
assert assets_live_info[2]["assetKey"]["path"] == ["never_runs_asset"]
assert assets_live_info[2]["latestMaterialization"] == None
assert assets_live_info[2]["unstartedRunIds"] == ["foo"]
assert assets_live_info[2]["inProgressRunIds"] == []
def test_graph_asset_in_progress(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "hanging_graph_asset_job")
run_id = "foo"
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {"runId": run_id},
}
},
)
assert not result.errors
assert result.data
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_IN_PROGRESS_RUNS,
variables={
"assetKeys": [
{"path": "hanging_graph"},
{"path": "downstream_asset"},
]
},
)
graphql_context.instance.run_launcher.terminate(run_id)
assert result.data
assert result.data["assetsLatestInfo"]
assets_live_info = result.data["assetsLatestInfo"]
assets_live_info = sorted(assets_live_info, key=lambda res: res["assetKey"]["path"])
assert len(assets_live_info) == 2
assert assets_live_info[1]["assetKey"]["path"] == ["hanging_graph"]
assert assets_live_info[1]["latestMaterialization"] == None
assert assets_live_info[1]["unstartedRunIds"] == []
assert assets_live_info[1]["inProgressRunIds"] == ["foo"]
assert assets_live_info[0]["assetKey"]["path"] == ["downstream_asset"]
assert assets_live_info[0]["latestMaterialization"] == None
assert assets_live_info[0]["unstartedRunIds"] == ["foo"]
assert assets_live_info[0]["inProgressRunIds"] == []
class TestCrossRepoAssetDependedBy(AllRepositoryGraphQLContextTestMatrix):
def test_cross_repo_assets(self, graphql_context):
repository_location = graphql_context.get_repository_location("test")
repository = repository_location.get_repository("upstream_assets_repository")
selector = {
"repositoryLocationName": repository_location.name,
"repositoryName": repository.name,
}
result = execute_dagster_graphql(
graphql_context, CROSS_REPO_ASSET_GRAPH, variables={"repositorySelector": selector}
)
asset_nodes = result.data["assetNodes"]
upstream_asset = [node for node in asset_nodes if node["id"] == '["upstream_asset"]'][0]
dependent_asset_keys = [{"path": ["downstream_asset1"]}, {"path": ["downstream_asset2"]}]
result_dependent_keys = sorted(
upstream_asset["dependedByKeys"], key=lambda node: node.get("path")[0]
)
assert result_dependent_keys == dependent_asset_keys
| 36.753346 | 99 | 0.601966 |
354e36ed619b6cf028203a1a33deb14ad56fbb0d | 36,275 | py | Python | libpy/durable/engine.py | pauljurczak/rules | 72070f4d686142dcc44ee26a6cad0e8c531f9d24 | [
"MIT"
] | 1,045 | 2015-01-05T16:11:45.000Z | 2022-03-31T10:48:53.000Z | libpy/durable/engine.py | subhasis-dev/rules | 2b1020d5f73785ea0e944fc8f9641056e0d9ef59 | [
"MIT"
] | 370 | 2015-02-08T20:11:01.000Z | 2022-02-09T18:59:10.000Z | libpy/durable/engine.py | subhasis-dev/rules | 2b1020d5f73785ea0e944fc8f9641056e0d9ef59 | [
"MIT"
] | 222 | 2015-03-19T18:17:46.000Z | 2022-03-06T00:54:30.000Z | import json
import copy
import durable_rules_engine
import threading
import inspect
import random
import time
import datetime
import os
import sys
import traceback
from . import logger
def _unix_now():
dt = datetime.datetime.now()
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
class MessageNotHandledException(Exception):
def __init__(self, message):
self.message = 'Could not handle message: {0}'.format(json.dumps(message, ensure_ascii=False))
class MessageObservedException(Exception):
def __init__(self, message):
self.message = 'Message has already been observed: {0}'.format(json.dumps(message, ensure_ascii=False))
class Closure(object):
def __init__(self, host, ruleset, state, message, handle):
self.host = host
self.s = Content(state)
self._handle = handle
self._ruleset = ruleset
self._completed = False
self._deleted = False
self._start_time = _unix_now()
if isinstance(message, dict):
self._m = message
else:
self.m = []
for one_message in message:
if ('m' in one_message) and len(one_message) == 1:
one_message = one_message['m']
self.m.append(Content(one_message))
def post(self, ruleset_name, message = None):
if message:
if not 'sid' in message:
message['sid'] = self.s.sid
if isinstance(message, Content):
message = message._d
            self.host.post(ruleset_name, message)
else:
message = ruleset_name
if not 'sid' in message:
message['sid'] = self.s.sid
if isinstance(message, Content):
message = message._d
self._ruleset.assert_event(message)
def assert_fact(self, ruleset_name, fact = None):
if fact:
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = fact._d
self.host.assert_fact(ruleset_name, fact)
else:
fact = ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = fact._d
self._ruleset.assert_fact(fact)
def retract_fact(self, ruleset_name, fact = None):
if fact:
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = fact._d
self.host.retract_fact(ruleset_name, fact)
else:
fact = ruleset_name
if not 'sid' in fact:
fact['sid'] = self.s.sid
if isinstance(fact, Content):
fact = fact._d
self._ruleset.retract_fact(fact)
def start_timer(self, timer_name, duration, manual_reset = False):
self._ruleset.start_timer(self.s.sid, timer_name, duration, manual_reset)
def cancel_timer(self, timer_name):
self._ruleset.cancel_timer(self.s.sid, timer_name)
def renew_action_lease(self):
if _unix_now() - self._start_time < 10:
self._start_time = _unix_now()
self._ruleset.renew_action_lease(self.s.sid)
def delete_state(self):
self._deleted = True
def get_facts(self):
return self._ruleset.get_facts(self.s.sid)
def get_pending_events(self):
return self._ruleset.get_pending_events(self.s.sid)
def _has_completed(self):
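        # (added comment) Actions hold a roughly 10-second lease: once more time than
        # that has elapsed since the action started, it is reported as completed so the
        # engine can reclaim it; renew_action_lease extends the lease while work is ongoing.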
if _unix_now() - self._start_time > 10:
self._completed = True
value = self._completed
self._completed = True
return value
def _is_deleted(self):
return self._deleted
def __getattr__(self, name):
if name == '_m':
return None
if name in self._m:
return Content(self._m[name])
else:
return None
class Content(object):
def items(self):
return self._d.items()
def __init__(self, data):
self._d = data
def __getitem__(self, key):
if key in self._d:
data = self._d[key]
if isinstance(data, dict):
data = Content(data)
return data
else:
return None
def __setitem__(self, key, value):
        if value is None:
del self._d[key]
elif isinstance(value, Content):
self._d[key] = value._d
else:
self._d[key] = value
def __iter__(self):
        return iter(self._d)
def __contains__(self, key):
return key in self._d
def __getattr__(self, name):
return self.__getitem__(name)
def __setattr__(self, name, value):
if name == '_d':
self.__dict__['_d'] = value
else:
self.__setitem__(name, value)
def __repr__(self):
return repr(self._d)
def __str__(self):
return str(self._d)
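# --- Added illustrative sketch; not part of the original module ---
def _content_usage_example():
    # Content proxies a plain dict: attribute and item access read through to the
    # wrapped dict, nested dicts are wrapped on read, and assignments write through.
    # The payload below is made up for illustration.
    c = Content({'sid': 1, 'order': {'amount': 100}})
    assert c.sid == 1                      # attribute access delegates to the dict
    assert c['order'].amount == 100        # nested dict is re-wrapped as Content
    c.total = 5                            # writes through to the underlying dict
    assert 'total' in c and c._d['total'] == 5
    return c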
class Promise(object):
def __init__(self, func):
self._func = func
self._next = None
self._sync = True
self._timer = None
self.root = self
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count == 2:
self._sync = False
elif arg_count != 1:
raise Exception('Invalid function signature')
def continue_with(self, next):
if (isinstance(next, Promise)):
self._next = next
elif (hasattr(next, '__call__')):
self._next = Promise(next)
else:
raise Exception('Unexpected Promise Type')
self._next.root = self.root
return self._next
def run(self, c, complete):
def timeout(max_time):
if _unix_now() > max_time:
c.s.exception = 'timeout expired'
complete(None)
else:
c.renew_action_lease()
self._timer = threading.Timer(5, timeout, (max_time, ))
self._timer.daemon = True
self._timer.start()
if self._sync:
try:
self._func(c)
except BaseException as error:
t, v, tb = sys.exc_info()
c.s.exception = 'exception caught {0}, traceback {1}'.format(str(error), traceback.format_tb(tb))
except:
c.s.exception = 'unknown exception'
if self._next:
self._next.run(c, complete)
else:
complete(None)
else:
try:
def callback(e):
if self._timer:
self._timer.cancel()
self._timer = None
if e:
c.s.exception = str(e)
if self._next:
self._next.run(c, complete)
else:
complete(None)
time_left = self._func(c, callback)
if time_left:
self._timer = threading.Timer(5, timeout, (_unix_now() + time_left, ))
self._timer.daemon = True
self._timer.start()
except BaseException as error:
t, v, tb = sys.exc_info()
c.s.exception = 'exception caught {0}, traceback {1}'.format(str(error), traceback.format_tb(tb))
complete(None)
except:
c.s.exception = 'unknown exception'
complete(None)
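# --- Added illustrative sketch; not part of the original module ---
def _promise_chain_example():
    # A Promise wraps a rule action; continue_with links actions into a chain that
    # run() later walks with the same closure. One-argument callables run
    # synchronously, two-argument callables receive a completion callback.
    # The step names below are hypothetical.
    def first(c):
        c.s.status = 'started'             # synchronous step
    def second(c, complete):
        c.s.status = 'finished'            # asynchronous step
        complete(None)                     # pass an exception instead to record it on state
    chain = Promise(first)
    chain.continue_with(second)            # returns the tail promise; chain.root stays at `first`
    return chain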
class To(Promise):
def __init__(self, from_state, to_state, assert_state):
super(To, self).__init__(self._execute)
self._from_state = from_state
self._to_state = to_state
self._assert_state = assert_state
def _execute(self, c):
c.s.running = True
if self._from_state != self._to_state:
try:
if self._from_state:
if c.m and isinstance(c.m, list):
c.retract_fact(c.m[0].chart_context)
else:
c.retract_fact(c.chart_context)
if self._assert_state:
c.assert_fact({ 'label': self._to_state, 'chart': 1 })
else:
c.post({ 'label': self._to_state, 'chart': 1 })
except MessageNotHandledException:
pass
class Ruleset(object):
def __init__(self, name, host, ruleset_definition):
self._actions = {}
self._name = name
self._host = host
for rule_name, rule in ruleset_definition.items():
action = rule['run']
del rule['run']
if isinstance(action, str):
self._actions[rule_name] = Promise(host.get_action(action))
elif isinstance(action, Promise):
self._actions[rule_name] = action.root
elif (hasattr(action, '__call__')):
self._actions[rule_name] = Promise(action)
self._handle = durable_rules_engine.create_ruleset(name, json.dumps(ruleset_definition, ensure_ascii=False))
self._definition = ruleset_definition
def _handle_result(self, result, message):
if result[0] == 1:
raise MessageNotHandledException(message)
elif result[0] == 2:
raise MessageObservedException(message)
elif result[0] == 3:
return 0
return result[1]
def assert_event(self, message):
return self._handle_result(durable_rules_engine.assert_event(self._handle, json.dumps(message, ensure_ascii=False)), message)
def assert_events(self, messages):
return self._handle_result(durable_rules_engine.assert_events(self._handle, json.dumps(messages, ensure_ascii=False)), messages)
def assert_fact(self, fact):
return self._handle_result(durable_rules_engine.assert_fact(self._handle, json.dumps(fact, ensure_ascii=False)), fact)
def assert_facts(self, facts):
return self._handle_result(durable_rules_engine.assert_facts(self._handle, json.dumps(facts, ensure_ascii=False)), facts)
def retract_fact(self, fact):
return self._handle_result(durable_rules_engine.retract_fact(self._handle, json.dumps(fact, ensure_ascii=False)), fact)
def retract_facts(self, facts):
return self._handle_result(durable_rules_engine.retract_facts(self._handle, json.dumps(facts, ensure_ascii=False)), facts)
def start_timer(self, sid, timer, timer_duration, manual_reset):
if sid != None:
sid = str(sid)
durable_rules_engine.start_timer(self._handle, timer_duration, manual_reset, timer, sid)
def cancel_timer(self, sid, timer_name):
if sid != None:
sid = str(sid)
durable_rules_engine.cancel_timer(self._handle, sid, timer_name)
def update_state(self, state):
state['$s'] = 1
return durable_rules_engine.update_state(self._handle, json.dumps(state, ensure_ascii=False))
def get_state(self, sid):
if sid != None:
sid = str(sid)
return json.loads(durable_rules_engine.get_state(self._handle, sid))
def delete_state(self, sid):
if sid != None:
sid = str(sid)
durable_rules_engine.delete_state(self._handle, sid)
def renew_action_lease(self, sid):
if sid != None:
sid = str(sid)
durable_rules_engine.renew_action_lease(self._handle, sid)
def get_facts(self, sid):
if sid != None:
sid = str(sid)
return json.loads(durable_rules_engine.get_facts(self._handle, sid))
def get_pending_events(self, sid):
if sid != None:
sid = str(sid)
return json.loads(durable_rules_engine.get_events(self._handle, sid))
def set_store_message_callback(self, func):
durable_rules_engine.set_store_message_callback(self._handle, func)
def set_delete_message_callback(self, func):
durable_rules_engine.set_delete_message_callback(self._handle, func)
def set_queue_message_callback(self, func):
durable_rules_engine.set_queue_message_callback(self._handle, func)
def set_get_stored_messages_callback(self, func):
durable_rules_engine.set_get_stored_messages_callback(self._handle, func)
def set_get_queued_messages_callback(self, func):
durable_rules_engine.set_get_queued_messages_callback(self._handle, func)
def complete_get_queued_messages(self, sid, queued_messages):
if sid != None:
sid = str(sid)
durable_rules_engine.complete_get_queued_messages(self._handle, sid, queued_messages)
def set_get_idle_state_callback(self, func):
durable_rules_engine.set_get_idle_state_callback(self._handle, func)
def complete_get_idle_state(self, sid, stored_messages):
if sid != None:
sid = str(sid)
durable_rules_engine.complete_get_idle_state(self._handle, sid, stored_messages)
def get_definition(self):
return self._definition
@staticmethod
def create_rulesets(host, ruleset_definitions):
branches = {}
for name, definition in ruleset_definitions.items():
if name.rfind('$state') != -1:
name = name[:name.rfind('$state')]
branches[name] = Statechart(name, host, definition)
elif name.rfind('$flow') != -1:
name = name[:name.rfind('$flow')]
branches[name] = Flowchart(name, host, definition)
else:
branches[name] = Ruleset(name, host, definition)
return branches
def dispatch_timers(self):
return durable_rules_engine.assert_timers(self._handle)
def _flush_actions(self, state, result_container, state_offset, complete):
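        # (added comment) Drains the actions scheduled for this state: each loop
        # iteration runs one rule action through its Promise chain, writes the updated
        # state back, and asks the engine for the next scheduled action via
        # complete_and_start_action; failures abandon the action instead.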
while 'message' in result_container:
action_name = None
for action_name, message in result_container['message'].items():
break
del(result_container['message'])
c = Closure(self._host, self, state, message, state_offset)
def action_callback(e):
if c._has_completed():
return
if e:
durable_rules_engine.abandon_action(self._handle, c._handle)
complete(e, None)
else:
try:
durable_rules_engine.update_state(self._handle, json.dumps(c.s._d, ensure_ascii=False))
new_result = durable_rules_engine.complete_and_start_action(self._handle, c._handle)
if new_result:
result_container['message'] = json.loads(new_result)
else:
complete(None, state)
except BaseException as error:
t, v, tb = sys.exc_info()
logger.exception('base exception type %s, value %s, traceback %s', t, str(v), traceback.format_tb(tb))
durable_rules_engine.abandon_action(self._handle, c._handle)
complete(error, None)
                    except:
                        logger.exception('unknown exception')
durable_rules_engine.abandon_action(self._handle, c._handle)
complete('unknown error', None)
if c._is_deleted():
try:
self.delete_state(c.s['sid'])
except:
pass
self._actions[action_name].run(c, action_callback)
def do_actions(self, state_handle, complete):
try:
result = durable_rules_engine.start_action_for_state(self._handle, state_handle)
if not result:
complete(None, None)
else:
self._flush_actions(json.loads(result[0]), {'message': json.loads(result[1])}, state_handle, complete)
except BaseException as error:
complete(error, None)
def dispatch(self):
def callback(error, result):
pass
result = durable_rules_engine.start_action(self._handle)
if result:
self._flush_actions(json.loads(result[0]), {'message': json.loads(result[1])}, result[2], callback)
class Statechart(Ruleset):
def __init__(self, name, host, chart_definition):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(None, None, None, chart_definition, ruleset_definition)
super(Statechart, self).__init__(name, host, ruleset_definition)
self._definition = chart_definition
self._definition['$type'] = 'stateChart'
def _transform(self, parent_name, parent_triggers, parent_start_state, chart_definition, rules):
start_state = {}
reflexive_states = {}
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
start_state[qualified_name] = True
for trigger_name, trigger in state.items():
if ('to' in trigger and trigger['to'] == state_name) or 'count' in trigger or 'cap' in trigger:
reflexive_states[qualified_name] = True
for state_name, state in chart_definition.items():
qualified_name = state_name
if parent_name:
qualified_name = '{0}.{1}'.format(parent_name, state_name)
triggers = {}
if parent_triggers:
for parent_trigger_name, trigger in parent_triggers.items():
triggers['{0}.{1}'.format(qualified_name, parent_trigger_name)] = trigger
for trigger_name, trigger in state.items():
if trigger_name != '$chart':
if ('to' in trigger) and parent_name:
trigger['to'] = '{0}.{1}'.format(parent_name, trigger['to'])
triggers['{0}.{1}'.format(qualified_name, trigger_name)] = trigger
if '$chart' in state:
self._transform(qualified_name, triggers, start_state, state['$chart'], rules)
else:
for trigger_name, trigger in triggers.items():
rule = {}
state_test = {'chart_context': {'$and':[{'label': qualified_name}, {'chart': 1}]}}
if 'pri' in trigger:
rule['pri'] = trigger['pri']
if 'count' in trigger:
rule['count'] = trigger['count']
if 'cap' in trigger:
rule['cap'] = trigger['cap']
if 'all' in trigger:
rule['all'] = list(trigger['all'])
rule['all'].append(state_test)
elif 'any' in trigger:
rule['all'] = [state_test, {'m$any': trigger['any']}]
else:
rule['all'] = [state_test]
if 'run' in trigger:
if isinstance(trigger['run'], str):
rule['run'] = Promise(self._host.get_action(trigger['run']))
elif isinstance(trigger['run'], Promise):
rule['run'] = trigger['run']
elif hasattr(trigger['run'], '__call__'):
rule['run'] = Promise(trigger['run'])
if 'to' in trigger:
from_state = None
if qualified_name in reflexive_states:
from_state = qualified_name
to_state = trigger['to']
assert_state = False
if to_state in reflexive_states:
assert_state = True
if 'run' in rule:
rule['run'].continue_with(To(from_state, to_state, assert_state))
else:
rule['run'] = To(from_state, to_state, assert_state)
if to_state in start_state:
del start_state[to_state]
if parent_start_state and to_state in parent_start_state:
del parent_start_state[to_state]
else:
raise Exception('Trigger {0} destination not defined'.format(trigger_name))
                    rules[trigger_name] = rule
started = False
for state_name in start_state.keys():
if started:
raise Exception('Chart {0} has more than one start state {1}'.format(self._name, state_name))
started = True
if parent_name:
                rules[parent_name + '$start'] = {'all':[{'chart_context': {'$and': [{'label': parent_name}, {'chart':1}]}}], 'run': To(None, state_name, False)}
else:
                rules['$start'] = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}], 'run': To(None, state_name, False)}
if not started:
raise Exception('Chart {0} has no start state'.format(self._name))
class Flowchart(Ruleset):
def __init__(self, name, host, chart_definition):
self._name = name
self._host = host
ruleset_definition = {}
self._transform(chart_definition, ruleset_definition)
super(Flowchart, self).__init__(name, host, ruleset_definition)
self._definition = chart_definition
self._definition['$type'] = 'flowChart'
def _transform(self, chart_definition, rules):
visited = {}
reflexive_stages = {}
for stage_name, stage in chart_definition.items():
if 'to' in stage:
if isinstance(stage['to'], str):
if stage['to'] == stage_name:
reflexive_stages[stage_name] = True
else:
for transition_name, transition in stage['to'].items():
if transition_name == stage_name or 'count' in transition or 'cap' in transition:
reflexive_stages[stage_name] = True
for stage_name, stage in chart_definition.items():
stage_test = {'chart_context': {'$and':[{'label': stage_name}, {'chart':1}]}}
from_stage = None
if stage_name in reflexive_stages:
from_stage = stage_name
if 'to' in stage:
if isinstance(stage['to'], str):
next_stage = None
rule = {'all': [stage_test]}
if stage['to'] in chart_definition:
next_stage = chart_definition[stage['to']]
else:
raise Exception('Stage {0} not found'.format(stage['to']))
assert_stage = False
if stage['to'] in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, stage['to'], assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, stage['to'])] = rule
visited[stage['to']] = True
else:
for transition_name, transition in stage['to'].items():
rule = {}
next_stage = None
if 'pri' in transition:
rule['pri'] = transition['pri']
if 'count' in transition:
rule['count'] = transition['count']
if 'cap' in transition:
rule['cap'] = transition['cap']
if 'all' in transition:
rule['all'] = list(transition['all'])
rule['all'].append(stage_test)
elif 'any' in transition:
rule['all'] = [stage_test, {'m$any': transition['any']}]
else:
rule['all'] = [stage_test]
if transition_name in chart_definition:
next_stage = chart_definition[transition_name]
else:
raise Exception('Stage {0} not found'.format(transition_name))
assert_stage = False
if transition_name in reflexive_stages:
assert_stage = True
if not 'run' in next_stage:
rule['run'] = To(from_stage, transition_name, assert_stage)
else:
if isinstance(next_stage['run'], str):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(next_stage['run'])
rules['{0}.{1}'.format(stage_name, transition_name)] = rule
visited[transition_name] = True
started = False
for stage_name, stage in chart_definition.items():
if not stage_name in visited:
if started:
raise Exception('Chart {0} has more than one start state'.format(self._name))
rule = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}]}
if not 'run' in stage:
rule['run'] = To(None, stage_name, False)
else:
if isinstance(stage['run'], str):
rule['run'] = To(None, stage_name, False).continue_with(Promise(self._host.get_action(stage['run'])))
elif isinstance(stage['run'], Promise) or hasattr(stage['run'], '__call__'):
rule['run'] = To(None, stage_name, False).continue_with(stage['run'])
rules['$start.{0}'.format(stage_name)] = rule
started = True
class Host(object):
def __init__(self, ruleset_definitions = None):
self._ruleset_directory = {}
self._ruleset_list = []
self.store_message_callback = None
self.delete_message_callback = None
self.queue_message_callback = None
self.get_stored_messages_callback = None
self.get_queued_messages_callback = None
self.get_idle_state_callback = None
if ruleset_definitions:
self.register_rulesets(ruleset_definitions)
self._run()
def get_action(self, action_name):
raise Exception('Action with name {0} not found'.format(action_name))
def load_ruleset(self, ruleset_name):
raise Exception('Ruleset with name {0} not found'.format(ruleset_name))
def save_ruleset(self, ruleset_name, ruleset_definition):
return
def get_ruleset(self, ruleset_name):
if ruleset_name in self._ruleset_directory:
return self._ruleset_directory[ruleset_name]
else:
ruleset_definition = self.load_ruleset(ruleset_name)
self.register_rulesets(ruleset_definition)
return self._ruleset_directory[ruleset_name]
def set_rulesets(self, ruleset_definitions):
self.register_rulesets(ruleset_definitions)
for ruleset_name, ruleset_definition in ruleset_definitions.items():
self.save_ruleset(ruleset_name, ruleset_definition)
def _handle_function(self, rules, func, args, complete):
error = [0]
result = [0]
def callback(e, state):
error[0] = e
result[0] = state
if not complete:
rules.do_actions(func(args), callback)
if error[0]:
raise error[0]
return result[0]
else:
try:
rules.do_actions(func(args), complete)
except BaseException as e:
complete(e, None)
def post(self, ruleset_name, message, complete = None):
if isinstance(message, list):
return self.post_batch(ruleset_name, message)
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.assert_event, message, complete)
def post_batch(self, ruleset_name, messages, complete = None):
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.assert_events, messages, complete)
def assert_fact(self, ruleset_name, fact, complete = None):
if isinstance(fact, list):
return self.assert_facts(ruleset_name, fact)
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.assert_fact, fact, complete)
def assert_facts(self, ruleset_name, facts, complete = None):
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.assert_facts, facts, complete)
def retract_fact(self, ruleset_name, fact, complete = None):
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.retract_fact, fact, complete)
def retract_facts(self, ruleset_name, facts, complete = None):
rules = self.get_ruleset(ruleset_name)
return self._handle_function(rules, rules.retract_facts, facts, complete)
def update_state(self, ruleset_name, state, complete = None):
rules = self.get_ruleset(ruleset_name)
self._handle_function(rules, rules.update_state, state, complete)
def get_state(self, ruleset_name, sid = None):
return self.get_ruleset(ruleset_name).get_state(sid)
def delete_state(self, ruleset_name, sid = None):
self.get_ruleset(ruleset_name).delete_state(sid)
def renew_action_lease(self, ruleset_name, sid = None):
self.get_ruleset(ruleset_name).renew_action_lease(sid)
def get_facts(self, ruleset_name, sid = None):
return self.get_ruleset(ruleset_name).get_facts(sid)
def get_pending_events(self, ruleset_name, sid = None):
return self.get_ruleset(ruleset_name).get_pending_events(sid)
def set_store_message_callback(self, func):
self.store_message_callback = func
for ruleset in self._ruleset_list:
ruleset.set_store_message_callback(func)
def set_delete_message_callback(self, func):
self.delete_message_callback = func
for ruleset in self._ruleset_list:
ruleset.set_delete_message_callback(func)
def set_queue_message_callback(self, func):
self.queue_message_callback = func
for ruleset in self._ruleset_list:
ruleset.set_queue_message_callback(func)
def set_get_queued_messages_callback(self, func):
self.get_queued_messages_callback = func
for ruleset in self._ruleset_list:
ruleset.set_get_queued_messages_callback(func)
def complete_get_queued_messages(self, ruleset_name, sid, queued_messages):
self.get_ruleset(ruleset_name).complete_get_queued_messages(sid, queued_messages)
def set_get_idle_state_callback(self, func):
self.get_idle_state_callback = func
for ruleset in self._ruleset_list:
ruleset.set_get_idle_state_callback(func)
def complete_get_idle_state(self, ruleset_name, sid, stored_messages):
self.get_ruleset(ruleset_name).complete_get_idle_state(sid, stored_messages)
def register_rulesets(self, ruleset_definitions):
rulesets = Ruleset.create_rulesets(self, ruleset_definitions)
for ruleset_name, ruleset in rulesets.items():
if ruleset_name in self._ruleset_directory:
raise Exception('Ruleset with name {0} already registered'.format(ruleset_name))
else:
self._ruleset_directory[ruleset_name] = ruleset
self._ruleset_list.append(ruleset)
if self.store_message_callback:
ruleset.set_store_message_callback(self.store_message_callback)
if self.delete_message_callback:
ruleset.set_delete_message_callback(self.delete_message_callback)
if self.queue_message_callback:
ruleset.set_queue_message_callback(self.queue_message_callback)
if self.get_stored_messages_callback:
ruleset.set_get_stored_messages_callback(self.get_stored_messages_callback)
if self.get_queued_messages_callback:
ruleset.set_get_queued_messages_callback(self.get_queued_messages_callback)
if self.get_idle_state_callback:
ruleset.set_get_idle_state_callback(self.get_idle_state_callback)
return list(rulesets.keys())
def _run(self):
def dispatch_ruleset(index):
if not len(self._ruleset_list):
self._d_timer = threading.Timer(0.5, dispatch_ruleset, (0, ))
self._d_timer.daemon = True
self._d_timer.start()
else:
ruleset = self._ruleset_list[index]
try:
ruleset.dispatch()
except BaseException as e:
logger.exception('Error dispatching ruleset')
timeout = 0
if (index == (len(self._ruleset_list) -1)):
timeout = 0.2
self._d_timer = threading.Timer(timeout, dispatch_ruleset, ((index + 1) % len(self._ruleset_list), ))
self._d_timer.daemon = True
self._d_timer.start()
def dispatch_timers(index):
if not len(self._ruleset_list):
self._t_timer = threading.Timer(0.5, dispatch_timers, (0, ))
self._t_timer.daemon = True
self._t_timer.start()
else:
ruleset = self._ruleset_list[index]
try:
ruleset.dispatch_timers()
except BaseException as e:
logger.exception('Error dispatching timers')
timeout = 0
if (index == (len(self._ruleset_list) -1)):
timeout = 0.2
self._t_timer = threading.Timer(timeout, dispatch_timers, ((index + 1) % len(self._ruleset_list), ))
self._t_timer.daemon = True
self._t_timer.start()
self._d_timer = threading.Timer(0.1, dispatch_ruleset, (0, ))
self._d_timer.daemon = True
self._d_timer.start()
self._t_timer = threading.Timer(0.1, dispatch_timers, (0, ))
self._t_timer.daemon = True
self._t_timer.start()
| 37.55176 | 161 | 0.564576 |
94141393796c6fb66523e0bde45d8a2ac773d4d4 | 25,947 | py | Python | arguments.py | puraminy/GLM | f18acd842507341a5cbcf0f39095ecc635018eb9 | [
"MIT"
] | null | null | null | arguments.py | puraminy/GLM | f18acd842507341a5cbcf0f39095ecc635018eb9 | [
"MIT"
] | null | null | null | arguments.py | puraminy/GLM | f18acd842507341a5cbcf0f39095ecc635018eb9 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparser configuration"""
import argparse
import os
import torch
import deepspeed
import json
from utils import get_hostname
def add_model_config_args(parser):
"""Model arguments"""
group = parser.add_argument_group('model', 'model configuration')
group.add_argument('--transformer-xl', action='store_true', help='use transformer-xl for training')
group.add_argument('--pretrained-bert', action='store_true',
                       help='use a pretrained bert-large-uncased model instead '
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument('--encoder-decoder', action='store_true',
help="use the encoder-decoder architecture for blocklm")
group.add_argument('--attention-dropout', type=float, default=0.1,
help='dropout probability for attention weights')
group.add_argument('--num-attention-heads', type=int, default=16,
help='num of transformer attention heads')
group.add_argument('--hidden-size', type=int, default=1024,
help='tansformer hidden size')
group.add_argument('--intermediate-size', type=int, default=None,
help='transformer embedding dimension for FFN'
'set to 4*`--hidden-size` if it is None')
group.add_argument('--num-layers', type=int, default=24,
help='num decoder layers')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='layer norm epsilon')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument('--output-dropout', type=float, default=0.1,
help='dropout probability for pooled output')
group.add_argument('--max-position-embeddings', type=int, default=512,
help='maximum number of position embeddings to use')
group.add_argument('--vocab-size', type=int, default=30522,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument('--deep-init', action='store_true',
                       help='initialize bert model similar to gpt2 model. '
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value. '
                            'This is added for computational efficiency reasons.')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
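# --- Added illustrative sketch; not part of the original module ---
def _example_build_parser():
    # Each add_*_args helper attaches one argument group to a shared parser and
    # returns it; the helpers are normally composed by the top-level argument entry
    # point. The CLI values below are made up for illustration.
    parser = argparse.ArgumentParser(description='example')
    parser = add_model_config_args(parser)
    parser = add_fp16_config_args(parser)
    args = parser.parse_args(['--num-layers', '12', '--hidden-size', '768', '--fp16'])
    assert args.num_layers == 12 and args.fp16
    return args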
def add_fp16_config_args(parser):
"""Mixed precision arguments."""
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode')
group.add_argument('--fp32-embedding', action='store_true',
help='embedding in fp32')
group.add_argument('--fp32-layernorm', action='store_true',
help='layer norm in fp32')
group.add_argument('--fp32-tokentypes', action='store_true',
help='embedding token types in fp32')
group.add_argument('--fp32-allreduce', action='store_true',
help='all-reduce in fp32')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
                            'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument('--min-scale', type=float, default=1,
help='Minimum loss scale for dynamic loss scale')
return parser
def add_training_args(parser):
"""Training arguments."""
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--experiment-name', type=str, default="gpt-345M",
help="The experiment name for summary and checkpoint")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--gradient-accumulation-steps', type=int, default=1,
                       help='Number of gradient accumulation steps')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--epochs', type=int, default=None,
                       help='Number of finetuning epochs. Zero results in evaluation only.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--train-iters', type=int, default=1000000,
help='total number of iterations to train over all training runs')
group.add_argument('--label-smoothing', type=float, default=0.0)
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--summary-dir', type=str, default="", help="The directory to store the summary")
group.add_argument('--seed', type=int, default=1234, help='random seed')
# Batch producer arguments
group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention mask after '
'end-of-document token.')
# Learning rate.
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr-decay-ratio', type=float, default=0.1)
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
                       help='percentage of data to warmup on (.01 = 1%% of all '
'training iters). Default 0.01')
group.add_argument('--switch-linear', action='store_true', help="Switch to linear decay for cosine decay")
# model checkpointing
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--new-save-directory', action='store_true')
group.add_argument('--save-epoch', type=int, default=1,
help='number of epochs between saves')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
group.add_argument('--resume-dataloader', action='store_true',
help='Resume the dataloader when resuming training. '
                       'Does not apply to tfrecords dataloader, try resuming '
'with a different seed in this case.')
# distributed training args
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed training. One of [gloo, nccl]',
choices=['nccl', 'gloo'])
group.add_argument('--DDP-impl', default='torch', choices=['local', 'torch'],
help='which DistributedDataParallel implementation to use.')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher')
# BlockLM training args
group.add_argument('--block-lm', action='store_true', help="whether use the BlockLM pre-training")
group.add_argument('--masked-lm', action='store_true', help='whether to use the mlm objective')
group.add_argument('--bert-prob', type=float, default=0.5)
group.add_argument('--gpt-infill-prob', type=float, default=0.5)
group.add_argument('--gpt-min-ratio', type=float, default=0.5)
group.add_argument('--gap-sentence-prob', type=float, default=0.0)
group.add_argument('--avg-block-length', type=int, default=3)
group.add_argument('--task-mask', action='store_true', help="Use different mask for generation and blank filling")
group.add_argument('--no-shuffle-block', action='store_true', help="not shuffle the blocks when filling the blank")
group.add_argument('--no-block-position', action='store_true',
help='Use (rough) absolute positions instead of block positions')
group.add_argument('--sentinel-token', action='store_true',
help="Use sentinel (mask) tokens to replace 2d position encoding")
group.add_argument('--block-mask-prob', type=float, default=0.0)
group.add_argument('--context-mask-ratio', type=float, default=0.0)
group.add_argument('--random-position', action='store_true',
help="Use random start position to cover all the position embeddings")
group.add_argument('--nonautoregressive', action='store_true', help="whether add a non-autoregressive loss")
return parser
def add_evaluation_args(parser):
"""Evaluation arguments."""
group = parser.add_argument_group('validation', 'validation configurations')
group.add_argument('--eval-batch-size', type=int, default=None,
                       help='Data Loader batch size for evaluation datasets. '
'Defaults to `--batch-size`')
group.add_argument('--eval-iters', type=int, default=100,
                       help='number of iterations to run for evaluation '
'validation/test for')
group.add_argument('--eval-interval', type=int, default=1000,
help='interval between running evaluation on validation set')
group.add_argument('--eval-epoch', type=int, default=1,
help='epoch between running evaluation on validation set')
group.add_argument('--eval-seq-length', type=int, default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32)
return parser
def add_text_generate_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('Text generation', 'configurations')
group.add_argument("--temperature", type=float, default=1.0)
group.add_argument("--top_p", type=float, default=0.0)
group.add_argument("--top_k", type=int, default=0)
group.add_argument("--out-seq-length", type=int, default=256)
group.add_argument("--num-beams", type=int, default=1)
group.add_argument("--length-penalty", type=float, default=0.0)
group.add_argument("--no-repeat-ngram-size", type=int, default=0)
group.add_argument("--min-tgt-length", type=int, default=0)
group.add_argument("--select-topk", action='store_true')
group.add_argument("--blank-maskratio", type=float, default=0.1)
return parser
def add_data_args(parser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--model-parallel-size', type=int, default=1,
help='size of the model parallel.')
group.add_argument('--shuffle', action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--train-data', nargs='+', default=None,
help='Whitespace separated filenames or corpora names '
'for training.')
group.add_argument('--valid-data', nargs='*', default=None,
help="""Filename for validation data.""")
group.add_argument('--test-data', nargs='*', default=None,
help="""Filename for testing""")
group.add_argument('--data-dir', type=str, default=None, help="The data path to all the data files")
group.add_argument('--input-data-sizes-file', type=str, default='sizes.txt',
help='the filename containing all the shards sizes')
group.add_argument('--delim', default=',',
help='delimiter used to parse csv data files')
group.add_argument('--text-key', default='sentence',
help='key to use to extract text from json/csv')
group.add_argument('--eval-text-key', default=None,
help='key to use to extract text from '
'json/csv evaluation datasets')
group.add_argument('--split', default='1000,1,1',
help='comma-separated list of proportions for training,'
' validation, and test split')
group.add_argument('--lazy-loader', action='store_true',
help='whether to lazy read the data set')
group.add_argument('--loose-json', action='store_true',
help='Use loose json (one json-formatted string per '
'newline), instead of tight json (data file is one '
'json string)')
group.add_argument('--presplit-sentences', action='store_true',
help='Dataset content consists of documents where '
'each document consists of newline separated sentences')
group.add_argument('--num-workers', type=int, default=2,
help="""Number of workers to use for dataloading""")
group.add_argument('--tokenizer-model-type', type=str,
default=None,
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument('--tokenizer-type', type=str,
default='BertWordPieceTokenizer',
choices=['CharacterLevelTokenizer',
'SentencePieceTokenizer',
'BertWordPieceTokenizer',
'GPT2BPETokenizer',
'ChineseSPTokenizer'],
help='what type of tokenizer to use')
group.add_argument('--no-pre-tokenize', action='store_true')
group.add_argument("--cache-dir", default=None, type=str,
help="Where to store pre-trained BERT downloads")
group.add_argument('--use-tfrecords', action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument('--seq-length', type=int, default=512,
help="Maximum sequence length to process")
group.add_argument('--mem-length', type=int, default=0,
help="The memory length to preserve")
group.add_argument('--max-preds-per-seq', type=int, default=None,
                       help='Maximum number of predictions to use per sequence. '
                       'Defaults to math.ceil(`--seq-length`*.15/10)*10. '
                       'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
group.add_argument('--sample-one-document', action='store_true', help='only sample one document in one sample')
group.add_argument('--load-splits', type=str, default=None, help="The path to load split indices from")
group.add_argument('--save-splits', type=str, default=None, help="The path to save split indices to")
group.add_argument('--save-test-data', type=str, default=None, help="The path to save the test data")
return parser
def add_finetune_config_args(parser):
group = parser.add_argument_group('finetune', 'finetune configurations')
group.add_argument('--task', type=str, help='Task name.')
group.add_argument('--load-pretrained', type=str, help="Load pretrained model", default=None)
group.add_argument('--pool-token', type=str, choices=['start', 'pad', 'cls'],
help='The token to pool the sequence representation', default='cls')
group.add_argument('--continuous-prompt', action='store_true', help="Use continuous prompt for PET")
group.add_argument('--cloze-eval', action='store_true', help='Evaluation dataset with cloze task')
group.add_argument('--segment-length', type=int, default=0, help="The maximum segment length for cloze evaluation")
group.add_argument('--loss-func', type=str, choices=["cross_entropy", "hinge", "generative", "mix"],
default="cross_entropy")
group.add_argument('--pattern-id', type=int, default=0)
group.add_argument('--fast-decode', action='store_true',
help="Fast decode for multi-token cloze. Can only be used without checkpoint activation.")
group.add_argument('--eval-valid', action='store_true', help="Whether evaluate on the valid set")
group.add_argument('--validation-metric', type=str, default=None)
group.add_argument('--unidirectional', action='store_true', help="Use the left to right language model")
group.add_argument('--src-seq-length', type=int, default=None)
group.add_argument('--tgt-seq-length', type=int, default=None)
group.add_argument('--adam-beta1', type=float, default=0.9)
group.add_argument('--adam-beta2', type=float, default=0.999)
group.add_argument('--adam-eps', type=float, default=1e-8)
group.add_argument('--optimizer', type=str, choices=['adam', 'adafactor'], default='adam')
group.add_argument('--wsc-negative', action='store_true')
group.add_argument('--overwrite', action='store_true')
group.add_argument('--no-validation', action='store_true')
group.add_argument('--start-validation', action='store_true')
return parser
def get_args():
"""Parse all the args."""
parser = argparse.ArgumentParser(description='PyTorch BERT Model')
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_text_generate_args(parser)
parser = add_data_args(parser)
parser = add_finetune_config_args(parser)
# Include DeepSpeed configuration arguments
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
if not args.train_data and not args.data_dir:
print('WARNING: No training data specified')
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
if hasattr(args, 'deepspeed_mpi') and args.deepspeed_mpi:
mpi_define_env(args)
elif os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
# We are using (OpenMPI) mpirun for launching distributed data parallel processes
local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
# Possibly running with Slurm
num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
nodeid = int(os.getenv('SLURM_NODEID', '0'))
args.local_rank = local_rank
args.rank = nodeid * local_size + local_rank
args.world_size = num_nodes * local_size
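    # Model-parallel size can never exceed the total number of processes.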
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print('using world size: {} and model-parallel size: {} '.format(
args.world_size, args.model_parallel_size))
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(' > using dynamic loss scaling')
    # The fp32_* / fp16_* args are only meant to be active when fp16 is set,
    # so they should all default to False otherwise.
if not args.fp16:
args.fp32_embedding = False
args.fp32_tokentypes = False
args.fp32_layernorm = False
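    # If a DeepSpeed config file is supplied, let it override the matching
    # command-line values (micro batch size, gradient accumulation, optimizer).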
if hasattr(args, "deepspeed") and args.deepspeed and args.deepspeed_config is not None:
with open(args.deepspeed_config) as file:
deepspeed_config = json.load(file)
if "train_micro_batch_size_per_gpu" in deepspeed_config:
args.batch_size = deepspeed_config["train_micro_batch_size_per_gpu"]
if "gradient_accumulation_steps" in deepspeed_config:
args.gradient_accumulation_steps = deepspeed_config["gradient_accumulation_steps"]
else:
args.gradient_accumulation_steps = 1
if "optimizer" in deepspeed_config:
optimizer_params_config = deepspeed_config["optimizer"].get("params", {})
args.lr = optimizer_params_config.get("lr", args.lr)
args.weight_decay = optimizer_params_config.get("weight_decay", args.weight_decay)
return args
def mpi_define_env(args):
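    """Derive rank/local rank/world size from MPI and export the torch.distributed env vars."""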
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
master_addr = get_hostname()
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
args.local_rank = local_rank
args.world_size = world_size
args.rank = rank
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = "29500" # TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
print(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'],
args.local_rank,
os.environ['WORLD_SIZE'],
os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
| 54.510504 | 119 | 0.623425 |
3b717949342d62f8e8a3732b0649509a69e6232e | 1,763 | py | Python | tools/create_version_h.py | linear-rpc/linear-cpp | 9f3b2a924a11848829f00e325b5449db0a4c6e30 | [
"MIT"
] | 19 | 2015-09-02T03:25:55.000Z | 2021-03-07T15:02:33.000Z | tools/create_version_h.py | linear-rpc/linear-cpp | 9f3b2a924a11848829f00e325b5449db0a4c6e30 | [
"MIT"
] | 2 | 2015-12-11T12:03:11.000Z | 2017-09-13T09:03:33.000Z | tools/create_version_h.py | linear-rpc/linear-cpp | 9f3b2a924a11848829f00e325b5449db0a4c6e30 | [
"MIT"
] | 6 | 2015-11-25T08:55:51.000Z | 2021-03-07T15:02:36.000Z | #!/usr/bin/env python
from create_header import do_replace
import optparse
import re
import string
import subprocess
import sys
# for creating version.h
GET_COMMIT_ID = 'git log --pretty=format:%H -1'
VERSION_ID_KEY = '@LINEAR_VERSION_ID@'
COMMIT_ID_KEY = '@LINEAR_COMMIT_ID@'
_usage = 'usage: %prog [options]'
def read_commit_id_and_version(ac, kv):
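    """Fill kv with the current git commit id and the version parsed from AC_INIT in configure.ac."""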
try:
proc = subprocess.Popen(GET_COMMIT_ID,
stdout=subprocess.PIPE,
shell=True)
kv[COMMIT_ID_KEY] = proc.stdout.readlines()[0]
except:
kv[COMMIT_ID_KEY] = ""
f = open(ac, 'r')
if f:
for line in f:
inner = re.search('AC_INIT\((.*)\)', line)
if not inner:
continue
r = re.findall('\[(.+?)\]', inner.group(1))
kv[VERSION_ID_KEY] = r[0] + '-' + r[1]
return kv
if __name__ == '__main__':
parser = optparse.OptionParser(usage=_usage)
parser.add_option('-c', '--configure_ac', dest='ac',
help='configure.ac file', metavar='/path/to/configure.ac')
parser.add_option('-i', '--input', dest='input',
help='input file', metavar='/path/to/file_name')
parser.add_option('-o', '--output', dest='output',
help='output file', metavar='/path/to/file_name')
(opts, args) = parser.parse_args()
if not opts.ac:
parser.error('you must specify configure.ac file')
if not opts.input:
parser.error('you must specify input file')
if not opts.output:
parser.error('you must specify output file')
opts.replace = {
VERSION_ID_KEY: 'package-version',
COMMIT_ID_KEY: '-'
}
opts.replace = read_commit_id_and_version(opts.ac, opts.replace)
sys.exit(do_replace(opts))
| 28.901639 | 80 | 0.609189 |
9de477417b3c368883b0c5f60886bb5d8d891b7f | 2,101 | py | Python | agsadmin/rest_admin/system/Directories.py | christopherblanchfield/agsadmin | 989cb3795aacf285ccf74ee51b0de26bf2f48bc3 | [
"BSD-3-Clause"
] | 2 | 2015-12-07T05:53:29.000Z | 2020-09-13T18:12:15.000Z | agsadmin/rest_admin/system/Directories.py | christopherblanchfield/agsadmin | 989cb3795aacf285ccf74ee51b0de26bf2f48bc3 | [
"BSD-3-Clause"
] | 4 | 2015-03-09T05:59:14.000Z | 2018-01-09T00:12:56.000Z | agsadmin/rest_admin/system/Directories.py | christopherblanchfield/agsadmin | 989cb3795aacf285ccf74ee51b0de26bf2f48bc3 | [
"BSD-3-Clause"
] | 5 | 2015-03-09T01:05:24.000Z | 2019-09-09T23:01:21.000Z | from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ..._endpoint_base import EndpointBase
from ..._utils import send_session_request
from .Directory import Directory
class Directories(EndpointBase):
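    """Proxy for the server's system/directories REST resource."""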
def __init__(self, requests_session, server_url):
super().__init__(requests_session, server_url)
@property
def _url_full(self):
return "{0}/system/directories".format(self._url_base)
def get(self, name):
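        """Return a Directory proxy object for the registered directory with the given name."""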
dir_json = send_session_request(
self._session,
self._create_operation_request("{0}/{1}".format(self._url_full, name), method = "GET")).json()
return Directory._create_from_json(dir_json, self._session, self._url_base)
def list(self):
"""
Gets a list of directory proxy objects for directories registered on the server.
"""
response = self._get()
directories = []
for dir_json in response["directories"]:
directories.append(Directory._create_from_json(dir_json, self._session, self._url_base))
return directories
def register(self, name, physical_path, directory_type, description = None, cleanup_mode = Directory.CleanupMode.NONE, max_file_age = None):
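        """Register a new directory on the server."""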
new_dir_data = {
"name": name,
"physicalPath": physical_path,
"directoryType": Directory.DirectoryType(directory_type).value,
"cleanupMode": Directory.CleanupMode(cleanup_mode).value
}
if description != None:
new_dir_data["description"] = description
if max_file_age != None:
new_dir_data["maxFileAge"] = max_file_age
send_session_request(
self._session,
self._create_operation_request(
self,
operation = "register",
method = "POST",
data = new_dir_data
)
) | 35.610169 | 144 | 0.630652 |
d237784cbcc411dccc301df26927cab8d1c83520 | 7,150 | py | Python | ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py | zenmerlin/ansible-workstation-configs | a44f7816344a3d82e94050ff7c3a8edb9c054b37 | [
"MIT"
] | 1 | 2022-02-24T18:15:56.000Z | 2022-02-24T18:15:56.000Z | ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_protected_branch.py | zenmerlin/ansible-workstation-configs | a44f7816344a3d82e94050ff7c3a8edb9c054b37 | [
"MIT"
] | null | null | null | ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_protected_branch.py | zenmerlin/ansible-workstation-configs | a44f7816344a3d82e94050ff7c3a8edb9c054b37 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: gitlab_protected_branch
short_description: (un)Marking existing branches for protection
version_added: 3.4.0
description:
- (un)Marking existing branches for protection.
author:
- "Werner Dijkerman (@dj-wasabi)"
requirements:
- python >= 2.7
- python-gitlab >= 2.3.0
extends_documentation_fragment:
- community.general.auth_basic
- community.general.gitlab
options:
state:
description:
      - Create or delete protected branch.
default: present
type: str
choices: ["present", "absent"]
project:
description:
- The path and name of the project.
required: true
type: str
name:
description:
- The name of the branch that needs to be protected.
      - Can make use of a wildcard character like C(production/*) or just have C(main) or C(develop) as value.
required: true
type: str
merge_access_levels:
description:
- Access levels allowed to merge.
default: maintainer
type: str
choices: ["maintainer", "developer", "nobody"]
push_access_level:
description:
- Access levels allowed to push.
default: maintainer
type: str
choices: ["maintainer", "developer", "nobody"]
'''
EXAMPLES = '''
- name: Create protected branch on main
community.general.gitlab_protected_branch:
api_url: https://gitlab.com
api_token: secret_access_token
project: "dj-wasabi/collection.general"
name: main
merge_access_levels: maintainer
push_access_level: nobody
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.api import basic_auth_argument_spec
from distutils.version import LooseVersion
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
from ansible_collections.community.general.plugins.module_utils.gitlab import auth_argument_spec, gitlab_authentication
class GitlabProtectedBranch(object):
def __init__(self, module, project, gitlab_instance):
self.repo = gitlab_instance
self._module = module
self.project = self.get_project(project)
self.ACCESS_LEVEL = {
'nobody': gitlab.NO_ACCESS,
'developer': gitlab.DEVELOPER_ACCESS,
'maintainer': gitlab.MAINTAINER_ACCESS
}
def get_project(self, project_name):
return self.repo.projects.get(project_name)
def protected_branch_exist(self, name):
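        """Return the protected branch object if it exists, otherwise False."""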
try:
return self.project.protectedbranches.get(name)
except Exception as e:
return False
def create_protected_branch(self, name, merge_access_levels, push_access_level):
if self._module.check_mode:
return True
merge = self.ACCESS_LEVEL[merge_access_levels]
push = self.ACCESS_LEVEL[push_access_level]
self.project.protectedbranches.create({
'name': name,
'merge_access_level': merge,
'push_access_level': push
})
def compare_protected_branch(self, name, merge_access_levels, push_access_level):
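        """Return True if the existing protected branch matches the requested access levels."""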
configured_merge = self.ACCESS_LEVEL[merge_access_levels]
configured_push = self.ACCESS_LEVEL[push_access_level]
current = self.protected_branch_exist(name=name)
current_merge = current.merge_access_levels[0]['access_level']
current_push = current.push_access_levels[0]['access_level']
if current:
if current.name == name and current_merge == configured_merge and current_push == configured_push:
return True
return False
def delete_protected_branch(self, name):
if self._module.check_mode:
return True
return self.project.protectedbranches.delete(name)
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
project=dict(type='str', required=True),
name=dict(type='str', required=True),
merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['api_username', 'api_token'],
['api_username', 'api_oauth_token'],
['api_username', 'api_job_token'],
['api_token', 'api_oauth_token'],
['api_token', 'api_job_token'],
],
required_together=[
['api_username', 'api_password'],
],
required_one_of=[
['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
],
supports_check_mode=True
)
project = module.params['project']
name = module.params['name']
merge_access_levels = module.params['merge_access_levels']
push_access_level = module.params['push_access_level']
state = module.params['state']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
gitlab_version = gitlab.__version__
if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
        module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
gitlab_instance = gitlab_authentication(module)
this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
p_branch = this_gitlab.protected_branch_exist(name=name)
if not p_branch and state == "present":
this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
module.exit_json(changed=True, msg="Created the proteched branch.")
elif p_branch and state == "present":
if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level):
this_gitlab.delete_protected_branch(name=name)
this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
            module.exit_json(changed=True, msg="Recreated the protected branch.")
elif p_branch and state == "absent":
this_gitlab.delete_protected_branch(name=name)
        module.exit_json(changed=True, msg="Deleted the protected branch.")
module.exit_json(changed=False, msg="No changes are needed.")
if __name__ == '__main__':
main()
| 35.572139 | 145 | 0.694406 |
13c3e3500903f58c77433f01fef8647f066216d4 | 2,272 | py | Python | btshowmetainfo.py | crossbrowsertesting/BitTornado | 30b8da65ec620573351838e0281b3c9c9dc0982b | [
"MIT"
] | 1 | 2020-06-01T06:16:03.000Z | 2020-06-01T06:16:03.000Z | btshowmetainfo.py | crossbrowsertesting/BitTornado | 30b8da65ec620573351838e0281b3c9c9dc0982b | [
"MIT"
] | null | null | null | btshowmetainfo.py | crossbrowsertesting/BitTornado | 30b8da65ec620573351838e0281b3c9c9dc0982b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Written by Henry 'Pi' James and Loring Holden
# modified for multitracker display by John Hoffman
# see LICENSE.txt for license information
import sys
import os
import hashlib
from BitTornado.Meta.Info import MetaInfo
from BitTornado.Meta.bencode import bencode
NAME, EXT = os.path.splitext(os.path.basename(sys.argv[0]))
VERSION = '20130326'
print('{} {} - decode BitTorrent metainfo files'.format(NAME, VERSION))
print()
if len(sys.argv) == 1:
print('{} file1.torrent file2.torrent file3.torrent ...'.format(
sys.argv[0]))
print()
sys.exit(2) # common exit code for syntax error
for metainfo_name in sys.argv[1:]:
metainfo = MetaInfo.read(metainfo_name)
info = metainfo['info']
info_hash = hashlib.sha1(bencode(info))
print('metainfo file.:', os.path.basename(metainfo_name))
print('info hash.....:', info_hash.hexdigest())
piece_length = info['piece length']
if 'length' in info:
# let's assume we just have a file
print('file name.....:', info['name'])
file_length = info['length']
name = 'file size.....:'
else:
# let's assume we have a directory structure
print('directory name:', info['name'])
print('files.........:')
file_length = 0
for file in info['files']:
path = ''
for item in file['path']:
if path != '':
path = path + "/"
path = path + item
print(' {} ({:d})'.format(path, file['length']))
file_length += file['length']
name = 'archive size..:'
piece_number, last_piece_length = divmod(file_length, piece_length)
print('{} {:d} ({:d} * {:d} + {:d})'.format(
name, file_length, piece_number, piece_length, last_piece_length))
print('announce url..:', metainfo['announce'])
if 'announce-list' in metainfo:
announce_list = '|'.join(','.join(tier)
for tier in metainfo['announce-list'])
print('announce-list.:', announce_list)
if 'httpseeds' in metainfo:
print('http seeds....:', '|'.join(metainfo['httpseeds']))
if 'comment' in metainfo:
print('comment.......:', metainfo['comment'])
| 35.5 | 76 | 0.590669 |
9d3341955b8d7e2cea618950d95e99490b6f62e9 | 3,377 | py | Python | quizz/quizz/settings.py | paramsiddharth/Quiz-App | 29b6a6ba05da22137bd9e82dc48665d8cb382c83 | [
"MIT"
] | null | null | null | quizz/quizz/settings.py | paramsiddharth/Quiz-App | 29b6a6ba05da22137bd9e82dc48665d8cb382c83 | [
"MIT"
] | null | null | null | quizz/quizz/settings.py | paramsiddharth/Quiz-App | 29b6a6ba05da22137bd9e82dc48665d8cb382c83 | [
"MIT"
] | 1 | 2021-09-15T11:52:03.000Z | 2021-09-15T11:52:03.000Z | """
Django settings for quizz project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vlc%a2+hr5^5+f(((zv&2yn)#vd@ggyn+sa&%kyh+*m*rddc!7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'question.apps.QuestionConfig',
'users.apps.UsersConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'quizz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'quizz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
| 25.976923 | 91 | 0.694699 |
0c5dabb6eeb0aa0401df078b53794c42299dedfa | 7,807 | py | Python | homeassistant/components/mailbox/__init__.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 1 | 2019-04-22T06:05:09.000Z | 2019-04-22T06:05:09.000Z | homeassistant/components/mailbox/__init__.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mailbox/__init__.py | robin13/home-assistant | 4976569e304c23975d34ec88e2dfb94e84ab1f1c | [
"Apache-2.0"
] | 1 | 2021-09-20T01:52:31.000Z | 2021-09-20T01:52:31.000Z | """
Provides functionality for mailboxes.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mailbox/
"""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
CONTENT_TYPE_MPEG = 'audio/mpeg'
DEPENDENCIES = ['http']
DOMAIN = 'mailbox'
EVENT = 'mailbox_updated'
SCAN_INTERVAL = timedelta(seconds=30)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
yield from hass.components.frontend.async_register_built_in_panel(
'mailbox', 'mailbox', 'mdi:mailbox')
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
@asyncio.coroutine
def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, 'async_get_handler'):
mailbox = yield from \
platform.async_get_handler(hass, p_config, discovery_info)
elif hasattr(platform, 'get_handler'):
mailbox = yield from hass.async_add_job(
platform.get_handler, hass, p_config, discovery_info)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error(
"Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(hass, mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
yield from component.async_add_entities([mailbox_entity])
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
yield from asyncio.wait(setup_tasks, loop=hass.loop)
@asyncio.coroutine
def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
yield from async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform."""
def __init__(self, hass, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.hass = hass
self.message_count = 0
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
hass.bus.async_listen(EVENT, _mailbox_updated)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
@asyncio.coroutine
def async_update(self):
"""Retrieve messages from platform."""
messages = yield from self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@asyncio.coroutine
def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
@asyncio.coroutine
def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
class StreamError(Exception):
"""Media streaming exception."""
pass
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
@asyncio.coroutine
def get(self, request):
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(mailbox.name)
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
@asyncio.coroutine
def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = yield from mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
@asyncio.coroutine
def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
@asyncio.coroutine
def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
hass = request.app['hass']
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10, loop=hass.loop):
try:
stream = yield from mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=500)
if stream:
return web.Response(body=stream,
content_type=mailbox.media_type)
return web.Response(status=500)
| 30.377432 | 78 | 0.654669 |
96f4e809e791f6ecb7a441d95cbedd7403d63fd4 | 4,022 | py | Python | dscribe/setup.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | null | null | null | dscribe/setup.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | null | null | null | dscribe/setup.py | rartino/hands-on-2 | 8f31b978f295a761dc0a7ae093184b3dbfa7e199 | [
"MIT"
] | 1 | 2021-09-18T21:26:30.000Z | 2021-09-18T21:26:30.000Z | import sys
# Check python version
if sys.version_info[:2] < (3, 0):
raise RuntimeError("Python version >= 3.0 required.")
import platform
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from setuptools import setup, find_packages, Extension
from subprocess import getoutput
def using_clang():
"""Will we be using a clang compiler?
"""
compiler = new_compiler()
customize_compiler(compiler)
compiler_ver = getoutput("{0} -v".format(compiler.compiler[0]))
return "clang" in compiler_ver
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
cpp_extra_link_args = []
cpp_extra_compile_args = ["-std=c++11", "-O3"]
c_extra_compile_args = ["-std=c99", "-O3"]
# Needed to specify C++ runtime library on OSX. This solution is replicated
# from the setup.py of mdanalysis
if platform.system() == "Darwin" and using_clang():
cpp_extra_compile_args.append("-stdlib=libc++")
cpp_extra_compile_args.append("-mmacosx-version-min=10.9")
cpp_extra_link_args.append("-stdlib=libc++")
cpp_extra_link_args.append("-mmacosx-version-min=10.7")
extensions = [
# The SOAP, MBTR, ACSF and utils C++ extensions, wrapped with pybind11
Extension(
'dscribe.ext',
[
"dscribe/ext/ext.cpp",
"dscribe/ext/celllist.cpp",
"dscribe/ext/soapGTO.cpp",
"dscribe/ext/soapGeneral.cpp",
"dscribe/ext/acsf.cpp",
"dscribe/ext/mbtr.cpp",
],
include_dirs=[
# Path to pybind11 headers
"dscribe/ext",
get_pybind_include(),
get_pybind_include(user=True)
],
language='c++',
extra_compile_args=cpp_extra_compile_args + ["-fvisibility=hidden"], # the -fvisibility flag is needed by pybind11
extra_link_args=cpp_extra_link_args,
)
]
if __name__ == "__main__":
setup(
name="dscribe",
version="0.4.1a0",
url="https://singroup.github.io/dscribe/",
description="A Python package for creating feature transformations in applications of machine learning to materials science.",
long_description="A Python package for creating feature transformations in applications of machine learning to materials science.",
packages=find_packages(),
setup_requires=['pybind11>=2.4'],
install_requires=['pybind11>=2.4', "numpy", "scipy", "ase>=3.19.0", "scikit-learn", "joblib"],
include_package_data=True, # This ensures that files defined in MANIFEST.in are included
ext_modules=extensions,
license="Apache License 2.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: C",
"Programming Language :: C++",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
],
keywords="descriptor machine learning atomistic structure materials science",
python_requires=">=3.5",
)
| 37.240741 | 139 | 0.636251 |
0e66e4eafe407b5c9328759272e60f4b2ef32cc7 | 1,922 | py | Python | bts/tests.py | chkgk/otree_bayesian_truth_serum | 793d79d29b9776e5b87569c381ea2d166ab1a60b | [
"MIT"
] | 1 | 2020-10-27T19:06:30.000Z | 2020-10-27T19:06:30.000Z | bts/tests.py | chkgk/otree_bayesian_truth_serum | 793d79d29b9776e5b87569c381ea2d166ab1a60b | [
"MIT"
] | null | null | null | bts/tests.py | chkgk/otree_bayesian_truth_serum | 793d79d29b9776e5b87569c381ea2d166ab1a60b | [
"MIT"
] | null | null | null | from otree.api import Currency as c, currency_range, SubmissionMustFail
from . import pages
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
def play_round(self):
submissions = {
1: {'question': 2, 'prediction1': 0.1, 'prediction2': 0.2, 'prediction3': 0.3, 'prediction4': 0.4},
2: {'question': 3, 'prediction1': 0.5, 'prediction2': 0.3, 'prediction3': 0.1, 'prediction4': 0.1},
3: {'question': 4, 'prediction1': 0.5, 'prediction2': 0.2, 'prediction3': 0.2, 'prediction4': 0.1}
}
correct_scores = {
1: {'information_score': 0.3757, 'prediction_score': -0.1446, 'respondent_score': 0.2310},
2: {'information_score': 0.6067, 'prediction_score': -0.8378, 'respondent_score': -0.2310},
3: {'information_score': 0.7419, 'prediction_score': -0.7419, 'respondent_score': 0}
}
# test page
# predictions do not sum to 1
yield SubmissionMustFail(pages.TruthSerum, {'question': 2, 'prediction1': 0.1, 'prediction2': 0.2,
'prediction3': 0.3, 'prediction4': 0.5})
# at least one entry == 0
yield SubmissionMustFail(pages.TruthSerum, {'question': 2, 'prediction1': 0, 'prediction2': 0.2,
'prediction3': 0.3, 'prediction4': 0.5})
# all good
yield pages.TruthSerum, submissions[self.player.id_in_subsession]
# check calculations
assert round(self.player.information_score, 4) == correct_scores[self.player.id_in_subsession]['information_score']
assert round(self.player.prediction_score, 4) == correct_scores[self.player.id_in_subsession]['prediction_score']
assert round(self.player.respondent_score, 4) == correct_scores[self.player.id_in_subsession]['respondent_score']
yield pages.Results
| 49.282051 | 123 | 0.615505 |
ed8f30040358c15f9701b64005e35573a9b32a99 | 1,100 | py | Python | test/test_tofts.py | physimals/fabber_models_dce | e65e9e13ca912cda2fa64169c885bc77e1cc5bb2 | [
"Apache-2.0"
] | null | null | null | test/test_tofts.py | physimals/fabber_models_dce | e65e9e13ca912cda2fa64169c885bc77e1cc5bb2 | [
"Apache-2.0"
] | null | null | null | test/test_tofts.py | physimals/fabber_models_dce | e65e9e13ca912cda2fa64169c885bc77e1cc5bb2 | [
"Apache-2.0"
] | 2 | 2020-07-22T12:27:20.000Z | 2020-08-26T08:01:57.000Z | #!/bin/env python
import os, sys
import traceback
import math
from fabber import self_test, FabberException
TEST_DATA = {
"dce_tofts" : [
{
"convergence" : "trialmode",
"max-trials" : 20,
"max-iterations" : 30,
"aif" : "orton",
"delt" : 0.1,
"fa" : 12,
"tr" : 0.0045,
"r1" : 4.5,
"delay" : 0.5,
"sig0" : 10000,
}, # Model options
{
"ktrans" : [0.05, 0.1, 0.2, 0.4, 0.8],
"ve" : [0.05, 0.1, 0.2, 0.4, 0.8],
},
{
"nt" : 25,
"patchsize" : 5,
"noise" : 5,
} # Other options
]
}
save = "--save" in sys.argv
try:
for model, test_data in TEST_DATA.items():
rundata, params, kwargs = test_data
log = self_test(model, rundata, params, save_input=save, save_output=save,
invert=True, **kwargs)
#print(log)
except FabberException, e:
print e.log
traceback.print_exc()
except:
traceback.print_exc()
| 22.44898 | 83 | 0.461818 |
85a8cd0495ba88a050bf4610cbe538811a284cf2 | 13,355 | py | Python | Lib/tkinter/test/test_tkinter/test_images.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 1,872 | 2015-01-02T18:56:47.000Z | 2022-03-31T07:34:39.000Z | Lib/tkinter/test/test_tkinter/test_images.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 675 | 2015-02-27T09:01:01.000Z | 2022-03-31T14:03:25.000Z | Lib/tkinter/test/test_tkinter/test_images.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | import unittest
import tkinter
from test import support
from tkinter.test.support import AbstractTkTest, requires_tcl
support.requires('gui')
class MiscTest(AbstractTkTest, unittest.TestCase):
def test_image_types(self):
image_types = self.root.image_types()
self.assertIsInstance(image_types, tuple)
self.assertIn('photo', image_types)
self.assertIn('bitmap', image_types)
def test_image_names(self):
image_names = self.root.image_names()
self.assertIsInstance(image_names, tuple)
class BitmapImageTest(AbstractTkTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
AbstractTkTest.setUpClass.__func__(cls)
cls.testfile = support.findfile('python.xbm', subdir='imghdrdata')
def test_create_from_file(self):
image = tkinter.BitmapImage('::img::test', master=self.root,
foreground='yellow', background='blue',
file=self.testfile)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'bitmap')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def test_create_from_data(self):
with open(self.testfile, 'rb') as f:
data = f.read()
image = tkinter.BitmapImage('::img::test', master=self.root,
foreground='yellow', background='blue',
data=data)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'bitmap')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def assertEqualStrList(self, actual, expected):
self.assertIsInstance(actual, str)
self.assertEqual(self.root.splitlist(actual), expected)
def test_configure_data(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['data'], '-data {} {} {} {}')
with open(self.testfile, 'rb') as f:
data = f.read()
image.configure(data=data)
self.assertEqualStrList(image['data'],
('-data', '', '', '', data.decode('ascii')))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['maskdata'], '-maskdata {} {} {} {}')
image.configure(maskdata=data)
self.assertEqualStrList(image['maskdata'],
('-maskdata', '', '', '', data.decode('ascii')))
def test_configure_file(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['file'], '-file {} {} {} {}')
image.configure(file=self.testfile)
self.assertEqualStrList(image['file'],
('-file', '', '', '',self.testfile))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['maskfile'], '-maskfile {} {} {} {}')
image.configure(maskfile=self.testfile)
self.assertEqualStrList(image['maskfile'],
('-maskfile', '', '', '', self.testfile))
def test_configure_background(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['background'], '-background {} {} {} {}')
image.configure(background='blue')
self.assertEqual(image['background'], '-background {} {} {} blue')
def test_configure_foreground(self):
image = tkinter.BitmapImage('::img::test', master=self.root)
self.assertEqual(image['foreground'],
'-foreground {} {} #000000 #000000')
image.configure(foreground='yellow')
self.assertEqual(image['foreground'],
'-foreground {} {} #000000 yellow')
class PhotoImageTest(AbstractTkTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
AbstractTkTest.setUpClass.__func__(cls)
cls.testfile = support.findfile('python.gif', subdir='imghdrdata')
def create(self):
return tkinter.PhotoImage('::img::test', master=self.root,
file=self.testfile)
def colorlist(self, *args):
if tkinter.TkVersion >= 8.6 and self.wantobjects:
return args
else:
return tkinter._join(args)
def check_create_from_file(self, ext):
testfile = support.findfile('python.' + ext, subdir='imghdrdata')
image = tkinter.PhotoImage('::img::test', master=self.root,
file=testfile)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'photo')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['data'], '')
self.assertEqual(image['file'], testfile)
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def check_create_from_data(self, ext):
testfile = support.findfile('python.' + ext, subdir='imghdrdata')
with open(testfile, 'rb') as f:
data = f.read()
image = tkinter.PhotoImage('::img::test', master=self.root,
data=data)
self.assertEqual(str(image), '::img::test')
self.assertEqual(image.type(), 'photo')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image['data'], data if self.wantobjects
else data.decode('latin1'))
self.assertEqual(image['file'], '')
self.assertIn('::img::test', self.root.image_names())
del image
self.assertNotIn('::img::test', self.root.image_names())
def test_create_from_ppm_file(self):
self.check_create_from_file('ppm')
def test_create_from_ppm_data(self):
self.check_create_from_data('ppm')
def test_create_from_pgm_file(self):
self.check_create_from_file('pgm')
def test_create_from_pgm_data(self):
self.check_create_from_data('pgm')
def test_create_from_gif_file(self):
self.check_create_from_file('gif')
def test_create_from_gif_data(self):
self.check_create_from_data('gif')
@requires_tcl(8, 6)
def test_create_from_png_file(self):
self.check_create_from_file('png')
@requires_tcl(8, 6)
def test_create_from_png_data(self):
self.check_create_from_data('png')
def test_configure_data(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['data'], '')
with open(self.testfile, 'rb') as f:
data = f.read()
image.configure(data=data)
self.assertEqual(image['data'], data if self.wantobjects
else data.decode('latin1'))
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_format(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['format'], '')
image.configure(file=self.testfile, format='gif')
self.assertEqual(image['format'], ('gif',) if self.wantobjects
else 'gif')
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_file(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['file'], '')
image.configure(file=self.testfile)
self.assertEqual(image['file'], self.testfile)
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
def test_configure_gamma(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['gamma'], '1.0')
image.configure(gamma=2.0)
self.assertEqual(image['gamma'], '2.0')
def test_configure_width_height(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['width'], '0')
self.assertEqual(image['height'], '0')
image.configure(width=20)
image.configure(height=10)
self.assertEqual(image['width'], '20')
self.assertEqual(image['height'], '10')
self.assertEqual(image.width(), 20)
self.assertEqual(image.height(), 10)
def test_configure_palette(self):
image = tkinter.PhotoImage('::img::test', master=self.root)
self.assertEqual(image['palette'], '')
image.configure(palette=256)
self.assertEqual(image['palette'], '256')
image.configure(palette='3/4/2')
self.assertEqual(image['palette'], '3/4/2')
def test_blank(self):
image = self.create()
image.blank()
self.assertEqual(image.width(), 16)
self.assertEqual(image.height(), 16)
self.assertEqual(image.get(4, 6), self.colorlist(0, 0, 0))
def test_copy(self):
image = self.create()
image2 = image.copy()
self.assertEqual(image2.width(), 16)
self.assertEqual(image2.height(), 16)
self.assertEqual(image.get(4, 6), image.get(4, 6))
def test_subsample(self):
image = self.create()
image2 = image.subsample(2, 3)
self.assertEqual(image2.width(), 8)
self.assertEqual(image2.height(), 6)
self.assertEqual(image2.get(2, 2), image.get(4, 6))
image2 = image.subsample(2)
self.assertEqual(image2.width(), 8)
self.assertEqual(image2.height(), 8)
self.assertEqual(image2.get(2, 3), image.get(4, 6))
def test_zoom(self):
image = self.create()
image2 = image.zoom(2, 3)
self.assertEqual(image2.width(), 32)
self.assertEqual(image2.height(), 48)
self.assertEqual(image2.get(8, 18), image.get(4, 6))
self.assertEqual(image2.get(9, 20), image.get(4, 6))
image2 = image.zoom(2)
self.assertEqual(image2.width(), 32)
self.assertEqual(image2.height(), 32)
self.assertEqual(image2.get(8, 12), image.get(4, 6))
self.assertEqual(image2.get(9, 13), image.get(4, 6))
def test_put(self):
image = self.create()
image.put('{red green} {blue yellow}', to=(4, 6))
self.assertEqual(image.get(4, 6), self.colorlist(255, 0, 0))
self.assertEqual(image.get(5, 6),
self.colorlist(0, 128 if tkinter.TkVersion >= 8.6
else 255, 0))
self.assertEqual(image.get(4, 7), self.colorlist(0, 0, 255))
self.assertEqual(image.get(5, 7), self.colorlist(255, 255, 0))
image.put((('#f00', '#00ff00'), ('#000000fff', '#ffffffff0000')))
self.assertEqual(image.get(0, 0), self.colorlist(255, 0, 0))
self.assertEqual(image.get(1, 0), self.colorlist(0, 255, 0))
self.assertEqual(image.get(0, 1), self.colorlist(0, 0, 255))
self.assertEqual(image.get(1, 1), self.colorlist(255, 255, 0))
def test_get(self):
image = self.create()
self.assertEqual(image.get(4, 6), self.colorlist(62, 116, 162))
self.assertEqual(image.get(0, 0), self.colorlist(0, 0, 0))
self.assertEqual(image.get(15, 15), self.colorlist(0, 0, 0))
self.assertRaises(tkinter.TclError, image.get, -1, 0)
self.assertRaises(tkinter.TclError, image.get, 0, -1)
self.assertRaises(tkinter.TclError, image.get, 16, 15)
self.assertRaises(tkinter.TclError, image.get, 15, 16)
def test_write(self):
image = self.create()
self.addCleanup(support.unlink, support.TESTFN)
image.write(support.TESTFN)
image2 = tkinter.PhotoImage('::img::test2', master=self.root,
format='ppm',
file=support.TESTFN)
self.assertEqual(str(image2), '::img::test2')
self.assertEqual(image2.type(), 'photo')
self.assertEqual(image2.width(), 16)
self.assertEqual(image2.height(), 16)
self.assertEqual(image2.get(0, 0), image.get(0, 0))
self.assertEqual(image2.get(15, 8), image.get(15, 8))
image.write(support.TESTFN, format='gif', from_coords=(4, 6, 6, 9))
image3 = tkinter.PhotoImage('::img::test3', master=self.root,
format='gif',
file=support.TESTFN)
self.assertEqual(str(image3), '::img::test3')
self.assertEqual(image3.type(), 'photo')
self.assertEqual(image3.width(), 2)
self.assertEqual(image3.height(), 3)
self.assertEqual(image3.get(0, 0), image.get(4, 6))
self.assertEqual(image3.get(1, 2), image.get(5, 8))
tests_gui = (MiscTest, BitmapImageTest, PhotoImageTest,)
if __name__ == "__main__":
support.run_unittest(*tests_gui)
| 40.716463 | 80 | 0.594908 |
748ed28868581a95b707b4d1a4f4d1ceb756396d | 401 | py | Python | arcade_solutions/intro/alternating_sums.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null | arcade_solutions/intro/alternating_sums.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null | arcade_solutions/intro/alternating_sums.py | nickaigi/automatic-dollop | eb8222475c7871c1d5710242c5aed8c70ea0d2c8 | [
"Unlicense"
] | null | null | null | def alternating_sums(a):
even, odd = [], []
for i in range(len(a)):
if i % 2 == 0:
even.append(a[i])
else:
odd.append(a[i])
return [sum(even), sum(odd)]
def alternating_sums_short(a):
return [
sum(a[::2]), sum(a[1::2])
]
if __name__ == '__main__':
a = [50, 60, 60, 45, 70]
alternating_sums(a)
alternating_sums_short(a)
| 21.105263 | 33 | 0.511222 |
2d2901dd594ece7bdea77f11d64a1007ab5076ff | 1,077 | py | Python | moonmining/tests/testdata/load_eveuniverse.py | buahaha/aa-moonmining | 475888fed7b6f4ad3120ccd03148f2b4b84b0a71 | [
"MIT"
] | null | null | null | moonmining/tests/testdata/load_eveuniverse.py | buahaha/aa-moonmining | 475888fed7b6f4ad3120ccd03148f2b4b84b0a71 | [
"MIT"
] | null | null | null | moonmining/tests/testdata/load_eveuniverse.py | buahaha/aa-moonmining | 475888fed7b6f4ad3120ccd03148f2b4b84b0a71 | [
"MIT"
] | null | null | null | import json
from eveuniverse.models import EveMoon, EveType
from eveuniverse.tools.testdata import load_testdata_from_dict
from . import test_data_filename
def _load_eveuniverse_from_file():
with open(test_data_filename(), "r", encoding="utf-8") as f:
data = json.load(f)
return data
eveuniverse_testdata = _load_eveuniverse_from_file()
def load_eveuniverse():
load_testdata_from_dict(eveuniverse_testdata)
def nearest_celestial_stub(eve_solar_system, x, y, z):
eve_type = EveType.objects.get(id=14)
if (x, y, z) == (55028384780, 7310316270, -163686684205):
return eve_solar_system.NearestCelestial(
eve_type=eve_type,
eve_object=EveMoon.objects.get(id=40161708), # Auga V - Moon 1
distance=123,
)
elif (x, y, z) == (45028384780, 6310316270, -163686684205):
return eve_solar_system.NearestCelestial(
eve_type=eve_type,
eve_object=EveMoon.objects.get(id=40161709), # Auga V - Moon 2
distance=123,
)
else:
return None
| 27.615385 | 75 | 0.67688 |
366d9afddfe618079c44af729f140591ca273fdc | 1,445 | py | Python | reto-04/PalyPalillo/src/configurator.py | rburgosm/reto-python | d2da1456be159c22604fbf4ed7e5f815cb676b74 | [
"MIT"
] | 18 | 2022-02-14T19:15:21.000Z | 2022-03-29T12:56:18.000Z | reto-04/PalyPalillo/src/configurator.py | rburgosm/reto-python | d2da1456be159c22604fbf4ed7e5f815cb676b74 | [
"MIT"
] | 22 | 2022-03-05T23:48:04.000Z | 2022-03-31T04:33:58.000Z | reto-04/PalyPalillo/src/configurator.py | rburgosm/reto-python | d2da1456be159c22604fbf4ed7e5f815cb676b74 | [
"MIT"
] | 21 | 2022-02-16T17:52:05.000Z | 2022-03-28T20:13:46.000Z | import os
from pathlib import Path
import toml
class Configurator:
def __init__(self, path, config):
self.path=path
self.config=config
self.check_conf()
def getconfig(self):
if 'XDG_CONFIG_HOME' in os.environ:
return os.environ['XDG_CONFIG_HOME']
else:
return Path.home() / '.config'
def getDirDesc(self):
if 'XDG_DOWNLOAD_DIR' in os.environ:
return os.environ['XDG_DOWNLOAD_DIR']
config_dir = Path(self.getconfig())
user_dirs_file = config_dir / 'user-dirs.dirs'
with open(user_dirs_file, 'r') as fr:
for line in fr.readlines():
if line.startswith('XDG_DOWNLOAD_DIR'):
directory = line.split("=")[1].replace("\"",'')
download_dir = directory.replace("$HOME", str(Path.home()))[:-1]
return Path(download_dir)
return None
def check_conf(self):
config_dir = self.path
if not config_dir.exists():
os.makedirs(config_dir)
config_file = config_dir / self.config
if not config_file.exists():
with open(config_file, 'w') as file:
toml.dump({"directorio": str(self.getDirDesc())},file)
def read(self):
config_dir = Path(self.path)
config_file = config_dir / self.config
idata = toml.load(config_file)
return idata | 32.111111 | 85 | 0.577855 |
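# A minimal usage sketch for the Configurator class above; the directory and
# file names are made up for illustration, not taken from the original project:
#
#     from pathlib import Path
#     cfg = Configurator(Path.home() / '.config' / 'palypalillo', 'config.toml')
#     cfg.read()   # -> {'directorio': '<download dir discovered by check_conf()>'}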
54f5d8d6c2b32e3ecbd75c6b3209123f02ba7e61 | 9,052 | py | Python | themes/qutebrowser/themes/default/base16-atelier-seaside-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 95 | 2018-05-28T18:06:48.000Z | 2022-03-14T21:36:05.000Z | themes/qutebrowser/themes/default/base16-atelier-seaside-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 18 | 2018-08-26T00:57:20.000Z | 2022-02-19T08:29:29.000Z | themes/qutebrowser/themes/default/base16-atelier-seaside-light.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 20 | 2018-06-21T12:41:47.000Z | 2022-03-04T22:06:20.000Z | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Atelier Seaside Light scheme by Bram de Haan (http://atelierbramdehaan.nl)
base00 = "#f4fbf4"
base01 = "#cfe8cf"
base02 = "#8ca68c"
base03 = "#809980"
base04 = "#687d68"
base05 = "#5e6e5e"
base06 = "#242924"
base07 = "#131513"
base08 = "#e6193c"
base09 = "#87711d"
base0A = "#98981b"
base0B = "#29a329"
base0C = "#1999b3"
base0D = "#3d62f5"
base0E = "#ad2bee"
base0F = "#e619c3"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base01
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base0B
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color for the selected item in filename prompts.
c.colors.prompts.selected.fg = base05
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base01
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base01
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
| 30.07309 | 95 | 0.771653 |
fd7d4212c83460c6473874a7a44dd022d5a3bb9c | 62 | py | Python | __init__.py | a-maumau/notificator | 79f9ca62476c5e0c45ebd16beb8ef82f8541a80e | [
"MIT"
] | 1 | 2018-01-31T00:54:44.000Z | 2018-01-31T00:54:44.000Z | __init__.py | a-maumau/notificator | 79f9ca62476c5e0c45ebd16beb8ef82f8541a80e | [
"MIT"
] | null | null | null | __init__.py | a-maumau/notificator | 79f9ca62476c5e0c45ebd16beb8ef82f8541a80e | [
"MIT"
] | null | null | null | __all__ = ['notificator']
from .notificator import Notificator | 31 | 36 | 0.806452 |
3b87b97a7e7468b6c663bdd2085ad5943de04bac | 911 | py | Python | profiles_api/permissions.py | AitorPo/profiles-rest-api | 66f6505957dbf728707325836ef5b2e88d067e72 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | AitorPo/profiles-rest-api | 66f6505957dbf728707325836ef5b2e88d067e72 | [
"MIT"
] | null | null | null | profiles_api/permissions.py | AitorPo/profiles-rest-api | 66f6505957dbf728707325836ef5b2e88d067e72 | [
"MIT"
] | null | null | null | from rest_framework import permissions
# This file checks whether the user has permission (or is authenticated) to perform certain operations
# by comparing the id of the user who made the request with the object's id in the db
class UpdateOwnProfile(permissions.BasePermission):
"""Allow users to edit their own profile"""
def has_object_permission(self, request, view, obj):
"""Check the user is trying to edit their own profile"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
"""Allow users to update their own status"""
def has_object_permission(self, request, view, obj):
"""Check the user is trying to update their own status"""
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
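# A minimal sketch of how these permission classes are commonly wired into a
# DRF ViewSet; the viewset, serializer and model names below are hypothetical:
#
#     from rest_framework import viewsets
#     from rest_framework.authentication import TokenAuthentication
#
#     class UserProfileViewSet(viewsets.ModelViewSet):
#         serializer_class = UserProfileSerializer
#         queryset = UserProfile.objects.all()
#         authentication_classes = (TokenAuthentication,)
#         permission_classes = (UpdateOwnProfile,)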
| 36.44 | 94 | 0.709111 |
6780dc479aea20404282ecf090627c416281ad90 | 1,219 | py | Python | p_library/migrations/0001_initial.py | StalingradTeam/my_lib_with_media | 7bbe45c9615c87bea24ae48b4e126506548dda29 | [
"MIT"
] | null | null | null | p_library/migrations/0001_initial.py | StalingradTeam/my_lib_with_media | 7bbe45c9615c87bea24ae48b4e126506548dda29 | [
"MIT"
] | null | null | null | p_library/migrations/0001_initial.py | StalingradTeam/my_lib_with_media | 7bbe45c9615c87bea24ae48b4e126506548dda29 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-12-09 13:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.TextField()),
('birth_year', models.SmallIntegerField()),
('country', models.CharField(max_length=2)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ISBN', models.CharField(max_length=13)),
('title', models.TextField()),
('description', models.TextField()),
('year_release', models.SmallIntegerField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='p_library.Author')),
],
),
] | 34.828571 | 115 | 0.545529 |
e90e858d8d12886bef76b2dc8381cfe4868dbaa1 | 21,638 | py | Python | qa/rpc-tests/test_framework/util.py | SAFE-anwang/safe | a1b358ac333895b85b28e6b1d7835bf4257aae6e | [
"MIT"
] | 58 | 2018-02-04T20:50:42.000Z | 2021-02-09T07:23:56.000Z | qa/rpc-tests/test_framework/util.py | SAFE-anwang/safe | a1b358ac333895b85b28e6b1d7835bf4257aae6e | [
"MIT"
] | 3 | 2018-02-06T06:06:24.000Z | 2018-12-20T13:38:44.000Z | qa/rpc-tests/test_framework/util.py | CryptoCoinLab/safe | d835b91c94d0c2c89b80f7f58576ea916b35d874 | [
"MIT"
] | 47 | 2018-01-24T01:02:52.000Z | 2020-07-20T20:55:03.000Z | # Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Dash Core developers
# Copyright (c) 2018-2018 The Safe Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting SAFE values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "safe.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for safed to start. This means that RPC is accessible and fully initialized.
Raise an exception if safed exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('safed exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run safeds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("SAFED", "safed"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: safed started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
                print "initialize_chain: RPC successfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
            sys.stderr.write("Error connecting to "+rpc_url(i)+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in safe.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a safed and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("SAFED", "safed")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: safed started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: RPC successfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple safeds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
    Create & broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
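# A hypothetical call illustrating assert_array_result above: check that the
# wallet's transaction list has an entry for `txid` recorded with category
# "send" (node and txid are placeholders, not defined in this module):
#
#     assert_array_result(node.listtransactions(),
#                         {"txid": txid},
#                         {"category": "send"})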
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError ('key:"%s" not found' % key)
| 35.298532 | 153 | 0.651955 |
838f54a67018fa736881bc23f38e3f525f678258 | 1,126 | py | Python | Ene-Jun-2018/Luis Carielo/Práctica 5/hardwood.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | Ene-Jun-2018/Luis Carielo/Práctica 5/hardwood.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | Ene-Jun-2018/Luis Carielo/Práctica 5/hardwood.py | Andremm303/DAS_Sistemas | 0163505737e2b24365ea8b4e8135773a6801add4 | [
"MIT"
] | null | null | null | #variable that will hold the trees
trees = {}
#variable that will hold the number of cases
num_casos = int(input())
#variable that holds the blank lines
vacio = input()
#loop to go through the cases
for x in range(num_casos):
    #clear the dict on each iteration of the for loop
    trees.clear()
    cien_porciento = 0
    #while loop to read in the trees
    while True:
        #tree input
        in_trees = input()
        #if there is no data, exit the while loop
        if in_trees=="":
            break
        #if a value was entered, add one unit to the tree in question
        if trees.get(in_trees):
            trees[in_trees]+=1
        #otherwise, start the dictionary entry at 1
        else:
            trees[in_trees]=1
        #add one unit to the 100% total
        cien_porciento+=1
    #loop to print the output
    for arbol, porciento in sorted(trees.items()):
        porciento=100+(porciento/cien_porciento-1)*100
        print("%s %.4f" %(arbol,porciento))
    #condition to print a line break when showing the output
    if x<num_casos-1:
        print("")
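# A hypothetical run of the script above, assuming a single test case:
#   stdin : "1", a blank line, then "Oak", "Ash", "Oak", and a closing blank line
#   stdout: "Ash 33.3333" followed by "Oak 66.6667" (species sorted, percent of total)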
| 26.186047 | 76 | 0.627886 |
86f577b50b6fe57170cd853cecab67388689ca6e | 2,222 | py | Python | get_keys.py | chungchris/cloud-system-reconfiguration-tool | d3a564703c449ffe3d2c1c087a124599b43eed9e | [
"MIT"
] | null | null | null | get_keys.py | chungchris/cloud-system-reconfiguration-tool | d3a564703c449ffe3d2c1c087a124599b43eed9e | [
"MIT"
] | null | null | null | get_keys.py | chungchris/cloud-system-reconfiguration-tool | d3a564703c449ffe3d2c1c087a124599b43eed9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
## Chris @ DCSLAB, NCTU
## get_keys.py
## Get the provider list through specified xml file or GUI
###########################################################################
from optparse import OptionParser
from threading import Thread
import xml.etree.ElementTree as ET
import os.path
import gui_key
import myheader as chris
lb_dir = {'EC2':'ELB', 'GCE':'GCE'}
# if the specified key_file does not exist -> start a GUI to let the user give keys
# use these keys to connect to the clouds
# return a list of myheader.Provider data structures
def get_keys(exist, key_file):
if not exist:
# start the gui, activate wx
guithread = gui_key.GUI_Thread_Key(key_file)
guithread.setDaemon(True)
print('debug: starting GUI_Key')
guithread.start()
guithread.join()
if not os.path.exists(key_file):
print('Error: Specified key file not found')
    # wait until the key xml file is generated by the user through the gui
#while(not os.path.exists(key_file)):
# time.sleep(2)
# print('debug: wait for key file')
providers = []
#loadbalancers = []
tree = ET.parse(key_file)
root = tree.getroot()
print('debug: finish parsing')
if str(root.tag) != 'data':
print('syntax error in KEY.xml: <data>')
print('All changes discarded')
exit()
for provider in root:
if str(provider.tag) == 'Provider':
providers.append(chris.Provider(provider.attrib['name'], provider.attrib['region'], '', '', lb_dir[provider.attrib['name']]))
for info in provider:
if str(info.tag) == "AccessID":
providers[len(providers)-1].id = str(info.text)
elif str(info.tag) == "SecretKey":
providers[len(providers)-1].key = str(info.text)
else:
print('Error; syntax error in KEY.xml: illegal tag: ' + str(info.tag))
print('All changes discarded')
exit()
#loadbalancers.append(chris.Loadbalancer(info.attrib['name'], '', '', ''))
#loadbalancers[len(providers)-1].id = providers[len(providers)-1].id
#loadbalancers[len(providers)-1].key = providers[len(providers)-1].key
#loadbalancers[len(providers)-1].region = provider.attrib['region']
print('debug: got keys')
return providers
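# A sketch of the KEY.xml layout this parser expects, inferred from the parsing
# loop above (tag names come from the code, attribute and key values are
# placeholders):
#
#     <data>
#         <Provider name="EC2" region="us-east-1">
#             <AccessID>YOUR_ACCESS_ID</AccessID>
#             <SecretKey>YOUR_SECRET_KEY</SecretKey>
#         </Provider>
#     </data>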
| 32.676471 | 128 | 0.644464 |
a37ef94e9983e45cadb693b70f371a023fb038c4 | 1,992 | py | Python | source/tornado-botocore-1.0.2/setup.py | bananabandy/serverless-image-handler | 8821add16e49198d21d6bfd8ba2d5191a153f3bc | [
"BSD-2-Clause"
] | null | null | null | source/tornado-botocore-1.0.2/setup.py | bananabandy/serverless-image-handler | 8821add16e49198d21d6bfd8ba2d5191a153f3bc | [
"BSD-2-Clause"
] | null | null | null | source/tornado-botocore-1.0.2/setup.py | bananabandy/serverless-image-handler | 8821add16e49198d21d6bfd8ba2d5191a153f3bc | [
"BSD-2-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Python setup file for the tornado-botocore package.
In order to register your app at pypi.python.org, create an account at
pypi.python.org and login, then register your new app like so:
python setup.py register
If your name is still free, you can now make your first release but first you
should check if you are uploading the correct files:
python setup.py sdist
Inspect the output thoroughly. There shouldn't be any temp files and if your
app includes staticfiles or templates, make sure that they appear in the list.
If something is wrong, you need to edit MANIFEST.in and run the command again.
If all looks good, you can make your first release:
python setup.py sdist upload
For new releases, you need to bump the version number in
tornado_botocore/__init__.py and re-run the above command.
For more information on creating source distributions, see
http://docs.python.org/2/distutils/sourcedist.html
"""
import os
import uuid
import tornado_botocore as app
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
from setuptools import setup, find_packages
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
REQUIREMETS = [str(r.req) for r in parse_requirements('requirements.txt', session=uuid.uuid1())]
setup(
name="tornado-botocore",
version=app.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='tornado, botocore, async boto, amazon, aws',
author='Oleksandr Polyeno',
author_email='polyenoom@gmail.com',
url="https://github.com/nanvel/tornado-botocore",
packages=find_packages(),
package_data={'': ['requirements.txt']},
include_package_data=True,
install_requires=REQUIREMETS,
)
| 29.294118 | 96 | 0.734438 |
5fe4978670aef5bbe16d0ed063f7c853208daf61 | 10,181 | py | Python | sparkly/session.py | Tubular/sparkly | 015dbc830648e20ffeb6bf95a94a760c1fb94b6d | [
"Apache-2.0"
] | 31 | 2016-12-03T06:54:54.000Z | 2018-09-08T01:03:57.000Z | sparkly/session.py | Tubular/sparkly | 015dbc830648e20ffeb6bf95a94a760c1fb94b6d | [
"Apache-2.0"
] | 23 | 2016-12-02T18:41:10.000Z | 2018-06-08T23:54:34.000Z | sparkly/session.py | Tubular/sparkly | 015dbc830648e20ffeb6bf95a94a760c1fb94b6d | [
"Apache-2.0"
] | 4 | 2016-12-31T19:15:21.000Z | 2018-02-12T04:44:59.000Z | #
# Copyright 2017 Tubular Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
from copy import deepcopy
import os
import signal
import sys
import time
import uuid
from pyspark import SparkContext
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
from sparkly.catalog import SparklyCatalog
from sparkly.instant_testing import InstantTesting
from sparkly.reader import SparklyReader
from sparkly.writer import attach_writer_to_dataframe
class SparklySession(SparkSession):
"""Wrapper around SparkSession to simplify definition of options, packages, JARs and UDFs.
Example::
from pyspark.sql.types import IntegerType
import sparkly
class MySession(sparkly.SparklySession):
options = {'spark.sql.shuffle.partitions': '2000'}
repositories = ['http://packages.confluent.io/maven/']
packages = ['com.databricks:spark-csv_2.10:1.4.0']
jars = ['../path/to/brickhouse-0.7.1.jar']
udfs = {
'collect_max': 'brickhouse.udf.collect.CollectMaxUDAF',
'my_python_udf': (lambda x: len(x), IntegerType()),
}
spark = MySession()
spark.read_ext.cassandra(...)
# Alternatively
spark = MySession.get_or_create()
spark.read_ext.cassandra(...)
Attributes:
options (dict[str,str]): Configuration options that are passed to spark-submit.
See `the list of possible options
<https://spark.apache.org/docs/2.1.0/configuration.html#available-properties>`_.
Note that any options set already through PYSPARK_SUBMIT_ARGS will override
these.
repositories (list[str]): List of additional maven repositories for package lookup.
packages (list[str]): Spark packages that should be installed.
See https://spark-packages.org/
jars (list[str]): Full paths to jar files that we want to include to the session.
E.g. a JDBC connector or a library with UDF functions.
udfs (dict[str,str|typing.Callable]): Register UDF functions within the session.
Key - a name of the function,
Value - either a class name imported from a JAR file
or a tuple with python function and its return type.
name (str): a name that is used in default app_id_template (see below)
app_id_template (str|None): if set and nonempty, generate the `spark.app.id` with
this template. Interpolation is available with some pre-defined variables:
* initial_time: the time that the first session started
* initial_uid: a unique id associated with the first session
* session_time: the time the session started
* session_uid: a unique id associated with the session
A default value is provided using the name, initial-uid and session-time.
This helps a specific use case when running in Kubernetes: when a session
is restarted, the same app-id is used, breaking storage of spark-history data
(only the first session will have its history stored, unless overwrite mode
is used, in which case only the last session will have its history stored).
By defaulting to using the initial-uid and session-time information, we get
sane "grouping" of all sessions originating from the same initial session, but also
achieve separate individual app ids so that history for each can be maintained.
            To disable this functionality entirely, simply set to None or an empty string.
Finally, if a user manually specifies `spark.app.id`, then that value will
always trump any template provided here.
"""
name = 'sparkly'
options = {}
packages = []
jars = []
udfs = {}
repositories = []
app_id_template = '{name}-{initial_uid}-{session_time}'
_instantiated_session = None
_original_environment = None
_initial_time = None
_initial_uid = None
def __init__(self, additional_options=None):
SparklySession._original_environment = deepcopy(os.environ)
os.environ['PYSPARK_PYTHON'] = sys.executable
self._initial_time = self._initial_time or int(time.time())
self._initial_uid = self._initial_uid or uuid.uuid4().hex
self._session_time = int(time.time())
self._session_uid = uuid.uuid4().hex
options = {
'spark.sql.catalogImplementation': 'hive',
}
app_id_template = self.app_id_template
if app_id_template:
options.update({
'spark.app.id': app_id_template.format(
name=self.name,
initial_time=self._initial_time,
initial_uid=self._initial_uid,
session_time=self._session_time,
session_uid=self._session_uid,
),
})
options.update(self.options or {})
options.update(additional_options or {})
options = {str(key): str(value) for key, value in options.items()}
submit_args = [
# options that were already defined through PYSPARK_SUBMIT_ARGS
# take precedence over SparklySession's
os.environ.get('PYSPARK_SUBMIT_ARGS', '').replace('pyspark-shell', ''),
self._setup_repositories(),
self._setup_packages(),
self._setup_jars(),
self._setup_options(options),
'pyspark-shell',
]
os.environ['PYSPARK_SUBMIT_ARGS'] = ' '.join(filter(None, submit_args))
def get_context():
conf = SparkConf()
conf.setAll(options.items())
return SparkContext(conf=conf)
# If we are in instant testing mode
if InstantTesting.is_activated():
context = InstantTesting.get_context()
            # It's the first run, so we have to create context and daemonise the process.
if context is None:
context = get_context()
if os.fork() == 0: # Detached process.
signal.pause()
else:
InstantTesting.set_context(context)
else:
context = get_context()
super(SparklySession, self).__init__(context)
# similar to session builder:
for key, value in options.items():
self._jsparkSession.sessionState().conf().setConfString(key, value)
self._setup_udfs()
self.read_ext = SparklyReader(self)
self.catalog_ext = SparklyCatalog(self)
attach_writer_to_dataframe()
SparklySession._instantiated_session = self
@classmethod
def get_or_create(cls):
"""Access instantiated sparkly session.
If sparkly session has already been instantiated, return that
instance; if not, then instantiate one and return it. Useful
for lazy access to the session. Not thread-safe.
Returns:
SparklySession (or subclass).
"""
if SparklySession._instantiated_session is None:
cls()
return SparklySession._instantiated_session
@classmethod
def stop(cls):
"""Stop instantiated sparkly session."""
if SparklySession._instantiated_session is not None:
SparkSession.stop(SparklySession._instantiated_session)
SparklySession._instantiated_session = None
os.environ = SparklySession._original_environment
SparklySession._original_environment = None
@property
def builder(self):
raise NotImplementedError(
'You do not need a builder for SparklySession. '
'Just use a regular python constructor. '
'Please, follow the documentation for more details.'
)
def _setup_repositories(self):
if self.repositories:
return '--repositories {}'.format(','.join(self.repositories))
else:
return ''
def _setup_packages(self):
if self.packages:
return '--packages {}'.format(','.join(self.packages))
else:
return ''
def _setup_jars(self):
if self.jars:
return '--jars {}'.format(','.join(self.jars))
else:
return ''
def _setup_options(self, options):
# Here we massage conf properties with the intent to pass them to
# spark-submit; this is convenient as it is unified with the approach
# we take for repos, packages and jars, and it also handles precedence
# of conf properties already defined by the user in a very
# straightforward way (since we always append to PYSPARK_SUBMIT_ARGS)
return ' '.join('--conf "{}={}"'.format(*o) for o in sorted(options.items()))
def _setup_udfs(self):
for name, defn in self.udfs.items():
if isinstance(defn, str):
self.sql('create temporary function {} as "{}"'.format(name, defn))
elif isinstance(defn, tuple):
self.catalog.registerFunction(name, *defn)
else:
raise NotImplementedError('Incorrect UDF definition: {}: {}'.format(name, defn))
# https://issues.apache.org/jira/browse/SPARK-27927
# Spark on Kubernetes has an issue where the python process finishes,
# but the controlling java process just hangs, so nothing terminates.
# There is a simple workaround to stop the session prior to python termination.
# We do that here with an atexit registration.
atexit.register(SparklySession.stop)
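# A small illustration of the `additional_options` constructor argument above;
# the option key and value are examples only, not something sparkly requires:
#
#     spark = SparklySession(additional_options={'spark.executor.memory': '4g'})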
| 39.769531 | 96 | 0.641587 |
38af4d7dfba0ed5e621a98678bf64c55e1c03659 | 1,095 | py | Python | inv/urls.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | inv/urls.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | inv/urls.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | """inv URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from empleados import views as empleado
urlpatterns = [
url(r'^admin/logout/$', empleado.Logout.as_view()),
url(r'^$', empleado.Home.as_view()),
url(r'^admin/', admin.site.urls),
url(r'^nested_admin/', include('nested_admin.urls')),
url(r'^select2/', include('django_select2.urls')),
url(r'^inventario/', include('inventario.urls',namespace='inventario')),
]
| 40.555556 | 79 | 0.696804 |
d7418792f301404647fb7d9756f2146066167389 | 11,591 | py | Python | modin/pandas/test/test_general.py | pratheekrebala/modin | 97c47fbc986b0df24dabf6fa60700b2774824a1d | [
"Apache-2.0"
] | null | null | null | modin/pandas/test/test_general.py | pratheekrebala/modin | 97c47fbc986b0df24dabf6fa60700b2774824a1d | [
"Apache-2.0"
] | null | null | null | modin/pandas/test/test_general.py | pratheekrebala/modin | 97c47fbc986b0df24dabf6fa60700b2774824a1d | [
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from numpy.testing import assert_array_equal
from .utils import test_data_values, test_data_keys, df_equals
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = pd.merge(modin_df, modin_df2, how=how)
pandas_result = pandas.merge(pandas_df, pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
pd.merge(modin_df["col1"], modin_df2)
def test_merge_ordered():
data_a = {
"key": list("aceace"),
"lvalue": [1, 2, 3, 1, 2, 3],
"group": list("aaabbb"),
}
data_b = {"key": list("bcd"), "rvalue": [1, 2, 3]}
modin_df_a = pd.DataFrame(data_a)
modin_df_b = pd.DataFrame(data_b)
with pytest.warns(UserWarning):
df = pd.merge_ordered(
modin_df_a, modin_df_b, fill_method="ffill", left_by="group"
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.merge_ordered(data_a, data_b, fill_method="ffill", left_by="group")
def test_merge_asof():
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
with pytest.warns(UserWarning):
df = pd.merge_asof(left, right, on="a")
assert isinstance(df, pd.DataFrame)
with pytest.warns(UserWarning):
df = pd.merge_asof(left, right, on="a", allow_exact_matches=False)
assert isinstance(df, pd.DataFrame)
with pytest.warns(UserWarning):
df = pd.merge_asof(left, right, on="a", direction="forward")
assert isinstance(df, pd.DataFrame)
with pytest.warns(UserWarning):
df = pd.merge_asof(left, right, on="a", direction="nearest")
assert isinstance(df, pd.DataFrame)
left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
with pytest.warns(UserWarning):
df = pd.merge_asof(left, right, left_index=True, right_index=True)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.merge_asof(
{"left_val": ["a", "b", "c"]},
{"right_val": [1, 2, 3, 6, 7]},
left_index=True,
right_index=True,
)
def test_pivot():
test_df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df = pd.pivot(test_df, index="foo", columns="bar", values="baz")
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot(test_df["bar"], index="foo", columns="bar", values="baz")
def test_pivot_table():
test_df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df = pd.pivot_table(
test_df, values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot_table(
test_df["C"], values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
def test_unique():
modin_result = pd.unique([2, 1, 3, 3])
pandas_result = pandas.unique([2, 1, 3, 3])
assert_array_equal(modin_result, pandas_result)
modin_result = pd.unique(pd.Series([2] + [1] * 5))
pandas_result = pandas.unique(pandas.Series([2] + [1] * 5))
assert_array_equal(modin_result, pandas_result)
modin_result = pd.unique(
pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])
)
pandas_result = pandas.unique(
pandas.Series([pandas.Timestamp("20160101"), pandas.Timestamp("20160101")])
)
assert_array_equal(modin_result, pandas_result)
modin_result = pd.unique(
pd.Series(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Series(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
modin_result = pd.unique(
pd.Index(
[
pd.Timestamp("20160101", tz="US/Eastern"),
pd.Timestamp("20160101", tz="US/Eastern"),
]
)
)
pandas_result = pandas.unique(
pandas.Index(
[
pandas.Timestamp("20160101", tz="US/Eastern"),
pandas.Timestamp("20160101", tz="US/Eastern"),
]
)
)
assert_array_equal(modin_result, pandas_result)
modin_result = pd.unique(pd.Series(pd.Categorical(list("baabc"))))
pandas_result = pandas.unique(pandas.Series(pandas.Categorical(list("baabc"))))
assert_array_equal(modin_result, pandas_result)
def test_to_datetime():
# DataFrame input for to_datetime
modin_df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
pandas_df = pandas.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df_equals(pd.to_datetime(modin_df), pandas.to_datetime(pandas_df))
# Series input for to_datetime
modin_s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
pandas_s = pandas.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 1000)
df_equals(pd.to_datetime(modin_s), pandas.to_datetime(pandas_s))
# Other inputs for to_datetime
value = 1490195805
assert pd.to_datetime(value, unit="s") == pandas.to_datetime(value, unit="s")
value = 1490195805433502912
assert pd.to_datetime(value, unit="ns") == pandas.to_datetime(value, unit="ns")
value = [1, 2, 3]
assert pd.to_datetime(value, unit="D", origin=pd.Timestamp("2000-01-01")).equals(
pandas.to_datetime(value, unit="D", origin=pandas.Timestamp("2000-01-01"))
)
| 33.891813 | 88 | 0.607368 |
2d408f3e27534696deb37b85a52ab8bec0443f6d | 1,929 | py | Python | src/api/jobnavi/tests/data_makeup/test_check_allowed.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/jobnavi/tests/data_makeup/test_check_allowed.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/jobnavi/tests/data_makeup/test_check_allowed.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from rest_framework.reverse import reverse
from jobnavi.api_helpers.api.util.api_driver import APIResponseUtil
from tests.utils import UnittestClient
@pytest.mark.usefixtures("jobnavi_api_fixture")
def test_check_allowed():
cluster_id = "default"
url = reverse('jobnavi_data_makeup-check-allowed', [cluster_id])
data = {
"schedule_id": "test_schedule",
"schedule_time": 9876543210
}
res = UnittestClient().get(url, data)
APIResponseUtil.check_response(res)
assert res.data.get("status") == "running"
| 52.135135 | 111 | 0.749093 |
41ed2b32801e2a7ff83e52c9e56d2fb6bd25c604 | 25,073 | py | Python | molsysmt/item/mmtf_MMTFDecoder/get.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/mmtf_MMTFDecoder/get.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/mmtf_MMTFDecoder/get.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | #######################################################################################
########### THE FOLLOWING LINES NEED TO BE CUSTOMIZED FOR EVERY CLASS ################
#######################################################################################
from molsysmt._private.execfile import execfile
from molsysmt._private.exceptions import NotWithThisFormError as _NotWithThisFormError
from molsysmt._private.exceptions import NotImplementedMethodError as _NotImplementedMethodError
from molsysmt._private.digestion import digest_item as _digest_item
from molsysmt._private.digestion import digest_indices as _digest_indices
from molsysmt._private.digestion import digest_structure_indices as _digest_structure_indices
from molsysmt import puw as _puw
import numpy as _np
from networkx import Graph as _Graph
_form='mmtf.MMTFDecoder'
## From atom
def get_atom_id_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_atom_id_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_atom_name_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_atom_name_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_atom_type_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_atom_type_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_group_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_group_index_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_component_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_component_index_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_chain_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_chain_index_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_molecule_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_molecule_index_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_entity_index_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_entity_index_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_inner_bonded_atoms_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_inner_bonded_atoms_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_n_inner_bonds_from_atom(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_inner_bonds_from_atom as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_coordinates_from_atom(item, indices='all', structure_indices='all', check=True):
if check:
_digest_item(item, _form)
        indices = _digest_indices(indices)
        structure_indices = _digest_structure_indices(structure_indices)
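    # MMTF stores one flat coordinate list per axis covering every model;
    # stack the three axes and reshape into (n_structures, n_atoms, 3).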
xyz = _np.column_stack([item.x_coord_list, item.y_coord_list, item.z_coord_list])
xyz = xyz.reshape([-1, item.num_atoms, 3])
xyz = _puw.quantity(xyz, 'angstroms')
xyz = _puw.standardize(xyz)
    if structure_indices != 'all':
        xyz = xyz[structure_indices, :, :]
    if indices != 'all':
        xyz = xyz[:, indices, :]
return xyz
## From group
def get_group_id_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_group_id_from_group as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_group_name_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_group_name_from_group as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_group_type_from_group(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_group_type_from_group as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
## From component
def get_component_id_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_component_id_from_component as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_component_name_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_component_name_from_component as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_component_type_from_component(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_component_type_from_component as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
## From molecule
def get_molecule_id_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_molecule_id_from_molecule as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_molecule_name_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_molecule_name_from_molecule as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_molecule_type_from_molecule(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_molecule_type_from_molecule as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
## From chain
def get_chain_id_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_chain_id_from_chain as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_chain_name_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_chain_name_from_chain as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_chain_type_from_chain(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_chain_type_from_chain as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
## From entity
def get_entity_id_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_entity_id_from_entity as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_entity_name_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_entity_name_from_entity as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_entity_type_from_entity(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_entity_type_from_entity as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
## From system
def get_n_atoms_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_atoms_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_n_groups_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_groups_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_n_components_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_components_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_n_chains_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_chains_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_n_molecules_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_molecules_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_n_entities_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_entities_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
    output = aux_get(tmp_item, check=False)
return output
def get_n_bonds_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_n_bonds_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
def get_box_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
from molsysmt.pbc import box_vectors_from_box_lengths_and_angles
n_structures = get_n_structures_from_system(item, check=False)
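    # item.unit_cell is a flat (a, b, c, alpha, beta, gamma) record in the MMTF
    # data; it is replicated for every structure and converted to box vectors.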
if item.unit_cell is not None:
cell_lengths = _np.empty([n_structures,3], dtype='float64')
cell_angles = _np.empty([n_structures,3], dtype='float64')
for ii in range(3):
cell_lengths[:,ii] = item.unit_cell[ii]
cell_angles[:,ii] = item.unit_cell[ii+3]
cell_lengths = _puw.quantity(cell_lengths, 'angstroms')
cell_angles = _puw.quantity(cell_angles, 'degrees')
box = box_vectors_from_box_lengths_and_angles(cell_lengths, cell_angles)
box = _puw.standardize(box)
else:
box = None
    if structure_indices != 'all':
        if box is not None:
            box = box[structure_indices, :, :]
return box
def get_time_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
return None
def get_step_from_system(item, structure_indices='all', check=True):
if check:
_digest_item(item, _form)
structure_indices = _digest_structure_indices(structure_indices)
return None
def get_n_structures_from_system(item, check=True):
if check:
_digest_item(item, _form)
return item.num_models
def get_bonded_atoms_from_system(item, check=True):
if check:
_digest_item(item, _form)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_bonded_atoms_from_system as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, check=False)
return output
## From bond
def get_bond_order_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_bond_order_from_bond as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_bond_type_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_bond_type_from_bond as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
def get_atom_index_from_bond(item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
from .to_molsysmt_Topology import to_molsysmt_Topology
from ..molsysmt_Topology import get_atom_index_from_bond as aux_get
tmp_item = to_molsysmt_Topology(item, check=False)
output = aux_get(tmp_item, indices=indices, check=False)
return output
#######################################################################################
######### DO NOT TOUCH THE FOLLOWING LINES, JUST INCLUDE THEM AS THEY ARE #############
#######################################################################################
from os import path
this_folder = path.dirname(path.abspath(__file__))
common_get = path.join(this_folder, '../../_private/common_get.py')
execfile(common_get, globals(), locals())
del(path, this_folder, common_get)
#######################################################################################
############## REMOVE COMMON GET METHODS NOT DEFINED FOR THIS FORM ####################
#######################################################################################
del(
# From atom
#get_atom_index_from_atom,
#get_group_id_from_atom,
#get_group_name_from_atom,
#get_group_type_from_atom,
#get_component_id_from_atom,
#get_component_name_from_atom,
#get_component_type_from_atom,
#get_chain_id_from_atom,
#get_chain_name_from_atom,
#get_chain_type_from_atom,
#get_molecule_id_from_atom,
#get_molecule_name_from_atom,
#get_molecule_type_from_atom,
#get_entity_id_from_atom,
#get_entity_name_from_atom,
#get_entity_type_from_atom,
#get_n_atoms_from_atom,
#get_n_groups_from_atom,
#get_n_components_from_atom,
#get_n_molecules_from_atom,
#get_n_chains_from_atom,
#get_n_entities_from_atom,
#get_bonded_atoms_from_atom,
#get_bond_index_from_atom,
#get_n_bonds_from_atom,
#get_inner_bond_index_from_atom,
# From group
#get_atom_index_from_group,
#get_atom_id_from_group,
#get_atom_name_from_group,
#get_atom_type_from_group,
#get_group_index_from_group,
#get_component_index_from_group,
#get_component_id_from_group,
#get_component_name_from_group,
#get_component_type_from_group,
#get_chain_index_from_group,
#get_chain_id_from_group,
#get_chain_name_from_group,
#get_chain_type_from_group,
#get_molecule_index_from_group,
#get_molecule_id_from_group,
#get_molecule_name_from_group,
#get_molecule_type_from_group,
#get_entity_index_from_group,
#get_entity_id_from_group,
#get_entity_name_from_group,
#get_entity_type_from_group,
#get_n_atoms_from_group,
#get_n_groups_from_group,
#get_n_components_from_group,
#get_n_molecules_from_group,
#get_n_chains_from_group,
#get_n_entities_from_group,
# From component
#get_atom_index_from_component,
#get_atom_id_from_component,
#get_atom_name_from_component,
#get_atom_type_from_component,
#get_group_index_from_component,
#get_group_id_from_component,
#get_group_name_from_component,
#get_group_type_from_component,
#get_component_index_from_component,
#get_chain_index_from_component,
#get_chain_id_from_component,
#get_chain_name_from_component,
#get_chain_type_from_component,
#get_molecule_index_from_component,
#get_molecule_id_from_component,
#get_molecule_name_from_component,
#get_molecule_type_from_component,
#get_entity_index_from_component,
#get_entity_id_from_component,
#get_entity_name_from_component,
#get_entity_type_from_component,
#get_n_atoms_from_component,
#get_n_groups_from_component,
#get_n_components_from_component,
#get_n_molecules_from_component,
#get_n_chains_from_component,
#get_n_entities_from_component,
# From molecule
#get_atom_index_from_molecule,
#get_atom_id_from_molecule,
#get_atom_name_from_molecule,
#get_atom_type_from_molecule,
#get_group_index_from_molecule,
#get_group_id_from_molecule,
#get_group_name_from_molecule,
#get_group_type_from_molecule,
#get_component_index_from_molecule,
#get_component_id_from_molecule,
#get_component_name_from_molecule,
#get_component_type_from_molecule,
#get_chain_index_from_molecule,
#get_chain_id_from_molecule,
#get_chain_name_from_molecule,
#get_chain_type_from_molecule,
#get_molecule_index_from_molecule,
#get_entity_index_from_molecule,
#get_entity_id_from_molecule,
#get_entity_name_from_molecule,
#get_entity_type_from_molecule,
#get_n_atoms_from_molecule,
#get_n_groups_from_molecule,
#get_n_components_from_molecule,
#get_n_molecules_from_molecule,
#get_n_chains_from_molecule,
#get_n_entities_from_molecule,
# From chain
#get_atom_index_from_chain,
#get_atom_id_from_chain,
#get_atom_name_from_chain,
#get_atom_type_from_chain,
#get_group_index_from_chain,
#get_group_id_from_chain,
#get_group_name_from_chain,
#get_group_type_from_chain,
#get_component_index_from_chain,
#get_component_id_from_chain,
#get_component_name_from_chain,
#get_component_type_from_chain,
#get_chain_index_from_chain,
#get_molecule_index_from_chain,
#get_molecule_id_from_chain,
#get_molecule_name_from_chain,
#get_molecule_type_from_chain,
#get_entity_index_from_chain,
#get_entity_id_from_chain,
#get_entity_name_from_chain,
#get_entity_type_from_chain,
#get_n_atoms_from_chain,
#get_n_groups_from_chain,
#get_n_components_from_chain,
#get_n_molecules_from_chain,
#get_n_chains_from_chain,
#get_n_entities_from_chain,
# From entity
#get_atom_index_from_entity,
#get_atom_id_from_entity,
#get_atom_name_from_entity,
#get_atom_type_from_entity,
#get_group_index_from_entity,
#get_group_id_from_entity,
#get_group_name_from_entity,
#get_group_type_from_entity,
#get_component_index_from_entity,
#get_component_id_from_entity,
#get_component_name_from_entity,
#get_component_type_from_entity,
#get_chain_index_from_entity,
#get_chain_id_from_entity,
#get_chain_name_from_entity,
#get_chain_type_from_entity,
#get_molecule_index_from_entity,
#get_molecule_id_from_entity,
#get_molecule_name_from_entity,
#get_molecule_type_from_entity,
#get_entity_index_from_entity,
#get_n_atoms_from_entity,
#get_n_groups_from_entity,
#get_n_components_from_entity,
#get_n_molecules_from_entity,
#get_n_chains_from_entity,
#get_n_entities_from_entity,
# From system
#get_n_aminoacids_from_system,
#get_n_nucleotides_from_system,
#get_n_ions_from_system,
#get_n_waters_from_system,
#get_n_cosolutes_from_system,
#get_n_small_molecules_from_system,
#get_n_peptides_from_system,
#get_n_proteins_from_system,
#get_n_dnas_from_system,
#get_n_rnas_from_system,
#get_n_lipids_from_system,
#get_coordinates_from_system,
#get_box_shape_from_system,
#get_box_lengths_from_system,
#get_box_angles_from_system,
#get_box_volume_from_system,
#get_bonded_atoms_from_system,
#get_bond_index_from_system,
#get_inner_bonded_atoms_from_system,
#get_inner_bond_index_from_system,
# From bond
#get_bond_index_from_bond,
#get_n_bonds_from_bond
)
| 29.019676 | 96 | 0.735931 |
3d141e0fca65fd5e413ac94969748a1535ee58a2 | 20,620 | py | Python | tests/integration_tests/test_chainermn.py | miyamamoto/optuna | 855f4ba5c6414eba20c03fed4bc9d4344979971b | [
"MIT"
] | 2 | 2020-05-04T11:26:02.000Z | 2020-07-18T12:59:58.000Z | tests/integration_tests/test_chainermn.py | miyamamoto/optuna | 855f4ba5c6414eba20c03fed4bc9d4344979971b | [
"MIT"
] | null | null | null | tests/integration_tests/test_chainermn.py | miyamamoto/optuna | 855f4ba5c6414eba20c03fed4bc9d4344979971b | [
"MIT"
] | null | null | null | import gc
import os
import pytest
from optuna import create_study
from optuna import distributions
from optuna import integration
from optuna.integration import ChainerMNStudy
from optuna import pruners
from optuna.storages import InMemoryStorage
from optuna.storages import RDBStorage
from optuna.structs import TrialPruned
from optuna.structs import TrialState
from optuna import Study
from optuna.testing.integration import DeterministicPruner
from optuna.testing.sampler import DeterministicRelativeSampler
from optuna.testing.storage import StorageSupplier
from optuna.trial import Trial
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from types import TracebackType # NOQA
from typing import Any # NOQA
from typing import Callable # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from typing import Type # NOQA
from optuna.integration.chainermn import ChainerMNTrial # NOQA
from optuna.pruners import BasePruner # NOQA
from optuna.samplers import BaseSampler # NOQA
from optuna.storages import BaseStorage # NOQA
try:
import chainermn
from chainermn.communicators.communicator_base import CommunicatorBase # NOQA
_available = True
except ImportError:
_available = False
STORAGE_MODES = ['new', 'common']
PRUNER_INIT_FUNCS = [lambda: pruners.MedianPruner(), lambda: pruners.SuccessiveHalvingPruner()]
if os.getenv('INCLUDE_SLOW_TESTS') is None:
CACHE_MODES = [True]
else:
CACHE_MODES = [True, False]
def setup_module():
# type: () -> None
StorageSupplier.setup_common_tempfile()
def teardown_module():
# type: () -> None
StorageSupplier.teardown_common_tempfile()
class Func(object):
def __init__(self):
# type: () -> None
self.suggested_values = {} # type: Dict[int, Dict[str, Any]]
def __call__(self, trial, comm):
# type: (ChainerMNTrial, CommunicatorBase) -> float
x = trial.suggest_uniform('x', -10, 10)
y = trial.suggest_loguniform('y', 20, 30)
z = trial.suggest_categorical('z', (-1.0, 1.0))
self.suggested_values[trial._trial_id] = {}
self.suggested_values[trial._trial_id]['x'] = x
self.suggested_values[trial._trial_id]['y'] = y
self.suggested_values[trial._trial_id]['z'] = z
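        # Quadratic bowl: the optimum lies at (x, y, z) = (2, 25, -1.0).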
return (x - 2)**2 + (y - 25)**2 + z
class MultiNodeStorageSupplier(StorageSupplier):
def __init__(self, storage_specifier, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
super(MultiNodeStorageSupplier, self).__init__(storage_specifier, cache_mode)
self.comm = comm
self.storage = None # type: Optional[RDBStorage]
def __enter__(self):
# type: () -> RDBStorage
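        # Only rank 0 creates the backing RDB storage; its URL is then
        # broadcast so that every rank connects to the same database.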
if self.comm.rank == 0:
storage = super(MultiNodeStorageSupplier, self).__enter__()
assert isinstance(storage, RDBStorage)
url = str(storage.engine.url)
else:
url = 'dummy_url'
url = self.comm.mpi_comm.bcast(url)
self.storage = RDBStorage(url)
return self.storage
def __exit__(self, exc_type, exc_val, exc_tb):
# type: (Type[BaseException], BaseException, TracebackType) -> None
# Explicitly call storage's __del__ before sqlite tempfile is deleted.
del self.storage
gc.collect()
self.comm.mpi_comm.barrier()
if self.comm.rank == 0:
super(MultiNodeStorageSupplier, self).__exit__(exc_type, exc_val, exc_tb)
@pytest.fixture
def comm():
# type: () -> CommunicatorBase
if not _available:
pytest.skip('This test requires ChainerMN.')
return chainermn.create_communicator('naive')
class TestChainerMNStudy(object):
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_init(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_study = ChainerMNStudy(study, comm)
assert mn_study.study_name == study.study_name
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_init_with_multiple_study_names(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
TestChainerMNStudy._check_multi_node(comm)
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
# Create study_name for each rank.
name = create_study(storage).study_name
study = Study(name, storage)
with pytest.raises(ValueError):
ChainerMNStudy(study, comm)
@staticmethod
def test_init_with_incompatible_storage(comm):
# type: (CommunicatorBase) -> None
study = TestChainerMNStudy._create_shared_study(InMemoryStorage(), comm)
with pytest.raises(ValueError):
ChainerMNStudy(study, comm)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_optimize(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_study = ChainerMNStudy(study, comm)
# Invoke optimize.
n_trials = 20
func = Func()
mn_study.optimize(func, n_trials=n_trials)
# Assert trial counts.
assert len(mn_study.trials) == n_trials
# Assert the same parameters have been suggested among all nodes.
for trial in mn_study.trials:
assert trial.params == func.suggested_values[trial.trial_id]
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
@pytest.mark.parametrize('pruner_init_func', PRUNER_INIT_FUNCS)
def test_pruning(storage_mode, cache_mode, pruner_init_func, comm):
# type: (str, bool, Callable[[], BasePruner], CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
pruner = pruner_init_func()
study = TestChainerMNStudy._create_shared_study(storage, comm, pruner=pruner)
mn_study = ChainerMNStudy(study, comm)
def objective(_trial, _comm):
                # type: (ChainerMNTrial, CommunicatorBase) -> float
                raise TrialPruned  # Always prune this trial.
# Invoke optimize.
n_trials = 20
mn_study.optimize(objective, n_trials=n_trials)
# Assert trial count.
assert len(mn_study.trials) == n_trials
# Assert pruned trial count.
pruned_trials = [t for t in mn_study.trials if t.state == TrialState.PRUNED]
assert len(pruned_trials) == n_trials
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_failure(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_study = ChainerMNStudy(study, comm)
def objective(_trial, _comm):
                # type: (ChainerMNTrial, CommunicatorBase) -> float
raise ValueError # Always fails.
# Invoke optimize in which `ValueError` is accepted.
n_trials = 20
mn_study.optimize(objective, n_trials=n_trials, catch=(ValueError, ))
# Assert trial count.
assert len(mn_study.trials) == n_trials
# Assert failed trial count.
failed_trials = [t for t in mn_study.trials if t.state == TrialState.FAIL]
assert len(failed_trials) == n_trials
# Synchronize nodes before executing the next optimization.
comm.mpi_comm.barrier()
# Invoke optimize in which no exceptions are accepted.
with pytest.raises(ValueError):
mn_study.optimize(objective, n_trials=n_trials, catch=())
# Assert trial count.
assert len(mn_study.trials) == n_trials + 1
# Assert aborted trial count.
aborted_trials = [t for t in mn_study.trials if t.state == TrialState.RUNNING]
assert len(aborted_trials) == 1
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_relative_sampling(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
relative_search_space = {
'x': distributions.UniformDistribution(low=-10, high=10),
'y': distributions.LogUniformDistribution(low=20, high=30),
'z': distributions.CategoricalDistribution(choices=(-1.0, 1.0)),
}
relative_params = {'x': 1.0, 'y': 25.0, 'z': -1.0}
sampler = DeterministicRelativeSampler(relative_search_space, # type: ignore
relative_params)
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm, sampler=sampler)
mn_study = ChainerMNStudy(study, comm)
# Invoke optimize.
n_trials = 20
func = Func()
mn_study.optimize(func, n_trials=n_trials)
# Assert trial counts.
assert len(mn_study.trials) == n_trials
# Assert the parameters in `relative_params` have been suggested among all nodes.
for trial in mn_study.trials:
assert trial.params == relative_params
@staticmethod
def _create_shared_study(storage, comm, pruner=None, sampler=None):
# type: (BaseStorage, CommunicatorBase, BasePruner, BaseSampler) -> Study
name_local = create_study(storage).study_name if comm.rank == 0 else None
name_bcast = comm.mpi_comm.bcast(name_local)
return Study(name_bcast, storage, pruner=pruner, sampler=sampler)
@staticmethod
def _check_multi_node(comm):
# type: (CommunicatorBase) -> None
if comm.size < 2:
pytest.skip('This test is for multi-node only.')
class TestChainerMNTrial(object):
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_init(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
trial = study.trials[-1]
assert mn_trial.trial_id == trial.trial_id
assert mn_trial._trial_id == trial.trial_id
assert mn_trial.number == trial.number
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_suggest_uniform(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
low = 0.5
high = 1.0
for _ in range(10):
mn_trial = _create_new_chainermn_trial(study, comm)
x1 = mn_trial.suggest_uniform('x', low, high)
assert low <= x1 <= high
x2 = mn_trial.suggest_uniform('x', low, high)
assert x1 == x2
with pytest.raises(ValueError):
mn_trial.suggest_loguniform('x', low, high)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_suggest_loguniform(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
low = 1e-7
high = 1e-2
for _ in range(10):
mn_trial = _create_new_chainermn_trial(study, comm)
x1 = mn_trial.suggest_loguniform('x', low, high)
assert low <= x1 <= high
x2 = mn_trial.suggest_loguniform('x', low, high)
assert x1 == x2
with pytest.raises(ValueError):
mn_trial.suggest_uniform('x', low, high)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_suggest_discrete_uniform(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
low = 0.0
high = 10.0
q = 1.0
for _ in range(10):
mn_trial = _create_new_chainermn_trial(study, comm)
x1 = mn_trial.suggest_discrete_uniform('x', low, high, q)
assert low <= x1 <= high
x2 = mn_trial.suggest_discrete_uniform('x', low, high, q)
assert x1 == x2
with pytest.raises(ValueError):
mn_trial.suggest_uniform('x', low, high)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_suggest_int(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
low = 0
high = 10
for _ in range(10):
mn_trial = _create_new_chainermn_trial(study, comm)
x1 = mn_trial.suggest_int('x', low, high)
assert low <= x1 <= high
x2 = mn_trial.suggest_int('x', low, high)
assert x1 == x2
with pytest.raises(ValueError):
mn_trial.suggest_uniform('x', low, high)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_suggest_categorical(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
choices = ('a', 'b', 'c')
for _ in range(10):
mn_trial = _create_new_chainermn_trial(study, comm)
x1 = mn_trial.suggest_categorical('x', choices)
assert x1 in choices
x2 = mn_trial.suggest_categorical('x', choices)
assert x1 == x2
with pytest.raises(ValueError):
mn_trial.suggest_uniform('x', 0., 1.)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
@pytest.mark.parametrize('is_pruning', [True, False])
def test_report_and_should_prune(storage_mode, cache_mode, comm, is_pruning):
# type: (str, bool, CommunicatorBase, bool) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm,
DeterministicPruner(is_pruning))
mn_trial = _create_new_chainermn_trial(study, comm)
mn_trial.report(1.0, 0)
assert mn_trial.should_prune(0) == is_pruning
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_params(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
x = mn_trial.suggest_categorical('x', [1])
assert mn_trial.params['x'] == x
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_distributions(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
mn_trial.suggest_categorical('x', [1])
assert mn_trial.distributions == {
'x': distributions.CategoricalDistribution(choices=(1, ))
}
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_user_attrs(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
mn_trial.set_user_attr('data', 'MNIST')
assert mn_trial.user_attrs['data'] == 'MNIST'
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_system_attrs(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
mn_trial.set_system_attr('system_message', 'test')
assert mn_trial.system_attrs['system_message'] == 'test'
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_call_with_mpi(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
with pytest.raises(RuntimeError):
def func():
# type: () -> None
raise RuntimeError
mn_trial._call_with_mpi(func)
@staticmethod
@pytest.mark.parametrize('storage_mode', STORAGE_MODES)
@pytest.mark.parametrize('cache_mode', CACHE_MODES)
def test_datetime_start(storage_mode, cache_mode, comm):
# type: (str, bool, CommunicatorBase) -> None
with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
study = TestChainerMNStudy._create_shared_study(storage, comm)
mn_trial = _create_new_chainermn_trial(study, comm)
assert mn_trial.datetime_start is not None
def _create_new_chainermn_trial(study, comm):
# type: (Study, CommunicatorBase) -> integration.chainermn.ChainerMNTrial
if comm.rank == 0:
trial_id = study._storage.create_new_trial(study.study_id)
trial = Trial(study, trial_id)
mn_trial = integration.chainermn.ChainerMNTrial(trial, comm)
else:
mn_trial = integration.chainermn.ChainerMNTrial(None, comm)
comm.mpi_comm.barrier()
return mn_trial
| 38.25603 | 95 | 0.652716 |
8d6e4161a99ed716b7819f25ea6d91c7f3bf2f53 | 25,401 | py | Python | nonebot/command/__init__.py | MilkiceForks/nonebot | c13173dd2a37c5edb7ed79eb3c69aa7fce7a8e1d | [
"MIT"
] | null | null | null | nonebot/command/__init__.py | MilkiceForks/nonebot | c13173dd2a37c5edb7ed79eb3c69aa7fce7a8e1d | [
"MIT"
] | null | null | null | nonebot/command/__init__.py | MilkiceForks/nonebot | c13173dd2a37c5edb7ed79eb3c69aa7fce7a8e1d | [
"MIT"
] | null | null | null | import asyncio
import re
import shlex
from datetime import datetime
from typing import (
Tuple, Union, Callable, Iterable, Any, Optional, List, Dict,
Awaitable
)
from nonebot import NoneBot, permission as perm
from nonebot.command.argfilter import ValidateError
from nonebot.helpers import context_id, send, render_expression
from nonebot.log import logger
from nonebot.message import Message
from nonebot.session import BaseSession
from nonebot.typing import (
Context_T, CommandName_T, CommandArgs_T, Message_T, State_T,
Filter_T
)
from typing import Pattern
# key: one segment of command name
# value: subtree or a leaf Command object
_registry = {} # type: Dict[str, Union[Dict, Command]]
# key: alias
# value: real command name
_aliases = {} # type: Dict[str, CommandName_T]
# key: context id
# value: CommandSession object
_sessions = {} # type: Dict[str, CommandSession]
# key: alias
# value: priority value
_priorities = {} # type: Dict[str, int]
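# For illustration only (this command is hypothetical and not registered by
# this module): after `on_command(('random', 'number'), aliases=('rand',))`
# the structures would look like
#     _registry == {'random': {'number': <Command ('random', 'number')>}}
#     _aliases == {'rand': ('random', 'number')}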
CommandHandler_T = Callable[['CommandSession'], Any]
class Command:
__slots__ = ('name', 'func',
'permission',
'only_to_me',
'privileged',
'args_parser_func')
def __init__(self, *,
name: CommandName_T,
func: CommandHandler_T,
permission: int,
only_to_me: bool,
privileged: bool):
self.name = name
self.func = func
self.permission = permission
self.only_to_me = only_to_me
self.privileged = privileged
self.args_parser_func: Optional[CommandHandler_T] = None
async def run(self, session, *,
check_perm: bool = True,
dry: bool = False) -> bool:
"""
Run the command in a given session.
:param session: CommandSession object
:param check_perm: should check permission before running
        :param dry: only check the prerequisites, without actually running
        :return: whether the command has finished (or, if dry is True, whether it could be run)
"""
has_perm = await self._check_perm(session) if check_perm else True
if self.func and has_perm:
if dry:
return True
if session.current_arg_filters is not None and \
session.current_key is not None:
# argument-level filters are given, use them
arg = session.current_arg
config = session.bot.config
for f in session.current_arg_filters:
try:
res = f(arg)
if isinstance(res, Awaitable):
res = await res
arg = res
except ValidateError as e:
# validation failed
if config.MAX_VALIDATION_FAILURES > 0:
# should check number of validation failures
session.state['__validation_failure_num'] = \
session.state.get(
'__validation_failure_num', 0) + 1
if session.state['__validation_failure_num'] >= \
config.MAX_VALIDATION_FAILURES:
# noinspection PyProtectedMember
session.finish(render_expression(
config.TOO_MANY_VALIDATION_FAILURES_EXPRESSION
), **session._current_send_kwargs)
failure_message = e.message
if failure_message is None:
failure_message = render_expression(
config.DEFAULT_VALIDATION_FAILURE_EXPRESSION
)
# noinspection PyProtectedMember
session.pause(failure_message,
**session._current_send_kwargs)
# passed all filters
session.state[session.current_key] = arg
else:
# fallback to command-level args_parser_func
if self.args_parser_func:
await self.args_parser_func(session)
if session.current_key is not None and \
session.current_key not in session.state:
# args_parser_func didn't set state, here we set it
session.state[session.current_key] = session.current_arg
await self.func(session)
return True
return False
async def _check_perm(self, session) -> bool:
"""
Check if the session has sufficient permission to
call the command.
:param session: CommandSession object
        :return: whether the session has sufficient permission
"""
return await perm.check_permission(session.bot, session.ctx,
self.permission)
class CommandFunc:
__slots__ = ('cmd', 'func')
def __init__(self, cmd: Command, func: CommandHandler_T):
self.cmd = cmd
self.func = func
def __call__(self, session: 'CommandSession') -> Any:
return self.func(session)
def args_parser(self, parser_func: CommandHandler_T) -> CommandHandler_T:
"""
Decorator to register a function as the arguments parser of
the corresponding command.
"""
self.cmd.args_parser_func = parser_func
return parser_func
def on_command(name: Union[str, CommandName_T], *,
aliases: Iterable[str] = (),
               priority: int = 10,
permission: int = perm.EVERYBODY,
only_to_me: bool = True,
privileged: bool = False,
shell_like: bool = False) -> Callable:
"""
Decorator to register a function as a command.
:param name: command name (e.g. 'echo' or ('random', 'number'))
    :param aliases: aliases of command name, for convenient access
    :param priority: priority value stored for each of the command's aliases
:param permission: permission required by the command
:param only_to_me: only handle messages to me
:param privileged: can be run even when there is already a session
:param shell_like: use shell-like syntax to split arguments
"""
def deco(func: CommandHandler_T) -> CommandHandler_T:
if not isinstance(name, (str, tuple)):
raise TypeError('the name of a command must be a str or tuple')
if not name:
raise ValueError('the name of a command must not be empty')
cmd_name = (name,) if isinstance(name, str) else name
cmd = Command(name=cmd_name, func=func, permission=permission,
only_to_me=only_to_me, privileged=privileged)
if shell_like:
async def shell_like_args_parser(session):
session.args['argv'] = shlex.split(session.current_arg)
cmd.args_parser_func = shell_like_args_parser
current_parent = _registry
for parent_key in cmd_name[:-1]:
current_parent[parent_key] = current_parent.get(parent_key) or {}
current_parent = current_parent[parent_key]
current_parent[cmd_name[-1]] = cmd
for alias in aliases:
_aliases[alias] = cmd_name
_priorities[alias] = priority
return CommandFunc(cmd, func)
return deco
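# Illustrative usage sketch (not part of the original module; the command name,
# prompt text and handler bodies below are hypothetical):
#
#     @on_command('weather', aliases=('wea',), only_to_me=False)
#     async def weather(session: CommandSession):
#         city = session.get('city', prompt='Which city do you want to check?')
#         await session.send(f'Weather of {city}: ...')
#
#     @weather.args_parser
#     async def _(session: CommandSession):
#         stripped_arg = session.current_arg_text.strip()
#         if stripped_arg:
#             session.state['city'] = stripped_arg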
def _find_command(name: Union[str, CommandName_T]) -> Optional[Command]:
cmd_name = (name,) if isinstance(name, str) else name
if not cmd_name:
return None
cmd_tree = _registry
for part in cmd_name[:-1]:
if part not in cmd_tree or not isinstance(cmd_tree[part], dict):
return None
cmd_tree = cmd_tree[part]
cmd = cmd_tree.get(cmd_name[-1])
return cmd if isinstance(cmd, Command) else None
class _PauseException(Exception):
"""
Raised by session.pause() indicating that the command session
should be paused to ask the user for some arguments.
"""
pass
class _FinishException(Exception):
"""
Raised by session.finish() indicating that the command session
should be stopped and removed.
"""
def __init__(self, result: bool = True):
"""
:param result: succeeded to call the command
"""
self.result = result
class SwitchException(Exception):
"""
Raised by session.switch() indicating that the command session
should be stopped and replaced with a new one (going through
handle_message() again).
Since the new context message will go through handle_message()
    again, the latter function should be notified. So this exception
is designed to be propagated to handle_message().
"""
def __init__(self, new_ctx_message: Message):
"""
:param new_ctx_message: new message which should be placed in context
"""
self.new_ctx_message = new_ctx_message
class CommandSession(BaseSession):
__slots__ = ('cmd',
'current_key', 'current_arg_filters', '_current_send_kwargs',
'current_arg', '_current_arg_text', '_current_arg_images',
'_state', '_last_interaction', '_running')
def __init__(self, bot: NoneBot, ctx: Context_T, cmd: Command, *,
current_arg: str = '', args: Optional[CommandArgs_T] = None):
super().__init__(bot, ctx)
self.cmd = cmd # Command object
# unique key of the argument that is currently requesting (asking)
self.current_key: Optional[str] = None
# initialize current argument filters
self.current_arg_filters: Optional[List[Filter_T]] = None
self._current_send_kwargs: Dict[str, Any] = {}
# initialize current argument
self.current_arg: str = '' # with potential CQ codes
self._current_arg_text = None
self._current_arg_images = None
self.refresh(ctx, current_arg=current_arg) # fill the above
self._state: State_T = {}
if args:
self._state.update(args)
self._last_interaction = None # last interaction time of this session
self._running = False
@property
def state(self) -> State_T:
"""
State of the session.
This contains all named arguments and
other session scope temporary values.
"""
return self._state
@property
def args(self) -> CommandArgs_T:
"""Deprecated. Use `session.state` instead."""
return self.state
@property
def running(self) -> bool:
return self._running
@running.setter
def running(self, value) -> None:
if self._running is True and value is False:
# change status from running to not running, record the time
self._last_interaction = datetime.now()
self._running = value
@property
def is_valid(self) -> bool:
"""Check if the session is expired or not."""
if self.bot.config.SESSION_EXPIRE_TIMEOUT and \
self._last_interaction and \
datetime.now() - self._last_interaction > \
self.bot.config.SESSION_EXPIRE_TIMEOUT:
return False
return True
@property
def is_first_run(self) -> bool:
return self._last_interaction is None
@property
def current_arg_text(self) -> str:
"""
Plain text part in the current argument, without any CQ codes.
"""
if self._current_arg_text is None:
self._current_arg_text = Message(
self.current_arg).extract_plain_text()
return self._current_arg_text
@property
def current_arg_images(self) -> List[str]:
"""
Images (as list of urls) in the current argument.
"""
if self._current_arg_images is None:
self._current_arg_images = [
s.data['url'] for s in Message(self.current_arg)
if s.type == 'image' and 'url' in s.data
]
return self._current_arg_images
@property
def argv(self) -> List[str]:
"""
Shell-like argument list, similar to sys.argv.
Only available while shell_like is True in on_command decorator.
"""
return self.state.get('argv', [])
def refresh(self, ctx: Context_T, *, current_arg: str = '') -> None:
"""
Refill the session with a new message context.
:param ctx: new message context
:param current_arg: new command argument as a string
"""
self.ctx = ctx
self.current_arg = current_arg
self._current_arg_text = None
self._current_arg_images = None
def get(self, key: str, *,
prompt: Optional[Message_T] = None,
arg_filters: Optional[List[Filter_T]] = None,
**kwargs) -> Any:
"""
Get an argument with a given key.
If the argument does not exist in the current session,
a pause exception will be raised, and the caller of
the command will know it should keep the session for
further interaction with the user.
:param key: argument key
:param prompt: prompt to ask the user
:param arg_filters: argument filters for the next user input
:return: the argument value
"""
if key in self.state:
return self.state[key]
self.current_key = key
self.current_arg_filters = arg_filters
self._current_send_kwargs = kwargs
self.pause(prompt, **kwargs)
def get_optional(self, key: str,
default: Optional[Any] = None) -> Optional[Any]:
"""
        Simply get an argument with the given key.
Deprecated. Use `session.state.get()` instead.
"""
return self.state.get(key, default)
def pause(self, message: Optional[Message_T] = None, **kwargs) -> None:
"""Pause the session for further interaction."""
if message:
asyncio.ensure_future(self.send(message, **kwargs))
raise _PauseException
def finish(self, message: Optional[Message_T] = None, **kwargs) -> None:
"""Finish the session."""
if message:
asyncio.ensure_future(self.send(message, **kwargs))
raise _FinishException
def switch(self, new_ctx_message: Message_T) -> None:
"""
Finish the session and switch to a new (fake) message context.
The user may send another command (or another intention as natural
language) when interacting with the current session. In this case,
the session may not understand what the user is saying, so it
should call this method and pass in that message, then NoneBot will
handle the situation properly.
"""
if self.is_first_run:
# if calling this method during first run,
# we think the command is not handled
raise _FinishException(result=False)
if not isinstance(new_ctx_message, Message):
new_ctx_message = Message(new_ctx_message)
raise SwitchException(new_ctx_message)
def parse_command(bot: NoneBot,
cmd_string: str) -> Tuple[Optional[Command], Optional[str]]:
"""
Parse a command string (typically from a message).
:param bot: NoneBot instance
:param cmd_string: command string
:return: (Command object, current arg string)
"""
logger.debug(f'Parsing command: {cmd_string}')
matched_start = None
for start in bot.config.COMMAND_START:
# loop through COMMAND_START to find the longest matched start
curr_matched_start = None
if isinstance(start, type(re.compile(''))):
m = start.search(cmd_string)
if m and m.start(0) == 0:
curr_matched_start = m.group(0)
elif isinstance(start, str):
if cmd_string.startswith(start):
curr_matched_start = start
if curr_matched_start is not None and \
(matched_start is None or
len(curr_matched_start) > len(matched_start)):
# a longer start, use it
matched_start = curr_matched_start
if matched_start is None:
# it's not a command
logger.debug('It\'s not a command')
return None, None
logger.debug(f'Matched command start: '
f'{matched_start}{"(empty)" if not matched_start else ""}')
full_command = cmd_string[len(matched_start):].lstrip()
if not full_command:
# command is empty
return None, None
cmd_name_text, *cmd_remained = full_command.split(maxsplit=1)
cmd_name = None
cmd_priority = None
for alias, name in _aliases.items():
if isinstance(alias, Pattern) and alias.match(cmd_name_text):
if not cmd_priority or _priorities[alias] > cmd_priority:
cmd_name = name
cmd_priority = _priorities[alias]
elif isinstance(alias, str) and alias == cmd_name_text:
if not cmd_priority or _priorities[alias] > cmd_priority:
cmd_name = name
cmd_priority = _priorities[alias]
if not cmd_name:
for sep in bot.config.COMMAND_SEP:
# loop through COMMAND_SEP to find the most optimized split
curr_cmd_name = None
if isinstance(sep, type(re.compile(''))):
curr_cmd_name = tuple(sep.split(cmd_name_text))
elif isinstance(sep, str):
curr_cmd_name = tuple(cmd_name_text.split(sep))
if curr_cmd_name is not None and \
(not cmd_name or len(curr_cmd_name) > len(cmd_name)):
# a more optimized split, use it
cmd_name = curr_cmd_name
if not cmd_name:
cmd_name = (cmd_name_text,)
logger.debug(f'Split command name: {cmd_name}')
cmd = _find_command(cmd_name)
if not cmd:
logger.debug(f'Command {cmd_name} not found')
return None, None
logger.debug(f'Command {cmd.name} found, function: {cmd.func}')
return cmd, ''.join(cmd_remained)
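# Example (illustrative, with hypothetical settings): if COMMAND_START contains '/'
# and COMMAND_SEP contains '.', the message '/note.add buy milk' parses to the
# command name ('note', 'add') with current arg 'buy milk', provided such a command
# has been registered.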
async def handle_command(bot: NoneBot, ctx: Context_T) -> bool:
"""
Handle a message as a command.
This function is typically called by "handle_message".
:param bot: NoneBot instance
:param ctx: message context
:return: the message is handled as a command
"""
cmd, current_arg = parse_command(bot, str(ctx['message']).lstrip())
is_privileged_cmd = cmd and cmd.privileged
if is_privileged_cmd and cmd.only_to_me and not ctx['to_me']:
is_privileged_cmd = False
disable_interaction = is_privileged_cmd
if is_privileged_cmd:
logger.debug(f'Command {cmd.name} is a privileged command')
ctx_id = context_id(ctx)
if not is_privileged_cmd:
# wait for 1.5 seconds (at most) if the current session is running
retry = 5
while retry > 0 and \
_sessions.get(ctx_id) and _sessions[ctx_id].running:
retry -= 1
await asyncio.sleep(0.3)
check_perm = True
session = _sessions.get(ctx_id) if not is_privileged_cmd else None
if session:
if session.running:
logger.warning(f'There is a session of command '
f'{session.cmd.name} running, notify the user')
asyncio.ensure_future(send(
bot, ctx,
render_expression(bot.config.SESSION_RUNNING_EXPRESSION)
))
# pretend we are successful, so that NLP won't handle it
return True
if session.is_valid:
logger.debug(f'Session of command {session.cmd.name} exists')
# since it's in a session, the user must be talking to me
ctx['to_me'] = True
session.refresh(ctx, current_arg=str(ctx['message']))
# there is no need to check permission for existing session
check_perm = False
else:
# the session is expired, remove it
logger.debug(f'Session of command {session.cmd.name} is expired')
if ctx_id in _sessions:
del _sessions[ctx_id]
session = None
if not session:
if not cmd:
logger.debug('Not a known command, ignored')
return False
if cmd.only_to_me and not ctx['to_me']:
logger.debug('Not to me, ignored')
return False
session = CommandSession(bot, ctx, cmd, current_arg=current_arg)
logger.debug(f'New session of command {session.cmd.name} created')
return await _real_run_command(session, ctx_id, check_perm=check_perm,
disable_interaction=disable_interaction)
async def call_command(bot: NoneBot, ctx: Context_T,
name: Union[str, CommandName_T], *,
current_arg: str = '',
args: Optional[CommandArgs_T] = None,
check_perm: bool = True,
disable_interaction: bool = False) -> bool:
"""
Call a command internally.
This function is typically called by some other commands
or "handle_natural_language" when handling NLPResult object.
Note: If disable_interaction is not True, after calling this function,
any previous command session will be overridden, even if the command
being called here does not need further interaction (a.k.a asking
the user for more info).
:param bot: NoneBot instance
:param ctx: message context
:param name: command name
:param current_arg: command current argument string
:param args: command args
:param check_perm: should check permission before running command
:param disable_interaction: disable the command's further interaction
:return: the command is successfully called
"""
cmd = _find_command(name)
if not cmd:
return False
session = CommandSession(bot, ctx, cmd, current_arg=current_arg, args=args)
return await _real_run_command(session, context_id(session.ctx),
check_perm=check_perm,
disable_interaction=disable_interaction)
async def _real_run_command(session: CommandSession,
ctx_id: str,
disable_interaction: bool = False,
**kwargs) -> bool:
if not disable_interaction:
# override session only when interaction is not disabled
_sessions[ctx_id] = session
try:
logger.debug(f'Running command {session.cmd.name}')
session.running = True
future = asyncio.ensure_future(session.cmd.run(session, **kwargs))
timeout = None
if session.bot.config.SESSION_RUN_TIMEOUT:
timeout = session.bot.config.SESSION_RUN_TIMEOUT.total_seconds()
try:
await asyncio.wait_for(future, timeout)
handled = future.result()
except asyncio.TimeoutError:
handled = True
except (_PauseException, _FinishException, SwitchException) as e:
raise e
except Exception as e:
logger.error(f'An exception occurred while '
f'running command {session.cmd.name}:')
logger.exception(e)
handled = True
raise _FinishException(handled)
except _PauseException:
session.running = False
if disable_interaction:
# if the command needs further interaction, we view it as failed
return False
logger.debug(f'Further interaction needed for '
f'command {session.cmd.name}')
# return True because this step of the session is successful
return True
except (_FinishException, SwitchException) as e:
session.running = False
logger.debug(f'Session of command {session.cmd.name} finished')
if not disable_interaction and ctx_id in _sessions:
# the command is finished, remove the session,
# but if interaction is disabled during this command call,
# we leave the _sessions untouched.
del _sessions[ctx_id]
if isinstance(e, _FinishException):
return e.result
elif isinstance(e, SwitchException):
# we are guaranteed that the session is not first run here,
# which means interaction is definitely enabled,
# so we can safely touch _sessions here.
if ctx_id in _sessions:
# make sure there is no session waiting
del _sessions[ctx_id]
logger.debug(f'Session of command {session.cmd.name} switching, '
f'new context message: {e.new_ctx_message}')
raise e # this is intended to be propagated to handle_message()
def kill_current_session(ctx: Context_T) -> None:
"""
Force kill current session of the given context,
despite whether it is running or not.
:param ctx: message context
"""
ctx_id = context_id(ctx)
if ctx_id in _sessions:
del _sessions[ctx_id]
from nonebot.command.group import CommandGroup
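# (imported at the bottom of the module, presumably to avoid a circular import)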
| 36.339056 | 82 | 0.606512 |
5676c868fa4ac9207478c95d249379bd3b01a36d | 3,955 | py | Python | pystachio_smt/simulation.py | ejh516/pystachio-smt | 1a2f11140830b871963d47b94995268e84e5c1ec | [
"MIT"
] | 8 | 2021-05-05T12:17:21.000Z | 2022-03-04T14:38:36.000Z | pystachio_smt/simulation.py | awollman/pystachio-smt | 37f87e0a4b562c1465ab90d26a77998e8dd340fb | [
"MIT"
] | 1 | 2021-05-05T12:14:01.000Z | 2021-05-10T12:33:06.000Z | pystachio_smt/simulation.py | awollman/pystachio-smt | 37f87e0a4b562c1465ab90d26a77998e8dd340fb | [
"MIT"
] | 2 | 2021-08-18T19:33:05.000Z | 2022-03-21T10:55:52.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 Edward Higgins <ed.higgins@york.ac.uk>
#
# Distributed under terms of the MIT license.
""" SIMULATION - Dataset simulation module
Description:
simulation.py contains the code for the simulation task, which simulates
pseudo-experimental datasets as characterised by the relevant parameters.
Contains:
function simulate
Author:
Edward Higgins
Version: 0.2.0
"""
from functools import reduce
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as random
import sys
from images import ImageData
from spots import Spots
import trajectories
def simulate(params):
if params.num_frames < 1:
        sys.exit("ERROR: Cannot simulate image with num_frames < 1")
# Make a spot array the same size as normal
real_spots = [Spots(params.num_spots) for i in range(params.num_frames)]
if params.max_spot_molecules == 1:
n_mols = np.array([1] * params.num_spots)
else:
n_mols = np.array(random.randint(1, params.max_spot_molecules, params.num_spots))
n_mols_fractional_intensity = np.zeros(n_mols.shape)
# initialise the spot co-ords
real_spots[0].positions[:, 0] = random.rand(params.num_spots) * params.frame_size[0]
real_spots[0].positions[:, 1] = random.rand(params.num_spots) * params.frame_size[1]
real_spots[0].spot_intensity[:] = params.I_single
real_spots[0].frame = 1
# Simulate diffusion
S = np.sqrt(2 * params.diffusion_coeff * params.frame_time) / params.pixel_size
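    # (S above is the per-frame random-walk step: sqrt(2*D*dt) in physical units, divided by pixel_size to get pixels)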
frame_start = 0
frame_end = params.num_spots
for frame in range(1, params.num_frames):
real_spots[frame].frame = frame
real_spots[frame].spot_intensity[:] = params.I_single * (n_mols+n_mols_fractional_intensity)
real_spots[frame].traj_num = real_spots[frame - 1].traj_num[:]
real_spots[frame].positions = random.normal(
real_spots[frame - 1].positions, S, (params.num_spots, 2)
)
        # Photobleach some spots
n_mols_fractional_intensity[:] = 0
for i in range(params.num_spots):
if n_mols[i] > 0:
for j in range(n_mols[i]):
if random.rand() < params.p_bleach_per_frame:
#How far into next frame does this one last?
frac = random.rand()
                        n_mols_fractional_intensity[i] += frac
n_mols[i] -= 1
# Simulate the image stack and save
image = ImageData()
image.initialise(params.num_frames, params.frame_size)
x_pos, y_pos = np.meshgrid(range(params.frame_size[0]), range(params.frame_size[1]))
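    # Each spot is rendered as a 2-D Gaussian of width spot_width (pixels); the
    # prefactor I/(2*pi*spot_width**2) makes the summed pixel values approximately the spot intensity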
for frame in range(params.num_frames):
frame_data = np.zeros([params.frame_size[1], params.frame_size[0]]).astype(np.uint16)
for spot in range(params.num_spots):
spot_data = (
(real_spots[frame].spot_intensity[spot] / (2 * np.pi * params.spot_width**2))
* np.exp(
-(
(x_pos - real_spots[frame].positions[spot, 0]) ** 2
+ (y_pos - real_spots[frame].positions[spot, 1]) ** 2
)
/ (2 * params.spot_width ** 2)
)
).astype(np.uint16)
frame_data += spot_data
real_spots[frame].spot_intensity[spot]=np.sum(spot_data)
frame_data = random.poisson(frame_data)
bg_noise = random.normal(params.bg_mean, params.bg_std, [params.frame_size[1], params.frame_size[0]])
frame_data += np.where(bg_noise > 0, bg_noise.astype(np.uint16), 0)
image[frame] = frame_data
real_trajs = trajectories.build_trajectories(real_spots, params)
image.write(params.name + ".tif")
trajectories.write_trajectories(real_trajs, params.name + '_simulated.tsv')
return image, real_trajs
| 35.630631 | 109 | 0.642478 |
31b533efe29437aebb403192cc4c2b8c32cbf3fd | 765 | py | Python | Lib/site-packages/nbformat/v4/__init__.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/nbformat/v4/__init__.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/nbformat/v4/__init__.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | """The main API for the v4 notebook format."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
__all__ = [
"nbformat",
"nbformat_minor",
"nbformat_schema",
"new_code_cell",
"new_markdown_cell",
"new_notebook",
"new_output",
"output_from_msg",
"reads",
"writes",
"to_notebook",
"downgrade",
"upgrade",
]
from .nbbase import (
nbformat,
nbformat_minor,
nbformat_schema,
new_code_cell,
new_markdown_cell,
new_notebook,
new_output,
new_raw_cell,
output_from_msg,
)
from .nbjson import reads, to_notebook, writes
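# reads_json/writes_json/to_notebook_json below are aliases of the functions imported above
# (presumably kept for backwards compatibility with the older JSON-specific names)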
reads_json = reads
writes_json = writes
to_notebook_json = to_notebook
from .convert import downgrade, upgrade
| 19.125 | 58 | 0.682353 |
d5a099526c96afe31c17dc2cb24200379e605be6 | 2,163 | py | Python | src/glauber.py | AdrianSosic/StarCTBN | 9e0f24ed28053555a7b08cc4e9944745c9c6b74c | [
"MIT"
] | 1 | 2021-06-23T03:03:30.000Z | 2021-06-23T03:03:30.000Z | src/glauber.py | AdrianSosic/StarCTBN | 9e0f24ed28053555a7b08cc4e9944745c9c6b74c | [
"MIT"
] | null | null | null | src/glauber.py | AdrianSosic/StarCTBN | 9e0f24ed28053555a7b08cc4e9944745c9c6b74c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from potts import Potts_CTBN
class Glauber_CTBN(Potts_CTBN):
"""CTBN with Glauber dynamics."""
def __init__(self, **kwargs):
"""
Parameters
----------
beta : float
Inverse Glauber temperature.
tau: float
Glauber rate scale.
"""
Potts_CTBN.__init__(self, n_states=2, **kwargs)
def crm_stats(self, stats):
# implements method of CTBN
return glauber_crm(int(stats), self.beta, self.tau)
@staticmethod
def set2stats(states):
# implements method of CTBN
return 2 * np.sum(states) - np.size(states)
@staticmethod
def stats_values(n_nodes):
# implements method of CTBN
return np.arange(-n_nodes, n_nodes + 1, 2)[:, None]
@staticmethod
def stats2inds(n_nodes, stats):
# implements method of CTBN
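        # maps a spin sum in {-n_nodes, ..., +n_nodes} (steps of 2) onto array indices 0..n_nodes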
return ((stats + n_nodes) / 2).astype(int)
def glauber_crm(sum_of_spins, beta, tau):
"""
Computes the conditional rate matrix of a node based on the parent spins using Glauber dynamics.
Parameters
----------
sum_of_spins : int
Sum of parent spins (down spin = -1, up spin = 1).
beta : float
Inverse Glauber temperature
tau : float
Glauber rate scale.
Returns
-------
out : 2-D array, shape: (S, S)
Conditional rate matrix.
"""
tan = np.tanh(beta * sum_of_spins)
rate_up = 0.5 * (1 + tan)
rate_down = 0.5 * (1 - tan)
return tau * np.array([[-rate_down, rate_down], [rate_up, -rate_up]])
if __name__ == '__main__':
# network size
N = 4
# number of observations
n_obs = 10
# CTBN parameters
ctbn_params = dict(
adjacency=np.ones((N, N))-np.eye(N),
beta=1,
tau=1,
T=10,
obs_std=0.1,
)
# generate and simulate Glauber network
ctbn = Glauber_CTBN(**ctbn_params)
ctbn.simulate()
ctbn.emit(n_obs)
ctbn.plot_trajectory(kind='line'), plt.show()
# inference
ctbn.update_rho()
ctbn.update_Q()
ctbn.plot_trajectory(kind='line'), plt.show()
| 23.010638 | 100 | 0.590846 |
21b37bc665204875af5bf23733782a5e969c4bf2 | 5,166 | py | Python | app/user/tests/test_user.py | ashok-kavungal/python-recipieAPI | bc509250754f9cd45509b8663338b59a237fea21 | [
"MIT"
] | 1 | 2020-07-07T09:01:31.000Z | 2020-07-07T09:01:31.000Z | app/user/tests/test_user.py | ashok-kavungal/python-recipieAPI | bc509250754f9cd45509b8663338b59a237fea21 | [
"MIT"
] | null | null | null | app/user/tests/test_user.py | ashok-kavungal/python-recipieAPI | bc509250754f9cd45509b8663338b59a237fea21 | [
"MIT"
] | 1 | 2020-10-29T08:05:02.000Z | 2020-10-29T08:05:02.000Z | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')  # gives access to the profile of an already authenticated user
def create_user(**params):
"""function to create new user"""
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
def setUp(self):
self.client = APIClient()
    def test_create_valid_user_success(self):
        """Test creating a user with a valid payload is successful"""
payload = {
'email': 'test@ashok.com',
'password': 'test123',
'name': 'ashok',
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(
user.check_password(payload['password'])
)
        self.assertNotIn('password', res.data)  # make sure the password is not sent back in the response
def test_user_exists(self):
"""Test fails if the payload contains existing user data"""
payload = {'email': 'test@gmail.com', 'password': '123open'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
"""Test that password must be more than 5 characters"""
payload = {'email': 'test@ashok.com', 'password': 'pw'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
        self.assertFalse(user_exists)
def test_create_token_for_user(self):
"""Test that a token is created for the user with valid payload post req"""
payload = {'email': 'test@ashok.com', 'password': 'test123'}
create_user(**payload) #creates user first
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)#if token generated, token object will be found
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_token_not_generated_invalidpayload(self):
"""Test that token is not created if invalid payload is passed"""
create_user(email='test@123.com', password='open123')
payload = {'email': 'test@123.com', 'password': 'invalidpassword'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
"""Test that token is not created if user doens't exist"""
payload = {'email': 'ashok@123.com', 'password': 'openit'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
"""Test that email and password are required"""
res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_check_user_unauthorized(self):
        """Test that an unauthenticated user cannot access the /me URL"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='test@gmail.com',
password='openit',
name='mathewmaraddock',
)
self.client = APIClient()
        self.client.force_authenticate(user=self.user)  # force-authenticate the client as the test user
def test_access_profile_success(self):
"""Test access profile for logged in user"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email,
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me URL"""
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
"""Test updating the user profile for authenticated user"""
payload = {'name': 'peter', 'password': 'unlock17a'}
res = self.client.patch(ME_URL, payload)
        self.user.refresh_from_db()  # check the change was persisted to the db
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| 36.9 | 98 | 0.66144 |
57b41facee42e1849e183f9bb3265e254c4b0aae | 8,015 | py | Python | tests/integration/scheduler/test_error.py | yosnoop/salt | e0ab000ffa3dd197408f08e0b37bf54a87d04090 | [
"Apache-2.0"
] | 1 | 2016-08-21T21:19:12.000Z | 2016-08-21T21:19:12.000Z | tests/integration/scheduler/test_error.py | yosnoop/salt | e0ab000ffa3dd197408f08e0b37bf54a87d04090 | [
"Apache-2.0"
] | 2 | 2019-03-06T20:43:44.000Z | 2019-04-10T23:56:02.000Z | tests/integration/scheduler/test_error.py | yosnoop/salt | e0ab000ffa3dd197408f08e0b37bf54a87d04090 | [
"Apache-2.0"
] | 1 | 2020-04-10T20:18:40.000Z | 2020-04-10T20:18:40.000Z | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import copy
import logging
import os
import dateutil.parser as dateutil_parser
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt Testing Libs
from tests.support.mock import MagicMock, patch
from tests.support.unit import skipIf
import tests.integration as integration
# Import Salt libs
import salt.utils.schedule
from salt.modules.test import ping as ping
try:
import croniter # pylint: disable=W0611
HAS_CRONITER = True
except ImportError:
HAS_CRONITER = False
log = logging.getLogger(__name__)
ROOT_DIR = os.path.join(integration.TMP, 'schedule-unit-tests')
SOCK_DIR = os.path.join(ROOT_DIR, 'test-socks')
DEFAULT_CONFIG = salt.config.minion_config(None)
DEFAULT_CONFIG['conf_dir'] = ROOT_DIR
DEFAULT_CONFIG['root_dir'] = ROOT_DIR
DEFAULT_CONFIG['sock_dir'] = SOCK_DIR
DEFAULT_CONFIG['pki_dir'] = os.path.join(ROOT_DIR, 'pki')
DEFAULT_CONFIG['cachedir'] = os.path.join(ROOT_DIR, 'cache')
class SchedulerErrorTest(ModuleCase, SaltReturnAssertsMixin):
'''
    Validate error handling in the scheduler
'''
def setUp(self):
with patch('salt.utils.schedule.clean_proc_dir', MagicMock(return_value=None)):
functions = {'test.ping': ping}
self.schedule = salt.utils.schedule.Schedule(copy.deepcopy(DEFAULT_CONFIG), functions, returners={})
self.schedule.opts['loop_interval'] = 1
self.schedule.opts['grains']['whens'] = {'tea time': '11/29/2017 12:00pm'}
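            # named time that scheduled jobs can reference via 'when: tea time' (used by the whens-grain test below)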
def tearDown(self):
self.schedule.reset()
@skipIf(not HAS_CRONITER, 'Cannot find croniter python module')
def test_eval_cron_invalid(self):
'''
        verify that a job with an invalid cron string does not run
        and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'cron': '0 16 29 13 *'
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
with patch('croniter.croniter.get_next', MagicMock(return_value=run_time)):
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Invalid cron string. Ignoring job job1.')
def test_eval_when_invalid_date(self):
'''
verify that scheduled job does not run
and returns the right error
'''
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'when': '13/29/2017 1:00pm',
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second before the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Invalid date string 13/29/2017 1:00pm. Ignoring job job1.')
def test_eval_whens_grain_not_dict(self):
'''
verify that scheduled job does not run
and returns the right error
'''
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'when': 'tea time',
}
}
}
self.schedule.opts['grains']['whens'] = ['tea time']
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second before the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
self.assertEqual(ret['_error'],
'Grain "whens" must be a dict. Ignoring job job1.')
def test_eval_once_invalid_datestring(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'once': '2017-13-13T13:00:00',
}
}
}
run_time = dateutil_parser.parse('12/13/2017 1:00pm')
# Add the job to the scheduler
self.schedule.opts.update(job)
# Evaluate 1 second at the run time
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('Date string could not be parsed: '
'2017-13-13T13:00:00, %Y-%m-%dT%H:%M:%S. '
'Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_invalid_date(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': {'start': '1:00pm', 'end': '25:00pm'}
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_end_before_start(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': {'start': '1:00pm', 'end': '12:00pm'}
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
def test_eval_skip_during_range_not_dict(self):
'''
verify that scheduled job does not run
and returns the right error
'''
job = {
'schedule': {
'job1': {
'function': 'test.ping',
'hours': 1,
'skip_during_range': ['start', '1:00pm', 'end', '12:00pm']
}
}
}
# Add the job to the scheduler
self.schedule.opts.update(job)
# eval at 3:00pm to prime, simulate minion start up.
run_time = dateutil_parser.parse('11/29/2017 3:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
# eval at 4:00pm to prime
run_time = dateutil_parser.parse('11/29/2017 4:00pm')
self.schedule.eval(now=run_time)
ret = self.schedule.job_status('job1')
_expected = ('schedule.handle_func: Invalid, '
'range must be specified as a '
'dictionary. Ignoring job job1.')
self.assertEqual(ret['_error'], _expected)
| 30.591603 | 112 | 0.571678 |
b80bed2535b1b8cbbfdd4f7f0e0628d0ab2cccb9 | 6,878 | py | Python | src/hanlendar/gui/widget/noteswidget.py | anetczuk/hanlendar | 940f2ca52f9d2bb601ffd1fa0332c6e31aaf854f | [
"MIT"
] | null | null | null | src/hanlendar/gui/widget/noteswidget.py | anetczuk/hanlendar | 940f2ca52f9d2bb601ffd1fa0332c6e31aaf854f | [
"MIT"
] | null | null | null | src/hanlendar/gui/widget/noteswidget.py | anetczuk/hanlendar | 940f2ca52f9d2bb601ffd1fa0332c6e31aaf854f | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2020 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
# from datetime import datetime
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QTextEdit, QMenu, QInputDialog
from PyQt5.QtWidgets import QLineEdit
from .. import uiloader
UiTargetClass, QtBaseClass = uiloader.load_ui_from_class_name( __file__ )
_LOGGER = logging.getLogger(__name__)
NOTES_BG_COLOR = "#f7ec9d"
class SinglePageWidget( QWidget ):
contentChanged = pyqtSignal()
createToDo = pyqtSignal( str )
def __init__(self, parentWidget=None):
super().__init__(parentWidget)
self.content = ""
self.changeCounter = 0
vlayout = QVBoxLayout()
vlayout.setContentsMargins( 0, 0, 0, 0 )
self.setLayout( vlayout )
self.textEdit = QTextEdit(self)
self.textEdit.setContextMenuPolicy( Qt.CustomContextMenu )
# self.textEdit.setStyleSheet( "background-color: #f7ec9d;" )
self.setStyleSheet(
"""
QTextEdit {
background: %s;
}
""" % NOTES_BG_COLOR
)
vlayout.addWidget( self.textEdit )
self.textEdit.textChanged.connect( self.textChanged )
self.textEdit.customContextMenuRequested.connect( self.textEditContextMenuRequest )
def getText(self):
return self.textEdit.toPlainText()
def textChanged(self):
contentText = self.getText()
newLength = len( contentText )
currLength = len( self.content )
diff = abs( newLength - currLength )
self.changeCounter += diff
self.content = contentText
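        # emit contentChanged only after roughly 24 characters' worth of edits have
        # accumulated, presumably to throttle how often listeners persist the notes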
if self.changeCounter > 24:
self.changeCounter = 0
self.contentChanged.emit()
def textEditContextMenuRequest(self, point):
menu = self.textEdit.createStandardContextMenu()
convertAction = menu.addAction("Convert to ToDo")
convertAction.triggered.connect( self._convertToToDo )
selectedText = self.textEdit.textCursor().selectedText()
if not selectedText:
convertAction.setEnabled( False )
globalPos = self.mapToGlobal( point )
menu.exec_( globalPos )
def _convertToToDo(self):
selectedText = self.textEdit.textCursor().selectedText()
if not selectedText:
return
self.createToDo.emit( selectedText )
class NotesWidget( QtBaseClass ): # type: ignore
addNote = pyqtSignal( str )
renameNote = pyqtSignal( str, str )
removeNote = pyqtSignal( str )
notesChanged = pyqtSignal()
createToDo = pyqtSignal( str )
def __init__(self, parentWidget=None):
super().__init__(parentWidget)
self.ui = UiTargetClass()
self.ui.setupUi(self)
self.ui.notes_tabs.setStyleSheet(
"""
QTabWidget {
background: %s;
}
QTabBar {
background: %s;
}
""" % (NOTES_BG_COLOR, NOTES_BG_COLOR)
)
self.ui.notes_tabs.clear()
self.addTab( "notes" )
def getNotes(self):
notes = dict()
notesSize = self.ui.notes_tabs.count()
for tabIndex in range(0, notesSize):
title = self.ui.notes_tabs.tabText( tabIndex )
pageWidget = self.ui.notes_tabs.widget( tabIndex )
text = pageWidget.getText()
notes[ title ] = text
return notes
def setNotes(self, notesDict):
self.ui.notes_tabs.clear()
for key, value in notesDict.items():
self.addTab( key, value )
def addTab(self, title, text=""):
pageWidget = SinglePageWidget(self)
pageWidget.textEdit.setText( text )
pageWidget.contentChanged.connect( self.notesChanged )
pageWidget.createToDo.connect( self.createToDo )
self.ui.notes_tabs.addTab( pageWidget, title )
def contextMenuEvent( self, event ):
evPos = event.pos()
globalPos = self.mapToGlobal( evPos )
tabBar = self.ui.notes_tabs.tabBar()
tabPos = tabBar.mapFromGlobal( globalPos )
tabIndex = tabBar.tabAt( tabPos )
contextMenu = QMenu(self)
newAction = contextMenu.addAction("New")
renameAction = contextMenu.addAction("Rename")
deleteAction = contextMenu.addAction("Delete")
if tabIndex < 0:
renameAction.setEnabled( False )
deleteAction.setEnabled( False )
action = contextMenu.exec_( globalPos )
if action == newAction:
self._newTabRequest()
elif action == renameAction:
self._renameTabRequest( tabIndex )
elif action == deleteAction:
noteTitle = self.ui.notes_tabs.tabText( tabIndex )
self.removeNote.emit( noteTitle )
def _newTabRequest( self ):
newTitle = self._requestTabName( "notes" )
if len(newTitle) < 1:
return
self.addNote.emit( newTitle )
def _renameTabRequest( self, tabIndex ):
if tabIndex < 0:
return
oldTitle = self.ui.notes_tabs.tabText( tabIndex )
newTitle = self._requestTabName(oldTitle)
if not newTitle:
# empty
return
self.renameNote.emit( oldTitle, newTitle )
def _requestTabName( self, currName ):
newText, ok = QInputDialog.getText( self,
"Rename Note",
"Note name:",
QLineEdit.Normal,
currName )
if ok and newText:
# not empty
return newText
return ""
| 33.067308 | 91 | 0.623873 |
adda04337e4c06bb524ea87f03283dd7f74ef81f | 1,406 | py | Python | examples/reporting/scalar_reporting.py | harrywang/clearml | d72a42b89feba761115103c51b73897f04ae61d7 | [
"Apache-2.0"
] | null | null | null | examples/reporting/scalar_reporting.py | harrywang/clearml | d72a42b89feba761115103c51b73897f04ae61d7 | [
"Apache-2.0"
] | null | null | null | examples/reporting/scalar_reporting.py | harrywang/clearml | d72a42b89feba761115103c51b73897f04ae61d7 | [
"Apache-2.0"
] | null | null | null | # ClearML - Example of manual graphs and statistics reporting
#
from clearml import Task, Logger
def report_scalars(logger):
# type: (Logger) -> ()
"""
reporting scalars to scalars section
:param logger: The task.logger to use for sending the scalars
"""
# report two scalar series on the same graph
for i in range(100):
logger.report_scalar("unified graph", "series A", iteration=i, value=1./(i+1))
logger.report_scalar("unified graph", "series B", iteration=i, value=10./(i+1))
# report two scalar series on two different graphs
for i in range(100):
logger.report_scalar("graph A", "series A", iteration=i, value=1./(i+1))
logger.report_scalar("graph B", "series B", iteration=i, value=10./(i+1))
def main():
# Create the experiment Task
task = Task.init(project_name="examples", task_name="scalar reporting")
print('reporting scalar graphs')
# Get the task logger,
# You can also call Task.current_task().get_logger() from anywhere in your code.
logger = task.get_logger()
# report scalars
report_scalars(logger)
# force flush reports
# If flush is not called, reports are flushed in the background every couple of seconds,
# and at the end of the process execution
logger.flush()
print('We are done reporting, have a great day :)')
if __name__ == "__main__":
main()
| 30.565217 | 92 | 0.668563 |
a91c4c3593e40f1a7ad2903be5f27fbe66bb6544 | 629 | py | Python | Python_lxf/Python_Basic_Operation/IO_Operate/StringIO_BytesIO.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 2 | 2019-01-24T15:06:59.000Z | 2019-01-25T07:34:45.000Z | Python_lxf/Python_Basic_Operation/IO_Operate/StringIO_BytesIO.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-12-23T09:45:11.000Z | 2019-12-23T09:45:11.000Z | Python_lxf/Python_Basic_Operation/IO_Operate/StringIO_BytesIO.py | QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | [
"MIT"
] | 1 | 2019-07-18T14:21:35.000Z | 2019-07-18T14:21:35.000Z | # StringIO and BytesIO
# StringIO reads and writes str in memory.
# To write a str into a StringIO, first create a StringIO,
# then write to it just like a file.
from io import StringIO
f = StringIO()
print(f.write('hello'))
print(f.getvalue())
print(f.write(', world!'))
print(f.getvalue())
# To read a StringIO, you can initialize it with a str
from io import StringIO
f = StringIO('Hello!\nHi\nGoodbye!') # initialize the StringIO with a str
while True:
s = f.readline()
if s == '':
break
print(s.strip())
# BytesIO implements reading and writing bytes in memory
from io import BytesIO
f = BytesIO()
print(f.write('中文'.encode('utf-8')))
print(f.getvalue())
# Like StringIO, a BytesIO can be initialized with bytes and then read
f = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87')
print(f.read())
| 20.290323 | 56 | 0.715421 |
e86eecadc6f12086c9b9c9f21819fe1b979e1a46 | 1,991 | py | Python | data/test_data_file.py | SCECcode/albacore | 127f46d0f1d4953d980a92b4a7268e50231c24aa | [
"BSD-3-Clause"
] | null | null | null | data/test_data_file.py | SCECcode/albacore | 127f46d0f1d4953d980a92b4a7268e50231c24aa | [
"BSD-3-Clause"
] | null | null | null | data/test_data_file.py | SCECcode/albacore | 127f46d0f1d4953d980a92b4a7268e50231c24aa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
##
# Retrieve the data files and print their content
# ./make_data_file.py > ii
# ./test_data_file.py > oo
# cat ii oo | sort | uniq -c |sort > zz
# every valid entry in the model should then appear exactly twice (count of 2)
#
import getopt
import sys
import subprocess
import struct
import numpy as np
import pdb
dimension_x = 27
dimension_y = 8
dimension_z = 101
lon_origin = -124.6472
lat_origin = 32.7
lon_upper = -116.84720
lat_upper = 34.8
delta_lon = (lon_upper - lon_origin )/(dimension_x-1)
delta_lat = (lat_upper - lat_origin)/(dimension_y-1)
def usage():
print("\n./test_data_files.py\n\n")
sys.exit(0)
def myrange(start, end, step):
while start <= end:
yield start
start += step
def main():
count =0
f_vp = open("./alba/vp.dat")
f_vs = open("./alba/vs.dat")
f_rho = open("./alba/rho.dat")
vp_arr = np.fromfile(f_vp, dtype=np.float32)
vs_arr = np.fromfile(f_vs, dtype=np.float32)
rho_arr = np.fromfile(f_rho, dtype=np.float32)
f_vp.close()
f_vs.close()
f_rho.close()
lon_start = lon_origin
lat_start = lat_origin
depth_start = 0.0;
na_cnt = 0
for lon_v in myrange(lon_origin, lon_upper, delta_lon):
for lat_v in myrange(lat_origin, lat_upper, delta_lat) :
for depth_v in range(101) :
y_pos = int(round((lat_v - lat_origin) / delta_lat))
x_pos = int(round((lon_v - lon_origin) / delta_lon))
z_pos = int(depth_v)
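                # flatten the (x, y, z) grid position into the 1-D model array:
                # one full x-y plane per depth step, then one row per latitude step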
                offset = z_pos * (dimension_y * dimension_x) + (y_pos * dimension_x) + x_pos
                vp = vp_arr[offset]
                vs = vs_arr[offset]
                rho = rho_arr[offset]
if vp != -1 :
print(x_pos," ",y_pos," ",z_pos," >> ", lon_v, " ",lat_v, " ", float(depth_v) , "-->", vp," ", vs," ", rho)
else :
na_cnt=na_cnt+1
print("na_cnt is ",na_cnt)
print("\nDone!")
if __name__ == "__main__":
main()
| 23.151163 | 124 | 0.589653 |
910e5959981ab006a0ac8d64896d95b9b000ab64 | 16,556 | py | Python | Orbit.py | mtlam/Lab_Orbit | c43e681f5f2f00d4435b08695a7f2ab33acc6f2b | [
"BSD-3-Clause"
] | null | null | null | Orbit.py | mtlam/Lab_Orbit | c43e681f5f2f00d4435b08695a7f2ab33acc6f2b | [
"BSD-3-Clause"
] | null | null | null | Orbit.py | mtlam/Lab_Orbit | c43e681f5f2f00d4435b08695a7f2ab33acc6f2b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 17:49:21 2013
@author: Amit Vishwas
Minor edits by Michael Lam
"""
import numpy as np
from matplotlib import pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Ellipse
from matplotlib import animation
import matplotlib.gridspec as gridspec
# Font to be used for labels on the plot
font = {'size' : 9}
plt.rc('font', **font)
# Setup figure and subplots
# Size, dpi and Title for Plot window
#fig = plt.figure(num = 'Orbit Simulator', figsize = (12,8.5),dpi = 100)
fig = plt.figure(num = 'Orbit Simulator', figsize = (9.5,6.75),dpi = 100)
# Divide in 3x3 grid, set area to be used on the plot
gs = gridspec.GridSpec(3, 3)
gs.update(left=0.07, right=0.95, wspace=0.15)
#ax = fig.add_subplot(gs[0,:-1], aspect ='equal', projection = '3d') # Maybe use to implement 3D view
# Define the main subplot where orbits are shown
ax = fig.add_subplot(gs[0:,:-1], aspect = 'equal')
ax.set_ylabel('Distance (in AU)')
plt.setp(ax.get_xticklabels(), visible=False) # Set xaxis tick labels to be invisible
ax.text(0.01, 0.01, 'As seen by Observer',
verticalalignment='bottom', horizontalalignment='left',
transform=ax.transAxes,
color='Black', fontsize=12)
# Define the subplot where the velocity profile is shown
ax2 = fig.add_subplot(gs[:-1,-1], aspect = 'auto')
ax2.set_xlabel('Time (in Years)')
ax2.yaxis.tick_right()
ax2.set_ylabel('Velocity (in km/s)')
ax2.locator_params(nbins=6) # limit number of x-ticks
# Define subplot where the Top view of the orbit is shown
ax3 = fig.add_subplot(gs[0,0], aspect = 'equal')
ax3.xaxis.set_visible(False)
ax3.yaxis.set_visible(False)
ax3.text(0.1, 0.99, 'Orbit Top view',
verticalalignment='top', horizontalalignment='left',
transform=ax.transAxes,
color='Black', fontsize=12)
pause = True # Click to pause functionality
change = False
# Initialize global variables - orbital elements
phase = 0.0 # Angle in the orbit, measured with respect to periastron
timer = 0 # Time Counter (integer step index into tarray/xt)
comx = 0. # Center of Mass co-ordinates
comy = 0.
m1 = 3.0; # Mass of Obj 1, in Solar mass units
m2 = 1.0; # Mass of Obj 2, in Solar mass units
semi_a = 1.0 # Semi major axis for the orbit, in AU
ecc = 0.3 # Eccentricity
alpha = semi_a*(1-ecc**2)
nodeangle = 0. # Node angle for the orbit
inclination = np.pi/2 # Inclination of the orbit
mu = m1*m2/(m1+m2); # Reduced Mass
semi_b = semi_a*(1-ecc**2)**0.5 # Semi-minor Axis
L = np.sqrt(mu*semi_a*(1-ecc**2)) # Orbital angular momentum: constant for a given orbit
P = ((1/(m1+m2))*semi_a**3)**0.5 # Period of the orbit in years, via Kepler's third law (a in AU, masses in solar masses)
tarray = np.zeros(721) # Placeholder to store conversion between time step "i" to phase in orbit
xt = np.zeros(721) # Placeholder to store conversion between time step "i" to actual time units in years
xt[:]= [(2*P/720)*x for x in range(721)]
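# Build the lookup table from integer time step to orbital angle ("phase"): the per-step
# increment (1 + e*cos(theta))**2 / (1 - e**2)**1.5 follows Kepler's second law (equal
# areas in equal times), so equal time steps correspond to unequal steps in angle.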
for i in range(721):
tht = np.radians(phase)
tarray[i] = tht
phase += np.absolute((1 + ecc*np.cos(tht))**2 / (1 - ecc**2)**1.5)
phase %= 360
phase = 0.
##################### Show Orbiting Bodies & corresponding orbits
M1 = plt.Circle((0, 0), 0.03, fc='r', clip_on=True, lw = 0); # Draw a circle to represent Body 1
M2 = plt.Circle((0, 0), 0.03, fc='b', clip_on=True, lw = 0); # Draw a circle to represent Body 2
# Try to draw the orbit that the objects will follow
orb1, = ax.plot(0,0,'r-', alpha = 0.33, visible = False) # empty place holder graphs for orbits
orb2, = ax.plot(0,0,'b-', alpha = 0.33, visible = False)
############ Previous attempts for orbits ####
#Ellipse(xy=(-semi_a*(ecc)*(mu/m1), 0), width=2*semi_a*(mu/m1), height=2*semi_b*(mu/m1)*np.cos(inclination),
# edgecolor='r', fc='None', alpha = 0.33, lw=1)
#Ellipse(xy=(semi_a*(ecc)*(mu/m2), 0), width=2*semi_a*(mu/m2), height=2*semi_b*(mu/m2)*np.cos(inclination),
# edgecolor='b', fc='None', alpha = 0.33, lw=1)
##############################################
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.grid(True)
############ Show Velocity of corrresponding bodies with respect to time
# Draw circles for showing the instantaneous velocity of the body on the velocity - time graph
#Mv1 = plt.Circle((0, 0), 0.05*P, fc = 'r', ec='r', clip_on=True, lw = 1);
#Mv2 = plt.Circle((0, 0), 0.05*P, fc = 'k', ec='k', clip_on=True, lw = 1);
Mv1 = Ellipse((0, 0), 0.1*P, 0.1*P, fc = 'r', ec='r', clip_on=True, lw = 1);
Mv2 = Ellipse((0, 0), 0.1*P, 0.1*P, fc = 'b', ec='b', clip_on=True, lw = 1);
# 29.87 km/s is the orbital velocity of the Earth around the Sun; it is scaled using Kepler's third law, a^3/(M_sol) = P^2
d3 = 29.87*np.sqrt((m1+m2/alpha))*(1+ecc)
d4 = 29.87*np.sqrt((m1+m2/alpha))* ecc
d6 = np.sin(inclination)*np.sqrt((d4**2 + d3**2)) # used to define the axis limit on velocity plot, some number larger than either
v1, = ax2.plot(0,0,'r-', visible = False) # empty place holder graphs for velocity curves
v2, = ax2.plot(0,0,'b-', visible = False)
ax2.set_xlim(0, 2*P) # Plot velocity for two orbits
ax2.set_ylim(-d6-0.1, d6+0.1)
ax2.grid(True)
#ax.get_xaxis().set_animated(True) # enabling it takes away the labels
############### Preferred view of the orbits - from the top, no effect of inclination ####
Mi1 = plt.Circle((0, 0), 0.05, fc='r', clip_on=True, lw = 0);
Mi2 = plt.Circle((0, 0), 0.05, fc='b', clip_on=True, lw = 0);
# Draw orbits as elipses
orbi1 = Ellipse(xy=(-semi_a*(ecc)*(mu/m1), 0), width=2*semi_a*(mu/m1), height=2*semi_b*(mu/m1),
edgecolor='r', fc='None', lw=0.5)
orbi2 = Ellipse(xy=(semi_a*(ecc)*(mu/m2), 0), width=2*semi_a*(mu/m2), height=2*semi_b*(mu/m2),
edgecolor='b', fc='None', lw=0.5)
ax3.set_xlim(-2, 2)
ax3.set_ylim(-2, 2)
ax3.grid(True)
###############################################################################
# pause animation on click
def onClick(event):
global pause
pause ^= True
###############################################################################
def init():
global M1, M2, orb1, orb2, Mv1, Mv2, Mi1, Mi2, orbi1, orbi2, phase, v1, v2
M1.center = (-100, -100) # initialize the patches at a far location
M2.center = (-100, -100)
ax.add_patch(M1)
# art3d.pathpatch_2d_to_3d(M1, z=0, zdir="x")
ax.add_patch(M2)
# art3d.pathpatch_2d_to_3d(M2, z=0, zdir="x")
# orb1.center = (-100, -100)
# ax.add_patch(orb1)
# orb2.center = (-100, -100)
# ax.add_patch(orb2)
#####################################################
Mv1.center = (-100, -100)
Mv2.center = (-100, -100)
ax2.add_patch(Mv1)
ax2.add_patch(Mv2)
#####################################################
Mi1.center = (-100, -100)
Mi2.center = (-100, -100)
ax3.add_patch(Mi1)
ax3.add_patch(Mi2)
orbi1.center = (-100, -100)
ax3.add_patch(orbi1)
orbi2.center = (-100, -100)
ax3.add_patch(orbi2)
######################################################
## return everything that you want to remain visible as the animation runs
return M1,M2, orb1, orb2, Mv1, Mv2, Mi1, Mi2, orbi1, orbi2, v1, v2
###############################################################################
def update(val):
global comx, comy, m1, m2, d6
global semi_a, semi_b, ecc, alpha, nodeangle, inclination
global mu, L, P, r , r1, r2
global M1, M2, orb1, orb2, Mi1, Mi2, orbi1, orbi2, v1, v2, pause
global phase, timer, xt, tarray, change
phase = 0.
    timer = 0
v1.set_visible(False)
v2.set_visible(False)
orb2.set_visible(False)
orb2.set_visible(False)
m1 = round(s_m1.val,1)
m2 = round(s_m2.val,1)
semi_a = round(s_a.val,1)
if round(s_ecc.val,1) != ecc :
ecc = round(s_ecc.val,1)
change = True
alpha = semi_a*(1-ecc**2)
nodeangle = np.radians(int(s_node.val))
inclination = np.radians(int(s_inc.val))
mu = ((m1*m2)/(m1+m2));
semi_b = semi_a*(1-ecc**2)**0.5
L = np.sqrt(mu*alpha)
P = ((1/(m1+m2))*semi_a**3)**0.5
if change == True:
for i in range(721):
tht = np.radians(phase)
tarray[i] = tht
phase += np.absolute((1 + ecc*np.cos(tht))**2 / (1 - ecc**2)**1.5)
phase %= 360
phase = 0.
change = False
xt[:]= [(2*P/720)*x for x in range(721)]
r = alpha/(1+ecc);
r1 = r*(mu/m1);
r2 = -r*(mu/m2);
M1.set_radius(0.03*(semi_a))
M2.set_radius(0.03*(semi_a))
orb1.set_xdata(comx + (mu/m1)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(tarray[0:361] + nodeangle));
orb1.set_ydata(comy + (mu/m1)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(inclination) * np.sin(tarray[0:361] + nodeangle));
orb1.set_visible(True)
ax.draw_artist(orb1)
orb2.set_xdata(comx - (mu/m2)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(tarray[0:361] + nodeangle));
orb2.set_ydata(comy - (mu/m2)*(alpha/(1+(ecc*np.cos(tarray[0:361])))) * np.cos(inclination) * np.sin(tarray[0:361] + nodeangle));
orb2.set_visible(True)
ax.draw_artist(orb2)
########### Old orbit plot attempt ####
# orb1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
#
# orb1.width = 2*semi_a*(mu/m1)*(np.cos(nodeangle))**2 + 2*semi_b*(mu/m1)*(np.sin(nodeangle))**2
# orb1.height = np.cos(inclination)*(2*semi_a*(mu/m1)*(np.sin(nodeangle))**2 + 2*semi_b*(mu/m1)*(np.cos(nodeangle))**2)
# #orb1.angle = np.rad2deg(nodeangle)
#
# orb2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
#
# orb2.width = 2*semi_a*(mu/m2)*(np.cos(nodeangle))**2 + 2*semi_b*(mu/m2)*(np.sin(nodeangle))**2
# orb2.height = np.cos(inclination)*(2*semi_a*(mu/m2)*(np.sin(nodeangle))**2 + 2*semi_b*(mu/m2)*(np.cos(nodeangle))**2)
#orb2.angle = np.rad2deg(nodeangle)
ax.set_xlim(-2*semi_a, 2*semi_a)
ax.set_ylim(-2*semi_a, 2*semi_a)
###############################################################
d3 = 29.87*np.sqrt((m1+m2/alpha))*(1+ecc)
d4 = 29.87*np.sqrt((m1+m2/alpha))* ecc
d6 = np.sin(inclination)*np.sqrt((d4**2 + d3**2))
v1.set_ydata((mu/m1)*np.sin(inclination)*(d4*np.sin(tarray+nodeangle)*np.sin(tarray) + (1/(1+ecc))*d3*np.cos(tarray+nodeangle)*(1+ecc*np.cos(tarray))))
v1.set_xdata(xt)
v1.set_visible(True)
ax2.draw_artist(v1)
v2.set_ydata((-mu/m2)*np.sin(inclination)*(d4*np.sin(tarray+nodeangle)*np.sin(tarray) + (1/(1+ecc))*d3*np.cos(tarray+nodeangle)*(1+ecc*np.cos(tarray))))
v2.set_xdata(xt)
v2.set_visible(True)
ax2.draw_artist(v2)
ax2.set_xlim(0, 2*P)
ax2.set_ylim(-d6-0.1, d6+0.1)
ratio = (d6+0.1)/P #ylim/xlim ratio
#Mv1.set_radius(0.05*(P))
#Mv2.set_radius(0.05*(P))
Mv1.width = 0.1*P
Mv1.height = 0.1*P*ratio
Mv2.width = 0.1*P
Mv2.height = 0.1*P*ratio# / np.sin(inclination)
###############################################################
Mi1.set_radius(0.05*(semi_a))
Mi2.set_radius(0.05*(semi_a))
orbi1.width = 2*semi_a*(mu/m1)
orbi1.height = 2*semi_b*(mu/m1)
orbi1.angle = np.rad2deg(nodeangle)
orbi1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
orbi2.width = 2*semi_a*(mu/m2)
orbi2.height = 2*semi_b*(mu/m2)
orbi2.angle = np.rad2deg(nodeangle)
orbi2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
ax3.set_xlim(-2*semi_a, 2*semi_a)
ax3.set_ylim(-2*semi_a, 2*semi_a)
##################################################################
pause = False
###############################################################################
def animate(i):
global semi_a, alpha, ecc, inclination, nodeangle
global r, r1, r2, mu, m1, m2, P
global M1, M2, orb1, orb2, Mi1, Mi2, orbi1, orbi2, comx, comy
global phase, tarray, timer, xt
if not pause:
tht = phase
r = alpha/(1+(ecc*np.cos(tht)));
r1 = r*(mu/m1);
r2 = -r*(mu/m2);
#############################################################
#x1, y1 = M1.center
x1 = comx + r1 * np.cos(tht + nodeangle);
y1 = (comy + r1 * np.cos(inclination) * np.sin(tht + nodeangle));
#x2, y2 = M2.center
x2 = comx + r2 * np.cos(tht + nodeangle);
y2 = (comy + r2 * np.cos(inclination) * np.sin(tht + nodeangle));
M1.center = (x1, y1)
M2.center = (x2, y2)
# orb1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
# orb2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + np.cos(inclination)*semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
############################################################
d3 = 29.87*np.sqrt((m1+m2/alpha))*(1+ecc*np.cos(tht))
d4 = 29.87*np.sqrt((m1+m2/alpha))* ecc*np.sin(tht)
d6 = np.sin(inclination)*(d4*np.sin(tht+nodeangle) + d3*np.cos(tht+nodeangle))
vm1 = (mu/m1)*d6
vm2 = -(mu/m2)*d6
xv1, yv1 = Mv1.center
xv1 = 2*P*(timer/720)
yv1 = vm1
xv2, yv2 = Mv2.center
xv2 = 2*P*(timer/720)
yv2 = vm2
Mv1.center = (xv1, yv1)
Mv2.center = (xv2, yv2)
#############################################################
xi1 = comx + r1 * np.cos(tht + nodeangle);
yi1 = comy + r1 * np.sin(tht + nodeangle);
xi2 = comx + r2 * np.cos(tht + nodeangle);
yi2 = comy + r2 * np.sin(tht + nodeangle);
Mi1.center = (xi1, yi1)
Mi2.center = (xi2, yi2)
orbi1.center = (comx + semi_a*(ecc)*(mu/m1)*np.cos(nodeangle+np.pi), comy + semi_a*(ecc)*(mu/m1)*np.sin(nodeangle+np.pi))
orbi2.center = (comx + semi_a*(ecc)*(mu/m2)*np.cos(nodeangle), comy + semi_a*(ecc)*(mu/m2)*np.sin(nodeangle))
############################################################
phase = tarray[timer]
timer += 1
timer %= 720
return M1,M2, orb1, orb2, Mv1, Mv2, v1, v2, Mi1, Mi2, orbi1, orbi2
###############################################################################
fig.canvas.mpl_connect('button_press_event', onClick)
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=360,
interval=20,
blit=True, repeat = True)
###############################################################################
############################## Put sliders
axslider_inc = plt.axes([0.1, 0.92, 0.25, 0.03])
s_inc = plt.Slider(axslider_inc, 'Inc ', 0, 90, valfmt='%0d', valinit=90)
s_inc.on_changed(update)
axslider_node = plt.axes([0.65, 0.92, 0.25, 0.03])
s_node = plt.Slider(axslider_node, 'Node Angle', -90, 90, valfmt='%0d', valinit=0)
s_node.on_changed(update)
axslider_a = plt.axes([0.1, 0.06, 0.5, 0.03])
s_a = plt.Slider(axslider_a, 'a ', 0.1, 10.0, valfmt='%0.1f', valinit=1.0)
s_a.on_changed(update)
axslider_ecc = plt.axes([0.1, 0.01, 0.5, 0.03])
s_ecc = plt.Slider(axslider_ecc, 'Ecc ', 0, 0.9, valfmt='%0.1f', valinit=0.3)
s_ecc.on_changed(update)
axslider_m1 = plt.axes([0.67, 0.06, 0.25, 0.03])
s_m1 = plt.Slider(axslider_m1, 'm1 ', 0.1, 10.0, valfmt='%0.1f', valinit=3.0)
s_m1.on_changed(update)
axslider_m2 = plt.axes([0.67, 0.01, 0.25, 0.03])
s_m2 = plt.Slider(axslider_m2, 'm2 ', 0.1, 10.0, valfmt='%0.1f', valinit=1.0)
s_m2.on_changed(update)
###############################################################################
plt.show()
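###############################################################################
# Added note (not part of the original script): from the code above, ax shows
# the orbit as projected on the sky for the chosen inclination and node angle,
# ax2 shows the radial-velocity curves (v1/v2) with the moving markers Mv1/Mv2,
# and ax3 shows the same orbit seen face-on (the Mi*/orbi* artists). The
# sliders all feed `update`, which rescales the three axes.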
| 35.758099 | 158 | 0.533341 |
1bfd647c274e0e89f2e962d26bc5810fdb277675 | 4,336 | py | Python | third_party/buildbot_8_4p1/buildbot/status/web/olpb.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | third_party/buildbot_8_4p1/buildbot/status/web/olpb.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | third_party/buildbot_8_4p1/buildbot/status/web/olpb.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.status.web.base import HtmlResource, BuildLineMixin, map_branches
# /one_line_per_build
# accepts builder=, branch=, numbuilds=, reload=
class OneLinePerBuild(HtmlResource, BuildLineMixin):
"""This shows one line per build, combining all builders together. Useful
query arguments:
numbuilds=: how many lines to display
builder=: show only builds for this builder. Multiple builder= arguments
can be used to see builds from any builder in the set.
reload=: reload the page after this many seconds
"""
pageTitle = "Recent Builds"
def __init__(self, numbuilds=20):
HtmlResource.__init__(self)
self.numbuilds = numbuilds
def getChild(self, path, req):
status = self.getStatus(req)
builder = status.getBuilder(path)
return OneLinePerBuildOneBuilder(builder, numbuilds=self.numbuilds)
def get_reload_time(self, request):
if "reload" in request.args:
try:
reload_time = int(request.args["reload"][0])
return max(reload_time, 15)
except ValueError:
pass
return None
def content(self, req, cxt):
status = self.getStatus(req)
numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
builders = req.args.get("builder", [])
branches = [b for b in req.args.get("branch", []) if b]
g = status.generateFinishedBuilds(builders, map_branches(branches),
numbuilds, max_search=numbuilds)
cxt['refresh'] = self.get_reload_time(req)
cxt['num_builds'] = numbuilds
cxt['branches'] = branches
cxt['builders'] = builders
builds = cxt['builds'] = []
for build in g:
builds.append(self.get_line_values(req, build))
cxt['authz'] = self.getAuthz(req)
# get information on the builders - mostly just a count
building = 0
online = 0
for bn in builders:
builder = status.getBuilder(bn)
builder_status = builder.getState()[0]
if builder_status == "building":
building += 1
online += 1
elif builder_status != "offline":
online += 1
cxt['num_online'] = online
cxt['num_building'] = building
template = req.site.buildbot_service.templates.get_template('onelineperbuild.html')
return template.render(**cxt)
# /one_line_per_build/$BUILDERNAME
# accepts branch=, numbuilds=
class OneLinePerBuildOneBuilder(HtmlResource, BuildLineMixin):
def __init__(self, builder, numbuilds=20):
HtmlResource.__init__(self)
self.builder = builder
self.builder_name = builder.getName()
self.numbuilds = numbuilds
self.pageTitle = "Recent Builds of %s" % self.builder_name
def content(self, req, cxt):
numbuilds = int(req.args.get("numbuilds", [self.numbuilds])[0])
branches = [b for b in req.args.get("branch", []) if b]
# walk backwards through all builds of a single builder
g = self.builder.generateFinishedBuilds(map_branches(branches),
numbuilds)
cxt['builds'] = map(lambda b: self.get_line_values(req, b), g)
cxt.update(dict(num_builds=numbuilds,
builder_name=self.builder_name,
branches=branches))
template = req.site.buildbot_service.templates.get_template('onelineperbuildonebuilder.html')
return template.render(**cxt)
| 36.436975 | 101 | 0.639299 |
e2a4251f83d34811bb309a83ae0689f531f48482 | 37,550 | py | Python | src/transformers/pipelines/base.py | deklanw/transformers | 38580455dea435acd4a261e788d237d3421d65b2 | [
"Apache-2.0"
] | 35 | 2021-08-30T09:31:41.000Z | 2021-12-21T11:30:49.000Z | src/transformers/pipelines/base.py | Hecim1984/transformers | 8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c | [
"Apache-2.0"
] | 9 | 2021-06-08T22:35:33.000Z | 2021-10-04T08:53:44.000Z | src/transformers/pipelines/base.py | Hecim1984/transformers | 8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c | [
"Apache-2.0"
] | 2 | 2021-08-31T09:09:59.000Z | 2021-09-10T07:59:03.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import importlib
import json
import os
import pickle
import sys
import warnings
from abc import ABC, abstractmethod
from collections import UserDict
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..file_utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available
from ..modelcard import ModelCard
from ..models.auto.configuration_auto import AutoConfig
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import logging
GenericTensor = Union[List["GenericTensor"], "torch.Tensor", "tf.Tensor"]
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TFAutoModel
if is_torch_available():
import torch
from torch.utils.data import DataLoader, Dataset, IterableDataset
from ..models.auto.modeling_auto import AutoModel
else:
Dataset = None
KeyDataset = None
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def collate_fn(items):
if len(items) != 1:
raise ValueError("This collate_fn is meant to be used with batch_size=1")
return items[0]
def infer_framework_load_model(
model,
config: AutoConfig,
model_classes: Optional[Dict[str, Tuple[type]]] = None,
task: Optional[str] = None,
framework: Optional[str] = None,
**model_kwargs
):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
config (:class:`~transformers.AutoConfig`):
The config associated with the model to help using the correct class
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
task (:obj:`str`):
The task defining which pipeline will be returned.
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's :obj:`from_pretrained(...,
**model_kwargs)` function.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
model_kwargs["_from_pipeline"] = task
class_tuple = ()
look_pt = is_torch_available() and framework in {"pt", None}
look_tf = is_tf_available() and framework in {"tf", None}
if model_classes:
if look_pt:
class_tuple = class_tuple + model_classes.get("pt", (AutoModel,))
if look_tf:
class_tuple = class_tuple + model_classes.get("tf", (TFAutoModel,))
if config.architectures:
classes = []
for architecture in config.architectures:
transformers_module = importlib.import_module("transformers")
if look_pt:
_class = getattr(transformers_module, architecture, None)
if _class is not None:
classes.append(_class)
if look_tf:
_class = getattr(transformers_module, f"TF{architecture}", None)
if _class is not None:
classes.append(_class)
class_tuple = class_tuple + tuple(classes)
if len(class_tuple) == 0:
raise ValueError(f"Pipeline cannot infer suitable model classes from {model}")
for model_class in class_tuple:
kwargs = model_kwargs.copy()
if framework == "pt" and model.endswith(".h5"):
kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
try:
model = model_class.from_pretrained(model, **kwargs)
if hasattr(model, "eval"):
model = model.eval()
# Stop loading on the first successful load.
break
except (OSError, ValueError):
continue
if isinstance(model, str):
raise ValueError(f"Could not load model {model} with any of the following classes: {class_tuple}.")
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework, model
def infer_framework_from_model(
model,
model_classes: Optional[Dict[str, Tuple[type]]] = None,
task: Optional[str] = None,
framework: Optional[str] = None,
**model_kwargs
):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
task (:obj:`str`):
The task defining which pipeline will be returned.
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's :obj:`from_pretrained(...,
**model_kwargs)` function.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if isinstance(model, str):
config = AutoConfig.from_pretrained(model, _from_pipeline=task, **model_kwargs)
else:
config = model.config
return infer_framework_load_model(
model, config, model_classes=model_classes, _from_pipeline=task, task=task, framework=framework, **model_kwargs
)
def get_framework(model, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
the model name). If no specific model is provided, defaults to using PyTorch.
"""
warnings.warn(
"`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
FutureWarning,
)
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model = AutoModel.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model = TFAutoModel.from_pretrained(model, revision=revision)
else:
try:
model = AutoModel.from_pretrained(model, revision=revision)
except OSError:
model = TFAutoModel.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework
def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
"""
Select a default model to use for a given task. Defaults to pytorch if ambiguous.
Args:
targeted_task (:obj:`Dict` ):
Dictionary representing the given task, that should contain default models
framework (:obj:`str`, None)
"pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.
task_options (:obj:`Any`, None)
Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
translation task.
Returns
:obj:`str` The model string representing the default model for this pipeline
"""
if is_torch_available() and not is_tf_available():
framework = "pt"
elif is_tf_available() and not is_torch_available():
framework = "tf"
defaults = targeted_task["default"]
if task_options:
if task_options not in defaults:
raise ValueError(f"The task does not provide any default models for options {task_options}")
default_models = defaults[task_options]["model"]
elif "model" in defaults:
default_models = targeted_task["default"]["model"]
else:
# XXX This error message needs to be updated to be more generic if more tasks are going to become
# parametrized
raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')
if framework is None:
framework = "pt"
return default_models[framework]
class PipelineException(Exception):
"""
Raised by a :class:`~transformers.Pipeline` when handling __call__.
Args:
task (:obj:`str`): The task of the pipeline.
model (:obj:`str`): The model used by the pipeline.
reason (:obj:`str`): The error message to display.
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing. Supported data formats
currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
:obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite: bool = False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError(f"{self.output_path} already exists on disk")
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError(f"{self.input_path} doesnt exist on disk")
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
Returns:
:obj:`str`: Path where the data has been saved.
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
) -> "PipelineDataFormat":
"""
Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
:obj:`format`.
Args:
format: (:obj:`str`):
The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
output_path (:obj:`str`, `optional`):
Where to save the outgoing data.
input_path (:obj:`str`, `optional`):
Where to look for the input data.
column (:obj:`str`, `optional`):
The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
Returns:
:class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
"""
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError(f"Unknown reader {format} (Available reader are json/csv/pipe)")
class CsvPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using CSV data format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`List[dict]`): The data to store.
"""
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using JSON file format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
"""
Save the provided data object in a json file.
Args:
data (:obj:`dict`): The data to store.
"""
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
    Read data from piped input to the python process. For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
"""
Print the data.
Args:
data (:obj:`dict`): The data to store.
"""
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Flag indicating if the output of the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
if is_torch_available():
class PipelineDataset(Dataset):
def __init__(self, dataset, process, params):
self.dataset = dataset
self.process = process
self.params = params
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
item = self.dataset[i]
processed = self.process(item, **self.params)
return processed
class PipelineIterator(IterableDataset):
def __init__(self, loader, infer, params):
self.loader = loader
self.infer = infer
self.params = params
def __len__(self):
return len(self.loader)
def __iter__(self):
self.iterator = iter(self.loader)
return self
def __next__(self):
item = next(self.iterator)
processed = self.infer(item, **self.params)
return processed
class KeyDataset(Dataset):
def __init__(self, dataset: Dataset, key: str):
self.dataset = dataset
self.key = key
def __len__(self):
return len(self.dataset)
def __getitem__(self, i):
return self.dataset[i][self.key]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
operations:
Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument (see below).
Some pipeline, like for instance :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'` )
output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we
provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
pickle format.
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: Optional[PreTrainedTokenizer] = None,
feature_extractor: Optional[PreTrainedFeatureExtractor] = None,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
**kwargs,
):
if framework is None:
framework, model = infer_framework_load_model(model, config=model.config)
self.task = task
self.model = model
self.tokenizer = tokenizer
self.feature_extractor = feature_extractor
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else f"cuda:{device}")
self.binary_output = binary_output
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
self.call_count = 0
self._preprocess_params, self._forward_params, self._postprocess_params = self._sanitize_parameters(**kwargs)
def save_pretrained(self, save_directory: str):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (:obj:`str`):
                A path to the directory in which to save the pipeline. It will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(save_directory)
if self.feature_extractor is not None:
self.feature_extractor.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.
Returns:
Context manager
Examples::
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = pipe(...)
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else f"/device:GPU:{self.device}"):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be :obj:`torch.Tensor`, the rest is ignored): The tensors to place on :obj:`self.device`.
            Recursive on lists, tuples and dicts.
Return:
:obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
"""
return self._ensure_tensor_on_device(inputs, self.device)
def _ensure_tensor_on_device(self, inputs, device):
if isinstance(inputs, ModelOutput):
return ModelOutput(
{name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
)
elif isinstance(inputs, dict):
return {name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()}
elif isinstance(inputs, UserDict):
return UserDict({name: self._ensure_tensor_on_device(tensor, device) for name, tensor in inputs.items()})
elif isinstance(inputs, list):
return [self._ensure_tensor_on_device(item, device) for item in inputs]
elif isinstance(inputs, tuple):
return tuple([self._ensure_tensor_on_device(item, device) for item in inputs])
elif isinstance(inputs, torch.Tensor):
            return inputs.to(device)
else:
return inputs
def check_model_type(self, supported_models: Union[List[str], dict]):
"""
        Check if the model class is supported by the pipeline.
Args:
supported_models (:obj:`List[str]` or :obj:`dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models_names = []
for config, model in supported_models.items():
# Mapping can now contain tuples of models for the same configuration.
if isinstance(model, tuple):
supported_models_names.extend([_model.__name__ for _model in model])
else:
supported_models_names.append(model.__name__)
supported_models = supported_models_names
if self.model.__class__.__name__ not in supported_models:
logger.error(
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}."
)
@abstractmethod
def _sanitize_parameters(self, **pipeline_parameters):
"""
        _sanitize_parameters will be called with any excessive named arguments from either `__init__` or `__call__`
        methods. It should return 3 dictionaries of the resolved parameters used by the various `preprocess`,
        `forward` and `postprocess` methods. Do not fill the dictionaries if the caller didn't specify the kwargs. This
        lets you keep defaults in function signatures, which is more "natural".
It is not meant to be called directly, it will be automatically called and the final parameters resolved by
`__init__` and `__call__`
"""
raise NotImplementedError("_sanitize_parameters not implemented")
@abstractmethod
def preprocess(self, input_: Any, **preprocess_parameters: Dict) -> Dict[str, GenericTensor]:
"""
        Preprocess will take the `input_` of a specific pipeline and return a dictionary of everything necessary for
`_forward` to run properly. It should contain at least one tensor, but might have arbitrary other items.
"""
raise NotImplementedError("preprocess not implemented")
@abstractmethod
def _forward(self, input_tensors: Dict[str, GenericTensor], **forward_parameters: Dict) -> ModelOutput:
"""
        _forward will receive the prepared dictionary from `preprocess` and run it on the model. This method might
        involve the GPU or the CPU and should be agnostic to it. Isolating this function is the reason for `preprocess`
        and `postprocess` to exist, so that this hot path can generally run as fast as possible.
It is not meant to be called directly, `forward` is preferred. It is basically the same but contains additional
code surrounding `_forward` making sure tensors and models are on the same device, disabling the training part
of the code (leading to faster inference).
"""
raise NotImplementedError("_forward not implemented")
@abstractmethod
def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict) -> Any:
"""
Postprocess will receive the raw outputs of the `_forward` method, generally tensors, and reformat them into
        something more friendly. Generally it will output a list or a dict of results (containing just strings and
numbers).
"""
raise NotImplementedError("postprocess not implemented")
def forward(self, model_inputs, **forward_params):
with self.device_placement():
if self.framework == "tf":
model_inputs["training"] = False
model_outputs = self._forward(model_inputs, **forward_params)
elif self.framework == "pt":
with torch.no_grad():
model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
model_outputs = self._forward(model_inputs, **forward_params)
model_outputs = self._ensure_tensor_on_device(model_outputs, device=torch.device("cpu"))
else:
raise ValueError(f"Framework {self.framework} is not supported")
return model_outputs
def get_iterator(self, inputs, num_workers: int, preprocess_params, forward_params, postprocess_params):
if "TOKENIZERS_PARALLELISM" not in os.environ:
logger.info("Disabling tokenizer parallelism, we're using DataLoader multithreading already")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
dataset = PipelineDataset(inputs, self.preprocess, preprocess_params)
dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=1, collate_fn=collate_fn)
model_iterator = PipelineIterator(dataloader, self.forward, forward_params)
final_iterator = PipelineIterator(model_iterator, self.postprocess, postprocess_params)
return final_iterator
def __call__(self, inputs, *args, num_workers=8, **kwargs):
if args:
logger.warning(f"Ignoring args : {args}")
preprocess_params, forward_params, postprocess_params = self._sanitize_parameters(**kwargs)
# Fuse __init__ params and __call__ params without modifying the __init__ ones.
preprocess_params = {**self._preprocess_params, **preprocess_params}
forward_params = {**self._forward_params, **forward_params}
postprocess_params = {**self._postprocess_params, **postprocess_params}
self.call_count += 1
if self.call_count > 10 and self.framework == "pt" and self.device.type == "cuda":
warnings.warn(
"You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset",
UserWarning,
)
if isinstance(inputs, list):
if self.framework == "pt":
final_iterator = self.get_iterator(
inputs, num_workers, preprocess_params, forward_params, postprocess_params
)
outputs = [output for output in final_iterator]
return outputs
else:
return self.run_multi(inputs, preprocess_params, forward_params, postprocess_params)
elif Dataset is not None and isinstance(inputs, Dataset):
return self.get_iterator(inputs, num_workers, preprocess_params, forward_params, postprocess_params)
else:
return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
def run_multi(self, inputs, preprocess_params, forward_params, postprocess_params):
return [self.run_single(item, preprocess_params, forward_params, postprocess_params) for item in inputs]
def run_single(self, inputs, preprocess_params, forward_params, postprocess_params):
model_inputs = self.preprocess(inputs, **preprocess_params)
model_outputs = self.forward(model_inputs, **forward_params)
outputs = self.postprocess(model_outputs, **postprocess_params)
return outputs
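# ---------------------------------------------------------------------------
# Added illustration (not part of the original module): a minimal subclass
# showing how _sanitize_parameters/preprocess/_forward/postprocess fit
# together. The tokenizer call and the `top_k` knob are assumptions made for
# this sketch only; real task pipelines live in the sibling modules.
class _ToyPipeline(Pipeline):
    def _sanitize_parameters(self, top_k=None, **kwargs):
        postprocess_kwargs = {}
        if top_k is not None:
            postprocess_kwargs["top_k"] = top_k
        # preprocess kwargs, forward kwargs, postprocess kwargs
        return {}, {}, postprocess_kwargs
    def preprocess(self, inputs):
        # Turn one raw input (here assumed to be a string) into model-ready tensors.
        return self.tokenizer(inputs, return_tensors=self.framework)
    def _forward(self, model_inputs):
        # Device placement and no_grad handling are done by `forward` above.
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, top_k=None):
        # Reformat the raw ModelOutput; a real pipeline would build result dicts here.
        return model_outputs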
| 40.594595 | 139 | 0.639574 |
994cbd62fe70dafe86f2c176beb4a43b3dcde109 | 522 | py | Python | cimcb/utils/dict_95ci.py | CIMCB/cimcb | 5d30f80423ed94e1068871b30e465b38d451581a | [
"MIT"
] | 5 | 2020-05-26T23:45:40.000Z | 2022-01-13T00:40:14.000Z | cimcb/utils/dict_95ci.py | CIMCB/cimcb | 5d30f80423ed94e1068871b30e465b38d451581a | [
"MIT"
] | 3 | 2020-10-20T09:03:18.000Z | 2021-11-01T14:22:05.000Z | cimcb/utils/dict_95ci.py | KevinMMendez/cimcb | fe831253b122ed0ff9e33cbd160ef721abee1e38 | [
"MIT"
] | 4 | 2020-10-12T07:17:43.000Z | 2022-03-28T06:28:44.000Z | import numpy as np
def dict_95ci(dict_list):
median_dict = {}
for key in dict_list.keys():
value = dict_list[key]
value_arr = np.array(value)
        if np.isnan(value_arr).any():
median_dict[key] = np.nan
else:
append_low = np.percentile(value_arr, 2.5)
append_mid = np.percentile(value_arr, 50)
append_upp = np.percentile(value_arr, 95.7)
median_dict[key] = [append_low, append_upp, append_mid]
return median_dict
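# Added usage sketch (illustrative input; not part of the original module):
#   stats = dict_95ci({"auc": [0.70, 0.74, 0.71, 0.78], "r2": [0.40, 0.50, 0.45]})
#   # each value is [lower percentile, upper percentile, median] for that key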
| 30.705882 | 67 | 0.603448 |
638f46044f4281332db5ee4faab6d40bb48a4e8e | 4,693 | py | Python | classification/models/vgg_face_dag.py | ilyakava/ACGAN-PyTorch | a2ac29d4d297e091a9c6a80281767f796b390be2 | [
"MIT"
] | null | null | null | classification/models/vgg_face_dag.py | ilyakava/ACGAN-PyTorch | a2ac29d4d297e091a9c6a80281767f796b390be2 | [
"MIT"
] | null | null | null | classification/models/vgg_face_dag.py | ilyakava/ACGAN-PyTorch | a2ac29d4d297e091a9c6a80281767f796b390be2 | [
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class Vgg_face_dag(nn.Module):
def __init__(self):
super(Vgg_face_dag, self).__init__()
self.meta = {'mean': [129.186279296875, 104.76238250732422, 93.59396362304688],
'std': [1, 1, 1],
'imageSize': [224, 224, 3]}
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(kernel_size=[2, 2], stride=[2, 2], padding=0, dilation=1, ceil_mode=False)
self.fc6 = nn.Linear(in_features=25088, out_features=4096, bias=True)
self.relu6 = nn.ReLU(inplace=True)
self.dropout6 = nn.Dropout(p=0.5)
self.fc7 = nn.Linear(in_features=4096, out_features=4096, bias=True)
self.relu7 = nn.ReLU(inplace=True)
self.dropout7 = nn.Dropout(p=0.5)
self.fc8 = nn.Linear(in_features=4096, out_features=2622, bias=True)
def forward(self, x0):
x1 = self.conv1_1(x0)
x2 = self.relu1_1(x1)
x3 = self.conv1_2(x2)
x4 = self.relu1_2(x3)
x5 = self.pool1(x4)
x6 = self.conv2_1(x5)
x7 = self.relu2_1(x6)
x8 = self.conv2_2(x7)
x9 = self.relu2_2(x8)
x10 = self.pool2(x9)
x11 = self.conv3_1(x10)
x12 = self.relu3_1(x11)
x13 = self.conv3_2(x12)
x14 = self.relu3_2(x13)
x15 = self.conv3_3(x14)
x16 = self.relu3_3(x15)
x17 = self.pool3(x16)
x18 = self.conv4_1(x17)
x19 = self.relu4_1(x18)
x20 = self.conv4_2(x19)
x21 = self.relu4_2(x20)
x22 = self.conv4_3(x21)
x23 = self.relu4_3(x22)
x24 = self.pool4(x23)
x25 = self.conv5_1(x24)
x26 = self.relu5_1(x25)
x27 = self.conv5_2(x26)
x28 = self.relu5_2(x27)
x29 = self.conv5_3(x28)
x30 = self.relu5_3(x29)
x31_preflatten = self.pool5(x30)
x31 = x31_preflatten.view(x31_preflatten.size(0), -1)
x32 = self.fc6(x31)
x33 = self.relu6(x32)
x34 = self.dropout6(x33)
x35 = self.fc7(x34)
x36 = self.relu7(x35)
x37 = self.dropout7(x36)
x38 = self.fc8(x37)
return x38
def vgg_face_dag(weights_path=None, **kwargs):
"""
load imported model instance
Args:
weights_path (str): If set, loads model weights from the given path
"""
model = Vgg_face_dag()
if weights_path:
state_dict = torch.load(weights_path)
model.load_state_dict(state_dict)
return model
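# Added usage sketch (not part of the original export); the weights path is a
# placeholder and must point to a converted .pth file for real use.
if __name__ == "__main__":
    net = vgg_face_dag(weights_path=None)
    net.eval()
    dummy = torch.randn(1, 3, 224, 224)  # matches meta['imageSize']
    with torch.no_grad():
        logits = net(dummy)
    print(logits.shape)  # torch.Size([1, 2622])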
| 44.273585 | 108 | 0.591732 |
105c1d6ba3c985271fb082fd2ccf25a149f053f0 | 774 | py | Python | tests/parse/test_yaml_parse.py | northwestwitch/mutacc | 1999bb9d75a78a543d2cec4df601387c89827a85 | [
"MIT"
] | 1 | 2021-12-16T19:29:23.000Z | 2021-12-16T19:29:23.000Z | tests/parse/test_yaml_parse.py | northwestwitch/mutacc | 1999bb9d75a78a543d2cec4df601387c89827a85 | [
"MIT"
] | 59 | 2018-11-27T16:19:28.000Z | 2021-12-01T16:42:31.000Z | tests/parse/test_yaml_parse.py | adrosenbaum/mutacc | 1999bb9d75a78a543d2cec4df601387c89827a85 | [
"MIT"
] | 1 | 2021-12-01T07:15:10.000Z | 2021-12-01T07:15:10.000Z | import pytest
from yaml import YAMLError
from ped_parser.exceptions import PedigreeError
from mutacc.parse.yaml_parse import yaml_parse, YAMLFieldsError
INVALID_YAML = "tests/fixtures/case_invalid_yaml.yaml"
INVALID_KEYS = "tests/fixtures/case_invalid_keys.yaml"
INVALID_KEYS2 = "tests/fixtures/case_invalid_keys2.yaml"
INVALID_PEDIGREE = "tests/fixtures/case_invalid_pedigree.yaml"
def test_yaml_parse():
with pytest.raises(YAMLError) as error:
case = yaml_parse(INVALID_YAML)
with pytest.raises(YAMLFieldsError) as error:
case = yaml_parse(INVALID_KEYS)
with pytest.raises(YAMLFieldsError) as error:
case = yaml_parse(INVALID_KEYS2)
with pytest.raises(PedigreeError) as error:
case = yaml_parse(INVALID_PEDIGREE)
| 26.689655 | 63 | 0.773902 |
004b7acda5c8a403228679a30c7c01e1fee7f13f | 11,523 | py | Python | sdk/python/pulumi_azure_native/apimanagement/api_release.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/api_release.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/api_release.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ApiReleaseArgs', 'ApiRelease']
@pulumi.input_type
class ApiReleaseArgs:
def __init__(__self__, *,
api_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
notes: Optional[pulumi.Input[str]] = None,
release_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ApiRelease resource.
:param pulumi.Input[str] api_id: Identifier of the API the release belongs to.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] notes: Release Notes
:param pulumi.Input[str] release_id: Release identifier within an API. Must be unique in the current API Management service instance.
"""
pulumi.set(__self__, "api_id", api_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if release_id is not None:
pulumi.set(__self__, "release_id", release_id)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Input[str]:
"""
Identifier of the API the release belongs to.
"""
return pulumi.get(self, "api_id")
@api_id.setter
def api_id(self, value: pulumi.Input[str]):
pulumi.set(self, "api_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
Release Notes
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter(name="releaseId")
def release_id(self) -> Optional[pulumi.Input[str]]:
"""
Release identifier within an API. Must be unique in the current API Management service instance.
"""
return pulumi.get(self, "release_id")
@release_id.setter
def release_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "release_id", value)
class ApiRelease(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
release_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
ApiRelease details.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: Identifier of the API the release belongs to.
:param pulumi.Input[str] notes: Release Notes
:param pulumi.Input[str] release_id: Release identifier within an API. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApiReleaseArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
ApiRelease details.
API Version: 2020-12-01.
:param str resource_name: The name of the resource.
:param ApiReleaseArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApiReleaseArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
release_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApiReleaseArgs.__new__(ApiReleaseArgs)
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__.__dict__["api_id"] = api_id
__props__.__dict__["notes"] = notes
__props__.__dict__["release_id"] = release_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["created_date_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
__props__.__dict__["updated_date_time"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:ApiRelease"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:ApiRelease"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:ApiRelease")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiRelease, __self__).__init__(
'azure-native:apimanagement:ApiRelease',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiRelease':
"""
Get an existing ApiRelease resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApiReleaseArgs.__new__(ApiReleaseArgs)
__props__.__dict__["api_id"] = None
__props__.__dict__["created_date_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["notes"] = None
__props__.__dict__["type"] = None
__props__.__dict__["updated_date_time"] = None
return ApiRelease(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Output[Optional[str]]:
"""
Identifier of the API the release belongs to.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="createdDateTime")
def created_date_time(self) -> pulumi.Output[str]:
"""
The time the API was released. The date conforms to the following format: yyyy-MM-ddTHH:mm:ssZ as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "created_date_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
"""
Release Notes
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedDateTime")
def updated_date_time(self) -> pulumi.Output[str]:
"""
The time the API release was updated.
"""
return pulumi.get(self, "updated_date_time")
| 44.662791 | 1,458 | 0.653389 |
254d3c8b87ee67dabc7d5bb699a60bae95e1835d | 2,435 | py | Python | landlab/components/stream_power/examples/fastscape_sp_driver.py | joeljgeo/landlab | 1d2651c76a8a36a7a132f139638192df1823f8fb | [
"MIT"
] | null | null | null | landlab/components/stream_power/examples/fastscape_sp_driver.py | joeljgeo/landlab | 1d2651c76a8a36a7a132f139638192df1823f8fb | [
"MIT"
] | null | null | null | landlab/components/stream_power/examples/fastscape_sp_driver.py | joeljgeo/landlab | 1d2651c76a8a36a7a132f139638192df1823f8fb | [
"MIT"
] | null | null | null | '''
simple_sp_driver.py
A simple driver implementing Braun-Willett flow routing and then a
fastscape stream power component.
DEJH, 09/15/14
'''
from __future__ import print_function
from landlab.components import FlowAccumulator, StreamPowerEroder, FastscapeEroder
from landlab.plot.imshow import imshow_node_grid
import numpy
from landlab import RasterModelGrid
from landlab import ModelParameterDictionary
import pylab
import time
inputs = ModelParameterDictionary('./drive_sp_params.txt')
nrows = inputs.read_int('nrows')
ncols = inputs.read_int('ncols')
dx = inputs.read_float('dx')
dt = inputs.read_float('dt')
time_to_run = inputs.read_float('run_time')
#nt needs defining
uplift = inputs.read_float('uplift_rate')
init_elev = inputs.read_float('init_elev')
mg = RasterModelGrid(nrows, ncols, dx)
#create the fields in the grid
mg.add_zeros('topographic__elevation', at='node')
z = mg.zeros(at='node') + init_elev
mg['node']['topographic__elevation'] = z + numpy.random.rand(len(z))/1000.
#make some K values in a field to test
mg.at_node['K_values'] = 0.1+numpy.random.rand(nrows*ncols)/10.
print( 'Running ...' )
#instantiate the components:
fr = FlowAccumulator(mg, flow_director='D8')
sp = StreamPowerEroder(mg, './drive_sp_params.txt')
#load the Fastscape module too, to allow direct comparison
fsp = FastscapeEroder(mg, './drive_sp_params.txt')
#perform the loop:
elapsed_time = 0. #total time in simulation
while elapsed_time < time_to_run:
print(elapsed_time)
if elapsed_time+dt>time_to_run:
print("Short step!")
dt = time_to_run - elapsed_time
    fr.run_one_step()  # runs flow routing in place on the grid (run_one_step returns None)
#print 'Area: ', numpy.max(mg.at_node['drainage_area'])
#mg = fsp.erode(mg)
mg = fsp.erode(mg, K_if_used='K_values')
#mg,_,_ = sp.erode(mg, dt, node_drainage_areas='drainage_area', slopes_at_nodes='topographic__steepest_slope')
#add uplift
mg.at_node['topographic__elevation'][mg.core_nodes] += uplift*dt
elapsed_time += dt
#Finalize and plot
elev = mg['node']['topographic__elevation']
elev_r = mg.node_vector_to_raster(elev)
# Clear previous plots
pylab.figure(1)
pylab.close()
# Plot topography
pylab.figure(1)
im = imshow_node_grid(mg, 'topographic__elevation') # display a colored image
print(elev_r)
pylab.figure(2)
im = pylab.plot(dx*numpy.arange(nrows), elev_r[:,int(ncols//2)]) # display a colored image
pylab.title('Vertical cross section')
pylab.show()
print('Done.')
| 29.337349 | 114 | 0.747433 |
fb9c625aa72cfbf496254168c81fcc2655c3aa6a | 401 | py | Python | gestionStock/asgi.py | skyrred/Gestion | c38c4d1fa229f5b0e0ef2667ff98864a28dc3241 | [
"Apache-2.0"
] | null | null | null | gestionStock/asgi.py | skyrred/Gestion | c38c4d1fa229f5b0e0ef2667ff98864a28dc3241 | [
"Apache-2.0"
] | null | null | null | gestionStock/asgi.py | skyrred/Gestion | c38c4d1fa229f5b0e0ef2667ff98864a28dc3241 | [
"Apache-2.0"
] | null | null | null | """
ASGI config for gestionStock project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gestionStock.settings')
application = get_asgi_application()
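# Example (illustrative): serve this module with any ASGI server, e.g.
#   uvicorn gestionStock.asgi:application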
| 23.588235 | 78 | 0.790524 |
e9c8a9c210f7c585c12eb9d810416986adae0b36 | 4,741 | py | Python | vaizlabs/settings/base.py | vaizguy/vaizlabs-heroku | c4b55c7aea06e840b3b46570856fb1d1ed21f28c | [
"CC-BY-4.0"
] | null | null | null | vaizlabs/settings/base.py | vaizguy/vaizlabs-heroku | c4b55c7aea06e840b3b46570856fb1d1ed21f28c | [
"CC-BY-4.0"
] | 6 | 2020-02-12T00:00:36.000Z | 2022-03-11T23:17:40.000Z | vaizlabs/settings/base.py | vaizguy/vaizlabs-heroku | c4b55c7aea06e840b3b46570856fb1d1ed21f28c | [
"CC-BY-4.0"
] | null | null | null | """
Django settings for vaizlabs project on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))[:-9]
# Add project path to system
import sys
sys.path.insert(0, os.path.join(PROJECT_ROOT, '.'))
sys.path.insert(0, os.path.join(PROJECT_ROOT, '..'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "+$x#^mv4qdjy+16!!5o1r)ilb4cm!((t&gxkchs-k*morleza*"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`. See:
# http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
# Django imagekit
'imagekit',
# uploadcare
'pyuploadcare.dj',
# Vaizlabs apps
'vaizlabs.apps.home',
'vaizlabs.apps.blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vaizlabs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'apps/home/templates'),
os.path.join(BASE_DIR, 'apps/blog/templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'vaizlabs.context_processors.home_static',
],
'debug': DEBUG,
},
},
]
WSGI_APPLICATION = 'vaizlabs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Media files path
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'mediafiles')
# Uploadcare public key and private key
UPLOADCARE = {
'pub_key' : os.environ['UPLOADCARE_API_PUB_KEY'],
'secret' : os.environ['UPLOADCARE_API_SECRET'],
}
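# Example environment configuration (illustrative placeholder values, not real keys):
#   export UPLOADCARE_API_PUB_KEY=your_public_key
#   export UPLOADCARE_API_SECRET=your_secret_key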
| 30.197452 | 91 | 0.69479 |
6cc44711bc0c4aa044c939b5c3a697ac39d07f97 | 3,540 | py | Python | examples/benchmarks/bragg/bragg_empy.py | joamatab/emepy | 6b3119a4355ff70d7cfd3fdf18224920c22be937 | [
"MIT"
] | null | null | null | examples/benchmarks/bragg/bragg_empy.py | joamatab/emepy | 6b3119a4355ff70d7cfd3fdf18224920c22be937 | [
"MIT"
] | null | null | null | examples/benchmarks/bragg/bragg_empy.py | joamatab/emepy | 6b3119a4355ff70d7cfd3fdf18224920c22be937 | [
"MIT"
] | null | null | null | from emepy.fd import MSEMpy # Requires Lumerical API
from emepy.eme import Layer, EME
import numpy as np
from matplotlib import pyplot as plt
import argparse
def bragg_empy(args):
num_periods = args.num_periods # Number of Periods for Bragg Grating
length = args.length # Length of each segment of BG, Period = Length * 2
num_wavelengths = args.num_wavelengths # Number of wavelengths to sweep
wl_lower = args.wl_lower # Lower wavelength bound
wl_upper = args.wl_upper # Upper wavelength bound
num_modes = args.num_modes # Number of Modes
mesh = args.mesh # Number of mesh points
width1 = args.width1 # Width of first core block
width2 = args.width2 # Width of second core block
    thickness = args.thickness # Thickness of the core
modesolver = MSEMpy # Which modesolver to use
t = [] # Array that holds the transmission coefficients for different wavelengths
eme = EME(num_periods=num_periods)
for wavelength in np.linspace(wl_lower, wl_upper, num_wavelengths):
eme.reset()
mode_solver1 = modesolver(
wavelength * 1e-6, width1 * 1e-6, thickness * 1e-6, mesh=mesh, num_modes=num_modes
) # First half of bragg grating
mode_solver2 = modesolver(
wavelength * 1e-6, width2 * 1e-6, thickness * 1e-6, mesh=mesh, num_modes=num_modes
) # Second half of bragg grating
eme.add_layer(Layer(mode_solver1, num_modes, wavelength * 1e-6, length * 1e-6)) # First half of bragg grating
eme.add_layer(Layer(mode_solver2, num_modes, wavelength * 1e-6, length * 1e-6)) # Second half of bragg grating
eme.propagate() # propagate at given wavelength
t.append(np.abs((eme.s_parameters()))[0, 0, num_modes] ** 2) # Grab the transmission coefficient
return t
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-num_periods", type=int, default="50", help="Number of Periods for Bragg Grating (default: 50)"
)
parser.add_argument(
"-length",
type=float,
default="0.159",
help="Length of each segment of BG, Period = Length * 2 (microns) (default: 0.159)",
)
parser.add_argument("-num_wavelengths", type=int, default="25", help="Number of wavelengths to sweep (default: 30)")
parser.add_argument("-wl_lower", type=float, default="1.5", help="Lower wavelength bound (microns) (default: 1.5)")
parser.add_argument("-wl_upper", type=float, default="1.6", help="Upper wavelength bound (microns) (default: 1.6)")
parser.add_argument("-num_modes", type=int, default="1", help="Number of Modes (default: 1)")
parser.add_argument("-mesh", type=int, default="256", help="Number of mesh points (default: 128)")
parser.add_argument(
"-width1", type=float, default="0.46", help="Width of first core block (microns) (default: 0.46)"
)
parser.add_argument(
"-width2", type=float, default="0.54", help="Width of second core block (microns) (default: 0.54)"
)
parser.add_argument(
"-thickness", type=float, default="0.22", help="Thickness of the core (microns) (default: 0.22)"
)
args = parser.parse_args()
t = bragg_empy(args)
# Plot the results
plt.plot(np.linspace(args.wl_lower, args.wl_upper, args.num_wavelengths), t)
plt.title("Grating freq sweep empy nperiods=" + str(args.num_periods))
plt.xlabel("Wavelength (microns)")
plt.ylabel("t")
plt.grid()
plt.show()
if __name__ == "__main__":
main()
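# Example invocation (illustrative parameter values; see the argparse defaults above):
#   python bragg_empy.py -num_periods 50 -num_wavelengths 25 -mesh 256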
| 39.333333 | 120 | 0.669492 |
9ce44a639ef7100739e74369e48170c2779383e7 | 4,694 | py | Python | iam_syncr/amazon/buckets.py | realestate-com-au/iam_syncr | 874d9ad39d54775116f5ee5237983e149c664067 | [
"MIT"
] | 2 | 2015-04-01T01:03:59.000Z | 2015-07-17T07:31:15.000Z | iam_syncr/amazon/buckets.py | realestate-com-au/iam_syncr | 874d9ad39d54775116f5ee5237983e149c664067 | [
"MIT"
] | null | null | null | iam_syncr/amazon/buckets.py | realestate-com-au/iam_syncr | 874d9ad39d54775116f5ee5237983e149c664067 | [
"MIT"
] | null | null | null | from iam_syncr.amazon.documents import AmazonDocuments
from iam_syncr.amazon.common import AmazonMixin
from iam_syncr.errors import BadPolicy
from boto.s3.tagging import TagSet, Tags
from itertools import chain
import logging
import boto
log = logging.getLogger("iam_syncr.amazon.buckets")
class AmazonBuckets(AmazonMixin, object):
def __init__(self, amazon):
self.amazon = amazon
self.documents = AmazonDocuments()
self.connection = amazon.s3_connection
def bucket_info(self, name):
"""Return what amazon knows about this bucket"""
try:
return self.connection.get_bucket(name)
except boto.exception.S3ResponseError as error:
if error.status == 404:
return False
raise
def current_policy(self, bucket):
"""Return the current policy for this bucket"""
try:
return bucket.get_policy().decode('utf-8')
except boto.exception.S3ResponseError as error:
if error.status == 404:
return "{}"
raise
def current_tags(self, bucket):
"""Return the tags associated with this bucket"""
try:
return dict(chain.from_iterable([(tag.key, tag.value) for tag in tags] for tags in bucket.get_tags()))
except boto.exception.S3ResponseError as error:
if error.status == 404:
return {}
raise
def has_bucket(self, name):
"""Return whether amazon has info about this role"""
return bool(self.bucket_info(name))
def create_bucket(self, name, location, permission_document=None, tags=None):
"""Create a role"""
with self.catch_boto_400("Couldn't create bucket", name=name):
for _ in self.change("+", "bucket[{0}] ".format(location), name=name):
self.connection.create_bucket(name, location=location)
# And add our permissions
if permission_document:
with self.catch_boto_400("Couldn't add policy", "Bucket {0} - Permission document".format(name), permission_document, bucket=name):
for _ in self.change("+", "bucket_policy", bucket=name, document=permission_document):
self.bucket_info(name).set_policy(permission_document)
def modify_bucket(self, name, location, permission_document, tags):
"""Modify a bucket"""
log.info("Inspecting bucket\tname=%s", name)
bucket = self.bucket_info(name)
if not bucket:
return
current_location = bucket.get_location()
if current_location != location:
raise BadPolicy("The location of the bucket is wrong. You need to delete and recreate the bucket to have it in your specified location", current=current_location, wanted=location)
current_policy = self.current_policy(bucket)
changes = list(self.documents.compare_two_documents(current_policy, permission_document))
if changes:
with self.catch_boto_400("Couldn't modify policy", "Bucket {0} policy".format(name), permission_document, bucket=name):
for _ in self.change("M", "bucket_policy", bucket=name, changes=changes):
bucket.set_policy(permission_document)
self.modify_bucket_tags(name, bucket, tags)
def modify_bucket_tags(self, name, bucket, tags):
"""Modify the tags on a bucket"""
changes = {}
new_tags = TagSet()
current_tags = self.current_tags(bucket)
for tag_name, tag_val in tags.items():
if tag_name in current_tags:
if current_tags[tag_name] != tag_val:
changes[tag_name] = ("modify", tag_name, current_tags[tag_name], tag_val)
elif tag_name not in current_tags:
changes[tag_name] = ("create", tag_name, None, tag_val)
new_tags.add_tag(tag_name, tag_val)
for tag_name in current_tags:
if tag_name not in tags:
changes[tag_name] = ("delete", tag_name, current_tags[tag_name], None)
if changes:
if not new_tags:
for _ in self.change("D", "bucket_tags", bucket=name, changes=["Delete all tags"]):
bucket.delete_tags()
else:
one_letter = "M" if any(typ in ("modify", "delete") for typ, _, _, _ in changes.values()) else "C"
for _ in self.change(one_letter, "bucket_tag", bucket=name, changes=["{0} {1} from {2} to {3}".format(*change) for change in changes.values()]):
t = Tags()
t.add_tag_set(new_tags)
bucket.set_tags(t)
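# Example usage sketch (assumes an `amazon` wrapper exposing `s3_connection`, as used above; names illustrative):
#   buckets = AmazonBuckets(amazon)
#   if not buckets.has_bucket("my-bucket"):
#       buckets.create_bucket("my-bucket", "ap-southeast-2", permission_document=None, tags=None)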
| 42.288288 | 191 | 0.623562 |
11bfef780b844f8910d39d0452df8ed47b73c658 | 1,008 | py | Python | project/settings/base/secutiry.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | null | null | null | project/settings/base/secutiry.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | 4 | 2020-05-06T17:22:00.000Z | 2021-12-13T20:43:30.000Z | project/settings/base/secutiry.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | null | null | null | # ПРЕДУПРЕЖДЕНИЕ О БЕЗОПАСНОСТИ: держите секретный ключ, используемый в производстве, в секрете!
# SECRET_KEY предоставляется через переменную окружения в OpenShift
# Безопасное значение, когда DJANGO_SECRET_KEY не может быть установлен
import os
SECRET_KEY = os.getenv(
'DJANGO_SECRET_KEY',
    # fallback value used for development when DJANGO_SECRET_KEY cannot be set
'9e4@&tw46$l31)zrqe3wi+-slqm(ruvz&se0^%9#6(_w3ui!c0'
)
ALLOWED_HOSTS = ['*']
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
| 32.516129 | 104 | 0.735119 |
38261777503fc5e83b01917cd5c134255462eb9e | 6,078 | py | Python | src/sparsetorch/plotter.py | timotheehornek/sparsetorch | 212c4e38dc352af15eea9e72f011c974fd43eb53 | [
"MIT"
] | null | null | null | src/sparsetorch/plotter.py | timotheehornek/sparsetorch | 212c4e38dc352af15eea9e72f011c974fd43eb53 | [
"MIT"
] | null | null | null | src/sparsetorch/plotter.py | timotheehornek/sparsetorch | 212c4e38dc352af15eea9e72f011c974fd43eb53 | [
"MIT"
] | null | null | null | """This sub-module contains basic plotting capability for 1D and 2D functions and is mainly intended for demonstration purposes.
The module is used in the examples provided with the package."""
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import torch
from sparsetorch.utils import get_equidist_coord
def plot(model, f_target_, name='plot', a=0, b=1, steps=100):
"""Plots target function along with approximation.
Parameters
----------
model : Model
function approximation model
f_target_ : function
target function, takes scalar
name : str, optional
name of plot, by default 'plot'
a : int, optional
left border of evaluation boundary, by default 0
b : int, optional
right border of evaluation boundary, by default 1
steps : int, optional
steps for discretisation of visualisation, by default 100
"""
f_target = np.vectorize(f_target_)
x = torch.linspace(a, b, steps)
model_approx = torch.flatten(model(x))
fig = plt.figure()
plt.plot(x.numpy(), f_target(x), '-')
plt.plot(x.numpy(), model_approx.detach().numpy(), 'r--')
plt.title(name)
# plt.savefig(name+'.png')
plt.show()
plt.close()
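# Example usage sketch (illustrative: a trained 1D sparsetorch model and a target function):
#   plot(trained_model, lambda x: np.sin(2 * np.pi * x), name='sine approximation', a=0, b=1)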
def plot_3D(f,
name='plot',
x_min=0,
x_max=1,
y_min=0,
y_max=1,
steps=100,
beautify=False,
save=False):
"""Plots function.
Parameters
----------
f : function
function to plot
name : str, optional
name of plot, by default 'plot'
x_min : int, optional
left border of evaluation boundary in x direction, by default 0
x_max : int, optional
right border of evaluation boundary in x direction, by default 1
y_min : int, optional
left border of evaluation boundary in y direction, by default 0
y_max : int, optional
right border of evaluation boundary in y direction, by default 1
steps : int, optional
steps in each dimension for discretisation of evaluation, by default 100
beautify : bool, optional
more beautiful plot if True, takes longer to render, by default False
save : bool, optional
save plot if True, by default False
"""
# create evaluation grid
x = torch.linspace(x_min, x_max, steps)
y = torch.linspace(y_min, y_max, steps)
X, Y = torch.meshgrid(x, y)
grid_x = torch.reshape(X, (steps**2, ))
grid_y = torch.reshape(Y, (steps**2, ))
# initialize input for function and model
input = torch.empty(2, steps**2)
input[0] = grid_x
input[1] = grid_y
# evaluate function
Z = f(input)
    # reshape the evaluation to a grid and detach the tensor for plotting
    Z = torch.reshape(Z, (steps, steps)).detach()
# plot real function evaluation
fig = plt.figure(1)
ax = fig.add_subplot(projection='3d')
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
if not beautify:
# standard plot
ax.plot_wireframe(X, Y, Z, color='blue', rcount=10, ccount=10)
else:
# alternative more beautiful plot (takes longer to render)
ax.view_init(41, -26)
ax.plot_surface(X.numpy(), Y.numpy(), Z.numpy(), cmap=cm.coolwarm,
linewidth=0, antialiased=True, rstride=1, cstride=1)
if save:
# save plot
plt.savefig(name+'.png', dpi=300, pad_inches=0.0, bbox_inches='tight')
ax.set_title(name)
plt.show()
plt.close()
def plot_3D_all(model,
f_dD,
name='plot',
x_min=0,
x_max=1,
y_min=0,
y_max=1,
steps=100):
"""Plots results of model with 2D basis functions.
Parameters
----------
model : Model
function approximation model
f_dD : function
target function, takes scalar
name : str, optional
name of plot, by default 'plot'
x_min : int, optional
left border of evaluation boundary in x direction, by default 0
x_max : int, optional
right border of evaluation boundary in x direction, by default 1
y_min : int, optional
left border of evaluation boundary in y direction, by default 0
y_max : int, optional
right border of evaluation boundary in y direction, by default 1
steps : int, optional
steps in each dimension for discretisation of visualisation, by default 100
"""
title = name+': Real Function Evaluation'
plot_3D(f_dD, title, x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max, steps=steps)
title = name+': Model Evaluation'
plot_3D(model, title, x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max, steps=steps)
title = name+': Absolute Error'
plot_3D(lambda x: torch.abs(f_dD(x)-model(x)), title, x_min=x_min,
x_max=x_max, y_min=y_min, y_max=y_max, steps=steps)
# compute derivatives
# generate points
points = get_equidist_coord(torch.tensor([x_min, y_min]),
torch.tensor([x_max, y_max]),
steps*torch.ones(2))
# enable gradient computation
points.requires_grad = True
# evaluate model
approx = model(points)
# compute derivatives
approx.backward(torch.ones(steps**2))
# detach tensor for further use like plotting
derivative = points.grad.detach()
title = name+': Model x-Derivative'
plot_3D(lambda _: derivative[0], title, x_min=x_min,
x_max=x_max, y_min=y_min, y_max=y_max, steps=steps)
# note that the lambda function is a workaround to plot the derivative
# step number has to match step number used for derivatives
title = name+': Model y-Derivative'
plot_3D(lambda _: derivative[1], title, x_min=x_min,
x_max=x_max, y_min=y_min, y_max=y_max, steps=steps)
# note that the lambda function is a workaround to plot the derivative
# step number has to match step number used for derivatives | 33.213115 | 128 | 0.627015 |
2b053fceac08b0e1bf525b0309174cccf6876b8c | 879 | py | Python | myproject/users/tests/test_drf_views.py | tasmiaorni/cookiecutterOrni | fcf1991858c711ccb68a07a2a17a6b1c3519c46f | [
"MIT"
] | null | null | null | myproject/users/tests/test_drf_views.py | tasmiaorni/cookiecutterOrni | fcf1991858c711ccb68a07a2a17a6b1c3519c46f | [
"MIT"
] | null | null | null | myproject/users/tests/test_drf_views.py | tasmiaorni/cookiecutterOrni | fcf1991858c711ccb68a07a2a17a6b1c3519c46f | [
"MIT"
] | null | null | null | import pytest
from django.test import RequestFactory
from myproject.users.api.views import UserViewSet
from myproject.users.models import User
pytestmark = pytest.mark.django_db
class TestUserViewSet:
def test_get_queryset(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert user in view.get_queryset()
def test_me(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
response = view.me(request)
assert response.data == {
"username": user.username,
"email": user.email,
"name": user.name,
"url": f"http://testserver/api/users/{user.username}/",
}
| 25.114286 | 67 | 0.617747 |
4df7e70e36bda93eaf4e5398b274fd33da924683 | 15,785 | py | Python | src/sage/algebras/group_algebra.py | drvinceknight/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | 2 | 2015-08-11T05:05:47.000Z | 2019-05-15T17:27:25.000Z | src/sage/algebras/group_algebra.py | kaushik94/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | null | null | null | src/sage/algebras/group_algebra.py | kaushik94/sage | 00199fb220aa173d8585b9e90654dafd3247d82d | [
"BSL-1.0"
] | 1 | 2020-07-24T12:04:03.000Z | 2020-07-24T12:04:03.000Z | r"""
Class for group algebras of arbitrary groups (over a general commutative base
ring).
NOTE:
-- It seems to be impossible to make this fit nicely with Sage's coercion
model. The problem is that (for example) if G is the additive group (ZZ,+),
and R = ZZ[G] is its group ring, then the integer 2 can be coerced into R
in two ways -- via G, or via the base ring -- and *the answers are
different*. In practice we get around this by preventing elements of G
coercing automatically into ZZ[G], which is a shame, but makes more sense
than preventing elements of the base ring doing so.
AUTHOR:
-- David Loeffler (2008-08-24): initial version
"""
#*****************************************************************************
# Copyright (C) 2008 William Stein <wstein@gmail.com>
# 2008 David Loeffler <d.loeffler.01@cantab.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.all import GroupAlgebras
from sage.structure.parent_gens import ParentWithGens
from sage.algebras.algebra import Algebra
from sage.algebras.algebra_element import AlgebraElement
from sage.rings.all import IntegerRing
from sage.groups.group import Group, is_Group
from sage.structure.formal_sum import FormalSums, FormalSum
from sage.sets.set import Set
from sage.misc.superseded import deprecation
deprecation(6670, "The module group_algebra is deprecated and will be removed in a future version of Sage. Use group_algebra_new instead.")
class GroupAlgebra(Algebra):
def __init__(self, group, base_ring = IntegerRing()):
r""" Create the given group algebra.
INPUT:
-- (Group) group: a generic group.
-- (Ring) base_ring: a commutative ring.
OUTPUT:
-- a GroupAlgebra instance.
EXAMPLES::
sage: from sage.algebras.group_algebra import GroupAlgebra
doctest:...: DeprecationWarning:...
sage: GroupAlgebra(GL(3, GF(7)))
Group algebra of group "General Linear Group of degree 3 over Finite
Field of size 7" over base ring Integer Ring
sage: GroupAlgebra(1)
Traceback (most recent call last):
...
TypeError: "1" is not a group
sage: GroupAlgebra(SU(2, GF(4, 'a')), IntegerModRing(12)).category()
Category of group algebras over Ring of integers modulo 12
"""
if not base_ring.is_commutative():
raise NotImplementedError("Base ring must be commutative")
if not is_Group(group):
raise TypeError('"%s" is not a group' % group)
ParentWithGens.__init__(self, base_ring, category = GroupAlgebras(base_ring))
self._formal_sum_module = FormalSums(base_ring)
self._group = group
def group(self):
r""" Return the group of this group algebra.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(GL(3, GF(11))).group()
General Linear Group of degree 3 over Finite Field of size 11
sage: GroupAlgebra(SymmetricGroup(10)).group()
Symmetric group of order 10! as a permutation group
"""
return self._group
def is_commutative(self):
r""" Return True if self is a commutative ring. True if and only if
self.group() is abelian.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SymmetricGroup(2)).is_commutative()
True
sage: GroupAlgebra(SymmetricGroup(3)).is_commutative()
False
"""
return self.group().is_abelian()
def is_field(self, proof = True):
r""" Return True if self is a field. This is always false unless
self.group() is trivial and self.base_ring() is a field.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SymmetricGroup(2)).is_field()
False
sage: GroupAlgebra(SymmetricGroup(1)).is_field()
False
sage: GroupAlgebra(SymmetricGroup(1), QQ).is_field()
True
"""
if not self.base_ring().is_field(proof):
return False
return (self.group().order() == 1)
def is_finite(self):
r""" Return True if self is finite, which is true if and only if
self.group() and self.base_ring() are both finite.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SymmetricGroup(2), IntegerModRing(10)).is_finite()
True
sage: GroupAlgebra(SymmetricGroup(2)).is_finite()
False
sage: GroupAlgebra(AbelianGroup(1), IntegerModRing(10)).is_finite()
False
"""
return (self.base_ring().is_finite() and self.group().is_finite())
def is_exact(self):
r""" Return True if elements of self have exact representations,
which is true of self if and only if it is true of self.group()
and self.base_ring().
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(GL(3, GF(7))).is_exact()
True
sage: GroupAlgebra(GL(3, GF(7)), RR).is_exact()
False
sage: GroupAlgebra(GL(3, pAdicRing(7))).is_exact() # not implemented correctly (not my fault)!
False
"""
return self.group().is_exact() and self.base_ring().is_exact()
def is_integral_domain(self, proof = True):
r""" Return True if self is an integral domain.
This is false unless
self.base_ring() is an integral domain, and even then it is false unless
self.group() has no nontrivial elements of finite order. I don't know if
this condition suffices, but it obviously does if the group is abelian and
finitely generated.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SymmetricGroup(2)).is_integral_domain()
False
sage: GroupAlgebra(SymmetricGroup(1)).is_integral_domain()
True
sage: GroupAlgebra(SymmetricGroup(1), IntegerModRing(4)).is_integral_domain()
False
sage: GroupAlgebra(AbelianGroup(1)).is_integral_domain()
True
sage: GroupAlgebra(AbelianGroup(2, [0,2])).is_integral_domain()
False
sage: GroupAlgebra(GL(2, ZZ)).is_integral_domain() # not implemented
False
"""
ans = False
try:
if self.base_ring().is_integral_domain():
if self.group().is_finite():
if self.group().order() > 1:
ans = False
else:
ans = True
else:
if self.group().is_abelian():
invs = self.group().invariants()
if Set(invs) != Set([0]):
ans = False
else:
ans = True
else:
raise NotImplementedError
else:
ans = False
except AttributeError:
if proof:
raise NotImplementedError("cannot determine whether self is an integral domain")
except NotImplementedError:
if proof:
raise NotImplementedError("cannot determine whether self is an integral domain")
return ans
# I haven't written is_noetherian(), because I don't know when group
# algebras are noetherian, and I haven't written is_prime_field(), because
# I don't know if that means "is canonically isomorphic to a prime field"
# or "is identical to a prime field".
def _coerce_impl(self, x):
return self(self.base_ring().coerce(x))
def _an_element_impl(self):
"""
Return an element of self.
EXAMPLE:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SU(2, 13), QQ).an_element() # random; hideous formatting!
-1/95*[ 9 2*a + 12]
[ 0 3] - 4*[ 9 9*a + 2]
[3*a + 5 1]
"""
try:
return self(self._formal_sum_module([
(self.base_ring().random_element(), self.group().random_element()),
(self.base_ring().random_element(), self.group().random_element()),
]))
except Exception: # base ring or group might not implement .random_element()
return self(self._formal_sum_module([ (self.base_ring().an_element(), self.group().an_element()) ]))
def __call__(self, x, check=True):
r"""
Create an element of this group algebra.
INPUT:
-- x: either a FormalSum element consisting of elements of
self.group(), an element of self.base_ring(), or an element
of self.group().
-- check (boolean): whether or not to check that the given elements
really do lie in self.group(). Chiefly provided to speed up
arithmetic operations with elements that have already been checked
to lie in the group.
OUTPUT:
-- a GroupAlgebraElement instance whose parent is self.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: G = AbelianGroup(1)
sage: f = G.gen()
sage: ZG = GroupAlgebra(G)
sage: ZG(f)
f
sage: ZG(1) == ZG(G(1))
True
sage: ZG(FormalSum([(1,f), (2, f**2)]))
f + 2*f^2
sage: G = GL(2,7)
sage: OG = GroupAlgebra(G, ZZ[sqrt(5)])
sage: OG(2)
2*[1 0]
[0 1]
sage: OG(G(2)) # conversion is not the obvious one
[2 0]
[0 2]
sage: OG(FormalSum([ (1, G(2)), (2, RR(0.77)) ]) )
Traceback (most recent call last):
...
TypeError: 0.770000000000000 is not an element of group General Linear Group of degree 2 over Finite Field of size 7
Ordering of elements in output unpredictable as sort order of such wildly
dissimilar elements is subject to change between platforms and versions
(see trac ticket \#4373).
sage: OG(FormalSum([ (1, G(2)), (2, RR(0.77)) ]), check=False) # random
[2 0]
[0 2] + 2*0.770000000000000
sage: OG(OG.base_ring().gens()[1])
sqrt5*[1 0]
[0 1]
"""
return GroupAlgebraElement(self, x, check)
def __eq__(self, other):
r""" Test for equality.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(AbelianGroup(1)) == GroupAlgebra(AbelianGroup(1))
True
sage: GroupAlgebra(AbelianGroup(1), QQ) == GroupAlgebra(AbelianGroup(1), ZZ)
False
sage: GroupAlgebra(AbelianGroup(2)) == GroupAlgebra(AbelianGroup(1))
False
"""
if not isinstance(other, GroupAlgebra):
return False
else:
return self.base_ring() == other.base_ring() and self.group() == other.group()
def _repr_(self):
r""" String representation of self. See GroupAlgebra.__init__ for a
doctest."""
return "Group algebra of group \"%s\" over base ring %s" % (self.group(), self.base_ring())
def element_class(self):
r"""
The class of elements of self, which is GroupAlgebraElement.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: GroupAlgebra(SU(2, GF(4,'a'))).element_class()
<class 'sage.algebras.group_algebra.GroupAlgebraElement'>
"""
return GroupAlgebraElement
class GroupAlgebraElement(AlgebraElement):
def __init__(self, parent, x, check):
r""" Create an element of the parent group algebra. Not intended to be
called by the user; see GroupAlgebra.__call__ for examples and
doctests."""
AlgebraElement.__init__(self, parent)
if not hasattr(x, 'parent'):
x = IntegerRing()(x) # occasionally coercion framework tries to pass a Python int
if isinstance(x, FormalSum):
if check:
for c,d in x._data:
if d.parent() != self.parent().group():
raise TypeError("%s is not an element of group %s" % (d, self.parent().group()))
self._fs = x
else:
self._fs = x
elif self.base_ring().has_coerce_map_from(x.parent()):
self._fs = self.parent()._formal_sum_module([ (x, self.parent().group()(1)) ])
elif self.parent().group().has_coerce_map_from(x.parent()):
self._fs = self.parent()._formal_sum_module([ (1, self.parent().group()(x)) ])
else:
raise TypeError("Don't know how to create an element of %s from %s" % (self.parent(), x))
def _repr_(self):
return self._fs._repr_()
def _add_(self, other):
r"""
Add self to other.
EXAMPLE:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: G = GL(3, GF(7))
sage: ZG = GroupAlgebra(G)
sage: g1 = G([0,0,2,2,5,0,6,6,2])
sage: s = ZG(g1)
sage: s + s
2*[0 0 2]
[2 5 0]
[6 6 2]
"""
fs_sum = self._fs + other._fs
return self.parent()(fs_sum, check=False)
def _mul_(self, right):
r""" Calculate self*right, where both self and right are GroupAlgebraElements.
EXAMPLE:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: G = GL(3, GF(7))
sage: ZG = GroupAlgebra(G)
sage: a, b = G.random_element(), G.random_element()
sage: za, zb = ZG(a), ZG(b)
sage: za*ZG(2) # random
2*[4,5,0]
[0,5,1]
[2,5,1]
sage: za*2 == za*ZG(2)
True
sage: (ZG(1) + za)*(ZG(2) + zb) == ZG(FormalSum([ (2,G(1)), (2,a), (1, b), (1, a*b)]))
True
sage: za*za == za^2
True
"""
d1 = self._fs._data
d2 = right._fs._data
new = []
for (a1, g1) in d1:
for a2,g2 in d2:
if self.parent().group().is_multiplicative():
new.append( (a1*a2, g1*g2) )
else:
new.append( (a1*a2, g1 + g2) )
return self.parent()( self.parent()._formal_sum_module(new), check=False)
def __eq__(self, other):
r""" Test if self is equal to other.
EXAMPLES:
sage: from sage.algebras.group_algebra import GroupAlgebra
sage: G = AbelianGroup(1,[4])
sage: a = GroupAlgebra(G)(1)
sage: b = GroupAlgebra(G)(2)
sage: a + a == b
True
sage: a == b
False
sage: a == GroupAlgebra(AbelianGroup(1, [5]))(1)
False
"""
if isinstance(other, GroupAlgebraElement) and self.parent() == other.parent():
return self._fs == other._fs
else:
return False
| 38.406326 | 139 | 0.562813 |
f4d715feccbc3e58cc6ee2ad6e71708d49e6f5b0 | 4,325 | py | Python | openshift/test/test_v1_egress_network_policy_peer.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | openshift/test/test_v1_egress_network_policy_peer.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | openshift/test/test_v1_egress_network_policy_peer.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_egress_network_policy_peer import V1EgressNetworkPolicyPeer
class TestV1EgressNetworkPolicyPeer(unittest.TestCase):
""" V1EgressNetworkPolicyPeer unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EgressNetworkPolicyPeer(self):
"""
Test V1EgressNetworkPolicyPeer
"""
# FIXME: construct object with mandatory attributes with example values
#model = openshift.client.models.v1_egress_network_policy_peer.V1EgressNetworkPolicyPeer()
pass
if __name__ == '__main__':
unittest.main()
| 96.111111 | 3,375 | 0.790751 |
31a52f7db32d826940e929bbb88d69651aa9ed19 | 330 | py | Python | rover_19_fake_odometry/scripts/odometry_wheel.py | onurnurtaan/rover_19 | dbfd092e58f9d3ba8eec82c68a53382caa993a3b | [
"BSD-2-Clause"
] | 4 | 2019-02-15T12:54:17.000Z | 2021-03-07T14:30:47.000Z | rover_19_fake_odometry/scripts/odometry_wheel.py | onurnurtaan/rover_19 | dbfd092e58f9d3ba8eec82c68a53382caa993a3b | [
"BSD-2-Clause"
] | 1 | 2019-01-25T11:43:17.000Z | 2019-01-25T11:43:17.000Z | rover_19_fake_odometry/scripts/odometry_wheel.py | onurnurtaan/rover_19 | dbfd092e58f9d3ba8eec82c68a53382caa993a3b | [
"BSD-2-Clause"
] | 2 | 2019-01-22T15:06:43.000Z | 2019-01-25T11:38:08.000Z | #!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
if __name__ == '__main__':
rospy.init_node('odometry_wheel_publisher')
pub = rospy.Publisher("/number", Odometry, queue_size=10)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
        msg = Odometry()
        # Odometry has no `data` field; publish a fixed fake pose instead (placeholder value)
        msg.header.stamp = rospy.Time.now()
        msg.pose.pose.position.x = 2.0
        pub.publish(msg)
rate.sleep()
| 13.2 | 58 | 0.69697 |
f82593a0e4d78d4438d933c3eabfb0a402ac6bf5 | 2,178 | py | Python | IMLearn/model_selection/cross_validate.py | nogazoh/IML.HUJI | 034af86b441f57354fb279aea95099e3a4f4aad2 | [
"MIT"
] | null | null | null | IMLearn/model_selection/cross_validate.py | nogazoh/IML.HUJI | 034af86b441f57354fb279aea95099e3a4f4aad2 | [
"MIT"
] | null | null | null | IMLearn/model_selection/cross_validate.py | nogazoh/IML.HUJI | 034af86b441f57354fb279aea95099e3a4f4aad2 | [
"MIT"
] | null | null | null | from __future__ import annotations
from copy import deepcopy
from typing import Tuple, Callable
import numpy as np
from IMLearn import BaseEstimator
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
scoring: Callable[[np.ndarray, np.ndarray, ...], float], cv: int = 5) -> Tuple[float, float]:
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
scoring: Callable[[np.ndarray, np.ndarray, ...], float]
Callable to use for evaluating the performance of the cross-validated model.
When called, the scoring function receives the true- and predicted values for each sample
and potentially additional arguments. The function returns the score for given input.
cv: int
        Specify the number of folds, i.e. how many parts the data is split into.
Returns
-------
train_score: float
        Average train score over folds (loss measured on the training folds)
validation_score: float
Average validation score over folds
"""
X_sub_arrays = np.array_split(X, cv)
y_sub_arrays = np.array_split(y, cv)
train_score = []
validation_score = []
start, end = 0, 0
for i in range(cv):
cur_X, cur_y = X, y
# cur_X = np.squeeze(cur_X)
# cur_y = np.squeeze(cur_y)
start, end = end, end + X_sub_arrays[i].shape[0]
cur_X = np.delete(cur_X, range(start, end), axis=0)
cur_y = np.delete(cur_y, range(start, end), axis=0)
model = estimator.fit(cur_X, cur_y)
# validation_x = np.squeeze(X_sub_arrays[i])
# validation_y = np.squeeze(y_sub_arrays[i])
y_val_pred = model.predict(X_sub_arrays[i])
all_y_pred = model.predict(cur_X)
validation_score.append(scoring(y_sub_arrays[i], y_val_pred))
train_score.append(scoring(cur_y, all_y_pred))
return np.average(train_score), np.average(validation_score)
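# Example usage sketch (names are illustrative; any BaseEstimator and scoring(y_true, y_pred) callable work):
#   avg_train, avg_validation = cross_validate(some_estimator, X, y, some_scoring, cv=5)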
| 32.507463 | 112 | 0.661616 |
e31620649b9221e15295bd9ca2a20dc650227d43 | 18,683 | py | Python | tokumx/datadog_checks/tokumx/vendor/pymongo/topology_description.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | tokumx/datadog_checks/tokumx/vendor/pymongo/topology_description.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | tokumx/datadog_checks/tokumx/vendor/pymongo/topology_description.py | vbarbaresi/integrations-core | ab26ab1cd6c28a97c1ad1177093a93659658c7aa | [
"BSD-3-Clause"
] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent a deployment of MongoDB servers."""
from collections import namedtuple
from datadog_checks.tokumx.vendor.pymongo import common
from datadog_checks.tokumx.vendor.pymongo.errors import ConfigurationError
from datadog_checks.tokumx.vendor.pymongo.read_preferences import ReadPreference
from datadog_checks.tokumx.vendor.pymongo.server_description import ServerDescription
from datadog_checks.tokumx.vendor.pymongo.server_selectors import Selection
from datadog_checks.tokumx.vendor.pymongo.server_type import SERVER_TYPE
TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary',
'ReplicaSetWithPrimary', 'Sharded',
'Unknown'])(*range(5))
class TopologyDescription(object):
def __init__(self,
topology_type,
server_descriptions,
replica_set_name,
max_set_version,
max_election_id,
topology_settings):
"""Representation of a deployment of MongoDB servers.
:Parameters:
- `topology_type`: initial type
- `server_descriptions`: dict of (address, ServerDescription) for
all seeds
- `replica_set_name`: replica set name or None
- `max_set_version`: greatest setVersion seen from a primary, or None
- `max_election_id`: greatest electionId seen from a primary, or None
- `topology_settings`: a TopologySettings
"""
self._topology_type = topology_type
self._replica_set_name = replica_set_name
self._server_descriptions = server_descriptions
self._max_set_version = max_set_version
self._max_election_id = max_election_id
# The heartbeat_frequency is used in staleness estimates.
self._topology_settings = topology_settings
# Is PyMongo compatible with all servers' wire protocols?
self._incompatible_err = None
for s in self._server_descriptions.values():
# s.min/max_wire_version is the server's wire protocol.
# MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports.
server_too_new = (
# Server too new.
s.min_wire_version is not None
and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION)
server_too_old = (
# Server too old.
s.max_wire_version is not None
and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION)
if server_too_new or server_too_old:
self._incompatible_err = (
"Server at %s:%d "
"uses wire protocol versions %d through %d, "
"but PyMongo only supports %d through %d"
% (s.address[0], s.address[1],
s.min_wire_version, s.max_wire_version,
common.MIN_SUPPORTED_WIRE_VERSION,
common.MAX_SUPPORTED_WIRE_VERSION))
break
def check_compatible(self):
"""Raise ConfigurationError if any server is incompatible.
A server is incompatible if its wire protocol version range does not
overlap with PyMongo's.
"""
if self._incompatible_err:
raise ConfigurationError(self._incompatible_err)
def has_server(self, address):
return address in self._server_descriptions
def reset_server(self, address):
"""A copy of this description, with one server marked Unknown."""
return updated_topology_description(self, ServerDescription(address))
def reset(self):
"""A copy of this description, with all servers marked Unknown."""
if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
topology_type = self._topology_type
# The default ServerDescription's type is Unknown.
sds = dict((address, ServerDescription(address))
for address in self._server_descriptions)
return TopologyDescription(
topology_type,
sds,
self._replica_set_name,
self._max_set_version,
self._max_election_id,
self._topology_settings)
def server_descriptions(self):
"""Dict of (address,
:class:`~pymongo.server_description.ServerDescription`)."""
return self._server_descriptions.copy()
@property
def topology_type(self):
"""The type of this topology."""
return self._topology_type
@property
def topology_type_name(self):
"""The topology type as a human readable string.
.. versionadded:: 3.4
"""
return TOPOLOGY_TYPE._fields[self._topology_type]
@property
def replica_set_name(self):
"""The replica set name."""
return self._replica_set_name
@property
def max_set_version(self):
"""Greatest setVersion seen from a primary, or None."""
return self._max_set_version
@property
def max_election_id(self):
"""Greatest electionId seen from a primary, or None."""
return self._max_election_id
@property
def known_servers(self):
"""List of Servers of types besides Unknown."""
return [s for s in self._server_descriptions.values()
if s.is_server_type_known]
@property
def common_wire_version(self):
"""Minimum of all servers' max wire versions, or None."""
servers = self.known_servers
if servers:
return min(s.max_wire_version for s in self.known_servers)
return None
@property
def heartbeat_frequency(self):
return self._topology_settings.heartbeat_frequency
def apply_selector(self, selector, address):
def apply_local_threshold(selection):
if not selection:
return []
settings = self._topology_settings
# Round trip time in seconds.
fastest = min(
s.round_trip_time for s in selection.server_descriptions)
threshold = settings.local_threshold_ms / 1000.0
return [s for s in selection.server_descriptions
if (s.round_trip_time - fastest) <= threshold]
if getattr(selector, 'min_wire_version', 0):
common_wv = self.common_wire_version
if common_wv and common_wv < selector.min_wire_version:
raise ConfigurationError(
"%s requires min wire version %d, but topology's min"
" wire version is %d" % (selector,
selector.min_wire_version,
common_wv))
if self.topology_type == TOPOLOGY_TYPE.Single:
# Ignore the selector.
return self.known_servers
elif address:
description = self.server_descriptions().get(address)
return [description] if description else []
elif self.topology_type == TOPOLOGY_TYPE.Sharded:
# Ignore the read preference, but apply localThresholdMS.
return apply_local_threshold(
Selection.from_topology_description(self))
else:
return apply_local_threshold(
selector(Selection.from_topology_description(self)))
def has_readable_server(self, read_preference=ReadPreference.PRIMARY):
"""Does this topology have any readable servers available matching the
given read preference?
:Parameters:
- `read_preference`: an instance of a read preference from
:mod:`~pymongo.read_preferences`. Defaults to
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4
"""
common.validate_read_preference("read_preference", read_preference)
return any(self.apply_selector(read_preference, None))
def has_writable_server(self):
"""Does this topology have a writable server available?
.. note:: When connected directly to a single server this method
always returns ``True``.
.. versionadded:: 3.4
"""
return self.has_readable_server(ReadPreference.PRIMARY)
# If topology type is Unknown and we receive an ismaster response, what should
# the new topology type be?
_SERVER_TYPE_TO_TOPOLOGY_TYPE = {
SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded,
SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary,
SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary,
}
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Single type never changes.
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type == SERVER_TYPE.Standalone:
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
def _update_rs_from_primary(
sds,
replica_set_name,
server_description,
max_set_version,
max_election_id):
"""Update topology description from a primary's ismaster response.
Pass in a dict of ServerDescriptions, current replica set name, the
ServerDescription we are processing, and the TopologyDescription's
max_set_version and max_election_id if any.
Returns (new topology type, new replica_set_name, new max_set_version,
new max_election_id).
"""
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
# We found a primary but it doesn't have the replica_set_name
# provided by the user.
sds.pop(server_description.address)
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
max_election_tuple = max_set_version, max_election_id
if None not in server_description.election_tuple:
if (None not in max_election_tuple and
max_election_tuple > server_description.election_tuple):
# Stale primary, set to type Unknown.
address = server_description.address
sds[address] = ServerDescription(address)
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
max_election_id = server_description.election_id
if (server_description.set_version is not None and
(max_set_version is None or
server_description.set_version > max_set_version)):
max_set_version = server_description.set_version
# We've heard from the primary. Is it the same primary as before?
for server in sds.values():
if (server.server_type is SERVER_TYPE.RSPrimary
and server.address != server_description.address):
# Reset old primary's type to Unknown.
sds[server.address] = ServerDescription(server.address)
# There can be only one prior primary.
break
# Discover new hosts from this primary's response.
for new_address in server_description.all_hosts:
if new_address not in sds:
sds[new_address] = ServerDescription(new_address)
# Remove hosts not in the response.
for addr in set(sds) - server_description.all_hosts:
sds.pop(addr)
# If the host list differs from the seed list, we may not have a primary
# after all.
return (_check_has_primary(sds),
replica_set_name,
max_set_version,
max_election_id)
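# Illustrative aside, not part of the original module: the stale-primary check in
# _update_rs_from_primary() above compares (set_version, election_id) pairs
# lexicographically, so a candidate primary is ignored whenever a strictly greater
# pair has already been recorded. The helper below is an assumption added purely
# for illustration; the library does not define it.
def _example_primary_is_stale(max_election_tuple, candidate_election_tuple):
    """Return True if the candidate primary's (set_version, election_id) is older."""
    return (None not in max_election_tuple and
            None not in candidate_election_tuple and
            max_election_tuple > candidate_election_tuple)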
def _update_rs_with_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS with known primary. Process a response from a non-primary.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns new topology type.
"""
assert replica_set_name is not None
if replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
elif (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
# Had this member been the primary?
return _check_has_primary(sds)
def _update_rs_no_primary_from_member(
sds,
replica_set_name,
server_description):
"""RS without known primary. Update from a non-primary's response.
Pass in a dict of ServerDescriptions, current replica set name, and the
ServerDescription we are processing.
Returns (new topology type, new replica_set_name).
"""
topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary
if replica_set_name is None:
replica_set_name = server_description.replica_set_name
elif replica_set_name != server_description.replica_set_name:
sds.pop(server_description.address)
return topology_type, replica_set_name
# This isn't the primary's response, so don't remove any servers
# it doesn't report. Only add new servers.
for address in server_description.all_hosts:
if address not in sds:
sds[address] = ServerDescription(address)
if (server_description.me and
server_description.address != server_description.me):
sds.pop(server_description.address)
return topology_type, replica_set_name
def _check_has_primary(sds):
"""Current topology type is ReplicaSetWithPrimary. Is primary still known?
Pass in a dict of ServerDescriptions.
Returns new topology type.
"""
for s in sds.values():
if s.server_type == SERVER_TYPE.RSPrimary:
return TOPOLOGY_TYPE.ReplicaSetWithPrimary
else:
return TOPOLOGY_TYPE.ReplicaSetNoPrimary
| 37.440882 | 85 | 0.644543 |
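Taken together, the functions above treat server discovery as pure transitions over immutable descriptions: every ismaster response produces a fresh TopologyDescription instead of mutating the current one, and the topology keeps its ReplicaSetWithPrimary type only while some known server still reports as a primary. The following self-contained sketch mirrors that idea with simplified stand-ins; the ServerType/TopologyType enums and helper names are assumptions for illustration, not the library's actual API.

from enum import Enum, auto

class ServerType(Enum):
    Unknown = auto()
    RSPrimary = auto()
    RSSecondary = auto()

class TopologyType(Enum):
    ReplicaSetWithPrimary = auto()
    ReplicaSetNoPrimary = auto()

def check_has_primary(servers):
    # Same shape as _check_has_primary(): "with primary" survives only while at
    # least one known server still reports as RSPrimary.
    if any(t is ServerType.RSPrimary for t in servers.values()):
        return TopologyType.ReplicaSetWithPrimary
    return TopologyType.ReplicaSetNoPrimary

def mark_unknown(servers, address):
    # Copy before editing, mirroring how the real code copies
    # server_descriptions() rather than mutating the shared dict.
    updated = dict(servers)
    updated[address] = ServerType.Unknown
    return updated, check_has_primary(updated)

if __name__ == "__main__":
    servers = {"a:27017": ServerType.RSPrimary, "b:27017": ServerType.RSSecondary}
    servers, topology = mark_unknown(servers, "a:27017")
    print(topology)  # TopologyType.ReplicaSetNoPrimary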
6fbe7dba9fb7ddd362b5d65c5a33d20340c1f425 | 4,226 | py | Python | lib/ansible/plugins/action/ios.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | lib/ansible/plugins/action/ios.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | lib/ansible/plugins/action/ios.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.ios.ios import ios_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(ios_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'ios'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout']) if provider['timeout'] else None
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
if connection._play_context.timeout is None:
connection._play_context.timeout = connection.get_option('persistent_command_timeout')
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| 43.122449 | 125 | 0.67629 |
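Most of the action plugin above is devoted to translating a legacy provider dict into network_cli connection settings, with each key falling back to the play context when the provider leaves it unset. A minimal sketch of that precedence rule follows; the function and variable names are assumptions for illustration, not Ansible's API.

def resolve_connection_options(provider, play_context_defaults):
    # Explicit provider values win; unset values (None or empty) fall back to the
    # play context, the same key-by-key precedence run() applies for host, port,
    # username, password and timeout.
    resolved = dict(play_context_defaults)
    for key, value in provider.items():
        if value not in (None, ""):
            resolved[key] = value
    return resolved

if __name__ == "__main__":
    defaults = {"host": "192.0.2.10", "port": 22, "username": "admin", "timeout": 30}
    provider = {"port": 2222, "username": None}
    print(resolve_connection_options(provider, defaults))
    # {'host': '192.0.2.10', 'port': 2222, 'username': 'admin', 'timeout': 30}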
90f3fb2fe140e5adf4d3783596d3a4678a2ad0f0 | 1,000 | py | Python | desafio051.py | Darlingcris/Desafios-Python | 43dd1c9ee336482834007c515e7cf327a7c84d44 | [
"MIT"
] | null | null | null | desafio051.py | Darlingcris/Desafios-Python | 43dd1c9ee336482834007c515e7cf327a7c84d44 | [
"MIT"
] | null | null | null | desafio051.py | Darlingcris/Desafios-Python | 43dd1c9ee336482834007c515e7cf327a7c84d44 | [
"MIT"
] | null | null | null | #desenvolva um programa que leia o primeiro
#termo e a razao de uma PA. No final, Mostre
#os 10 primeiro termos dessa progressao.
#OBS: PA= (1,100,10) o primeiro n=1 a razao=10
# First version: compute a single term of the PA at a requested position.
primeiroTermo=int(input("Digite o primeiro termo da sua PA: "))
razao=int(input("Digite a razao da sua PA: "))
pa=int(input("Qual a posiçao voce quer encontrar? "))
posicao=primeiroTermo+(pa-1)*razao
print(posicao)
print("="*45)
print("{:^45}".format("10 termos de uma PA"))
print("="*45)
primeiroTermo=int(input("\nDigite o primeiro termo da sua PA: "))
razao=int(input("Digite a razao da sua PA: "))
for pa in range(1,11):
posicao=primeiroTermo+(pa-1)*razao
print(posicao, end=" → ")
print("Fim")
print("="*45)
print("{:^45}".format("10 termos de uma PA"))
print("="*45)
primeiroTermo=int(input("\nDigite o primeiro termo da sua PA: "))
razao=int(input("Digite a razao da sua PA: "))
decimo=primeiroTermo+(11-1)*razao
for pa in range(primeiroTermo,decimo,razao):
print(pa, end=" → ")
print("Fim") | 30.30303 | 65 | 0.683 |