hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2c662f276d75d5cf194b16fa8615d6ac1fdca1d | 1,674 | py | Python | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | tests/compute/planar/test_rotateZ.py | ianna/vector | c00b258049c0ea1de46f90311849923b96068a02 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector.backends.numpy_
import vector.backends.object_
| 37.2 | 94 | 0.680406 |
d2c66e24087a653bf88316c9ed3e62b1ba5b4aa5 | 3,791 | py | Python | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | 2 | 2020-04-30T08:17:45.000Z | 2020-05-23T08:46:54.000Z | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | null | null | null | src/RIOT/tests/pkg_tensorflow-lite/mnist/mnist_mlp.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
# imports for array-handling
import numpy as np
import tensorflow as tf
# keras imports for the dataset and building our neural network
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
# let's keep our keras backend tensorflow quiet
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# load mnist dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# building the input vector from the 28x28 pixels
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Split the train set in a train + validation set
X_valid = X_train[50000:]
y_valid = y_train[50000:]
X_train = X_train[:50000]
y_train = y_train[:50000]
# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
X_valid = X_valid / 255.0
# building a very simple linear stack of layers using a sequential model
model = Sequential([
Dense(64, activation='relu', input_shape=(784,)),
Dropout(0.2),
Dense(10, activation='softmax')
])
# compiling the sequential model
model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'],
optimizer='adam')
batch_size = 32
epochs = 30
# training the model and saving metrics in history
history = model.fit(X_train, y_train,
batch_size=batch_size, epochs=epochs,
verbose=2,
validation_data=(X_valid, y_valid))
# saving the model
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the basic model to disk
open("model_basic.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
(mnist_train, _), (_, _) = mnist.load_data()
mnist_train = mnist_train.reshape(60000, 784)
mnist_train = mnist_train.astype('float32')
mnist_train = mnist_train / 255.0
mnist_ds = tf.data.Dataset.from_tensor_slices((mnist_train)).batch(1)
converter.representative_dataset = representative_data_gen
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_model = converter.convert()
# # Save the quantized model to disk
open("model.tflite", "wb").write(tflite_model)
basic_model_size = os.path.getsize("model_basic.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("model.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
# Now let's verify the model on a few input digits
# Instantiate an interpreter for the model
model_quantized_reloaded = tf.lite.Interpreter('model.tflite')
# Allocate memory for each model
model_quantized_reloaded.allocate_tensors()
# Get the input and output tensors so we can feed in values and get the results
model_quantized_input = model_quantized_reloaded.get_input_details()[0]["index"]
model_quantized_output = model_quantized_reloaded.get_output_details()[0]["index"]
# Create arrays to store the results
model_quantized_predictions = np.empty(X_test.size)
for i in range(10):
# Invoke the interpreter
model_quantized_reloaded.set_tensor(model_quantized_input, X_test[i:i+1, :])
model_quantized_reloaded.invoke()
model_quantized_prediction = model_quantized_reloaded.get_tensor(model_quantized_output)
print("Digit: {} - Prediction:\n{}".format(y_test[i], model_quantized_prediction))
print("")
| 32.127119 | 92 | 0.759166 |
d2c77644e40785600cc8b3b66d9450e3d85ddf12 | 67 | py | Python | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/random-numbers-1.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | import random
values = [random.gauss(1, .5) for i in range(1000)]
| 16.75 | 51 | 0.686567 |
d2c876fb5f375461dc3ae3b4e7ececc7c2f8aa23 | 2,091 | py | Python | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
] | 2 | 2020-03-16T01:09:10.000Z | 2020-03-16T03:02:57.000Z | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
] | 1 | 2020-04-21T16:49:53.000Z | 2020-04-29T02:15:45.000Z | stock_api_handler.py | Sergix/analyst-server | a2ec7cc92610f78ac2a4ce4a46c52410219cd360 | [
"MIT"
] | 3 | 2020-03-16T14:46:41.000Z | 2020-03-21T13:55:24.000Z | # This python script handles stock api request from yfinance
# Last Updated: 4/7/2020
# Credits:nto
#Import yfinance api lib
import yfinance as yf
#Import pandas lib
import pandas as pd
#Import json to manipulate api data
import json
#Import math
import math
| 35.440678 | 109 | 0.648015 |
d2c9cfe9e4e2384aabafbe6f290a4052329e6bc7 | 1,493 | py | Python | hth/shows/tests/factories.py | roperi/myband | ec1955626fe6997484fd92ed02127b6899cd7062 | [
"MIT"
] | 1 | 2016-04-12T17:38:26.000Z | 2016-04-12T17:38:26.000Z | hth/shows/tests/factories.py | bhrutledge/jahhills.com | 74fe94a214f1ed5681bd45159315f0b68daf5a33 | [
"MIT"
] | 92 | 2015-04-03T10:04:55.000Z | 2021-07-17T11:13:52.000Z | hth/shows/tests/factories.py | roperi/myband | ec1955626fe6997484fd92ed02127b6899cd7062 | [
"MIT"
] | 1 | 2021-01-26T18:02:49.000Z | 2021-01-26T18:02:49.000Z | from datetime import date
from random import randrange
import factory
import factory.fuzzy
from hth.core.tests.utils import from_today
| 26.192982 | 77 | 0.704622 |
d2ca30ab580a71ee2a0484e370c2d881b8376a24 | 2,143 | py | Python | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/eight_sleep/binary_sensor.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BINARY_SENSORS,
DATA_API,
DATA_EIGHT,
DATA_HEAT,
EightSleepBaseEntity,
EightSleepHeatDataCoordinator,
)
_LOGGER = logging.getLogger(__name__)
| 27.474359 | 86 | 0.691087 |
d2cb2cb149ab4d390a0fe9859ee6b67392f9a4c2 | 3,384 | py | Python | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
] | null | null | null | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
] | null | null | null | tensorbay/opendataset/FLIC/loader.py | rexzheng324-c/tensorbay-python-sdk | 764c28f34069229daa41474e2f104786dbfa973f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
# pylint: disable=missing-module-docstring
import os
from typing import Any, Dict, Iterator, Tuple
from tensorbay.dataset import Data, Dataset
from tensorbay.exception import ModuleImportError
from tensorbay.label import Classification, LabeledBox2D, LabeledKeypoints2D
DATASET_NAME = "FLIC"
_VALID_KEYPOINT_INDICES = [0, 1, 2, 3, 4, 5, 6, 9, 12, 13, 16]
def FLIC(path: str) -> Dataset:
"""`FLIC <https://bensapp.github.io/flic-dataset.html>`_ dataset.
The folder structure should be like::
<path>
exampls.mat
images/
2-fast-2-furious-00003571.jpg
...
Arguments:
path: The root directory of the dataset.
Raises:
ModuleImportError: When the module "scipy" can not be found.
Returns:
Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
"""
try:
from scipy.io import loadmat # pylint: disable=import-outside-toplevel
except ModuleNotFoundError as error:
raise ModuleImportError(module_name=error.name) from error
root_path = os.path.abspath(os.path.expanduser(path))
dataset = Dataset(DATASET_NAME)
annotations = loadmat(os.path.join(root_path, "examples.mat"))["examples"][0]
dataset.create_segment("train")
dataset.create_segment("test")
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))
# try whether the dataset has bad segment
try:
_ = annotations["isbad"]
flag = True
dataset.create_segment("bad")
dataset.catalog.classification.add_attribute(name="isunchecked", type_="boolean")
except ValueError:
flag = False
for data, segment_name in _get_data(root_path, annotations, flag):
dataset[segment_name].append(data)
return dataset
| 31.924528 | 89 | 0.638889 |
d2cb4dbefc7f4606adaa9b77d466de95f1e38071 | 3,925 | py | Python | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null | my_answers/homework/OOP/athlete.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null |
def get_durations(distances, athletes):
for distance in distances:
for athlete in athletes:
print(f'{athlete.run(distance)} ran {distance} meters in {get_time(athlete.get_duration(distance))}')
if __name__ == "__main__":
runr = Runner("run", 90, 15, 30)
sprt1 = Sprinter("sprnt1", 90, 15, 30)
sprt2 = Sprinter("sprnt2", 80, 10, 25)
mrtn = MarathonRunner("mrtn", 50, 6, 7)
# print('getting running time..')
# print(f'{runr.run(100)} ran for {runr.get_duration(100)}')
distances = (100, 200, 800, 1600, 5000, 20000)
athletes = (runr, sprt1, sprt2, mrtn)
get_durations(distances, athletes)
| 39.25 | 125 | 0.642803 |
d2cbe0ce287e68ba03cda24086915b54c95f413e | 3,391 | py | Python | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | 30 | 2019-01-03T03:09:25.000Z | 2022-03-30T17:42:54.000Z | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | null | null | null | osisoft/pidevclub/piwebapi/models/pi_data_server_license.py | jugillar/PI-Web-API-Client-Python | 9652e18384d8c66194c6d561d5ef01f60d820253 | [
"Apache-2.0"
] | 46 | 2018-11-07T14:46:35.000Z | 2022-03-31T12:23:39.000Z | # coding: utf-8
"""
Copyright 2018 OSIsoft, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
| 23.548611 | 118 | 0.714833 |
d2ccb686d34873a1a30c9b50f3a2bad12ac217e0 | 4,054 | py | Python | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
] | null | null | null | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
] | null | null | null | bot.py | JavierOramas/scholar_standing_bot | 9afde1fc0d56a3c57cf281092ff5c3d123ddac2f | [
"MIT"
] | 2 | 2021-09-19T21:08:55.000Z | 2021-09-19T21:09:39.000Z | #! /root/anaconda3/bin/python
import os
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pyrogram import Client, filters
from read_config import read_config
import json
import requests
import schedule
import time
config_data = read_config('./config/config_bot.json')
app = Client(config_data['bot_user_name'], config_data['api_id'], config_data['api_hash'])
# @app.on_message(filters.command('help'))
app.run() | 30.712121 | 228 | 0.619142 |
d2d16238955afe2195185ab27a0954cf27e01b00 | 7,622 | py | Python | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
] | 27 | 2020-11-23T11:45:31.000Z | 2022-03-22T08:08:00.000Z | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
] | 94 | 2021-02-24T09:50:23.000Z | 2022-02-27T10:07:15.000Z | skdecide/discrete_optimization/rcpsp_multiskill/parser/rcpsp_multiskill_parser.py | emilienDespres/scikit-decide | 2a3dd2d93e5e6d07984e1bc02b6e969261aeefbc | [
"MIT"
] | 12 | 2020-12-08T10:38:26.000Z | 2021-10-01T09:17:04.000Z | # Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict, Tuple
from skdecide.discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
Employee,
MS_RCPSPModel,
SkillDetail,
)
| 39.697917 | 126 | 0.494358 |
d2d1d69838e8dd6599bd00b4fca0bacfaf367308 | 530 | py | Python | pipe_anchorages/logging_monkeypatch.py | GlobalFishingWatch/anchorages_pipeline | 88764545b693bfb65fc7a7f62a344fb2afbc3d97 | [
"Apache-2.0"
] | 3 | 2017-12-22T10:19:15.000Z | 2020-04-20T10:28:43.000Z | pipe_tools/beam/logging_monkeypatch.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
] | 37 | 2017-10-22T12:00:59.000Z | 2022-02-08T19:17:58.000Z | pipe_tools/beam/logging_monkeypatch.py | GlobalFishingWatch/pipe-tools | 34dff591997bb2c25e018df86d13a9d42972032b | [
"Apache-2.0"
] | 3 | 2018-01-21T14:07:58.000Z | 2021-07-28T16:02:20.000Z | import logging
# monkey patch to suppress the annoying warning you get when you import apache_beam
#
# No handlers could be found for logger "oauth2client.contrib.multistore_file"
#
# This warning is harmless, but annooying when you are using beam from a command line app
# see: https://issues.apache.org/jira/browse/BEAM-1183
# This just creates a null handler for that logger so there is no output
logger = logging.getLogger('oauth2client.contrib.multistore_file')
handler = logging.NullHandler()
logger.addHandler(handler)
| 33.125 | 89 | 0.792453 |
d2d2f4b2d01e6090619cd23b148cfe0e1bc36f87 | 330 | py | Python | core/managers.py | Bilal815/ecommerce_storee | 45e61f1d865a65b4c52d74502b4fcab7ee6c1adf | [
"MIT"
] | 95 | 2020-04-13T09:02:30.000Z | 2022-03-25T14:11:34.000Z | core/managers.py | Bilal815/ecommerce_api | a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8 | [
"MIT"
] | 87 | 2020-02-21T17:58:56.000Z | 2022-03-21T21:37:05.000Z | core/managers.py | Bilal815/ecommerce_api | a3d8ce7a9e1fa2528d240d5ab508afe92607c9f8 | [
"MIT"
] | 33 | 2021-01-18T09:30:29.000Z | 2022-03-30T01:31:57.000Z | from django.db import models
| 22 | 44 | 0.681818 |
d2d2fa8cda2955386068decf56b4b942626e5d83 | 22,286 | py | Python | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
] | null | null | null | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
] | null | null | null | mizani/breaks.py | stillmatic/mizani | 9a9dcb2b2ae8fca9a1c5b5e475be4d1f801bda1c | [
"BSD-3-Clause"
] | null | null | null | """
All scales have a means by which the values that are mapped
onto the scale are interpreted. Numeric digital scales put
out numbers for direct interpretation, but most scales
cannot do this. What they offer is named markers/ticks that
aid in assessing the values e.g. the common odometer will
have ticks and values to help gauge the speed of the vehicle.
The named markers are what we call breaks. Properly calculated
breaks make interpretation straight forward. These functions
provide ways to calculate good(hopefully) breaks.
"""
from __future__ import division
import numpy as np
import pandas as pd
from matplotlib.dates import MinuteLocator, HourLocator, DayLocator
from matplotlib.dates import WeekdayLocator, MonthLocator, YearLocator
from matplotlib.dates import AutoDateLocator
from matplotlib.dates import num2date, YEARLY
from matplotlib.ticker import MaxNLocator
from .utils import min_max, SECONDS, NANOSECONDS
from .utils import same_log10_order_of_magnitude
__all__ = ['mpl_breaks', 'log_breaks', 'minor_breaks',
'trans_minor_breaks', 'date_breaks',
'timedelta_breaks', 'extended_breaks']
# The break calculations rely on MPL locators to do
# the heavylifting. It may be more convinient to lift
# the calculations out of MPL.
# Matplotlib's YearLocator uses different named
# arguments than the others
LOCATORS = {
'minute': MinuteLocator,
'hour': HourLocator,
'day': DayLocator,
'week': WeekdayLocator,
'month': MonthLocator,
'year': lambda interval: YearLocator(base=interval)
}
# This could be cleaned up, state overload?
def value(self, td):
"""
Return the numeric value representation on a timedelta
"""
if self.package == 'pandas':
return td.value
else:
return td.total_seconds()
def scaled_limits(self):
"""
Minimum and Maximum to use for computing breaks
"""
_min = self.limits[0]/self.factor
_max = self.limits[1]/self.factor
return _min, _max
def timedelta_to_numeric(self, timedeltas):
"""
Convert sequence of timedelta to numerics
"""
return [self.to_numeric(td) for td in timedeltas]
def numeric_to_timedelta(self, numerics):
"""
Convert sequence of numerics to timedelta
"""
if self.package == 'pandas':
return [self.type(int(x*self.factor), units='ns')
for x in numerics]
else:
return [self.type(seconds=x*self.factor)
for x in numerics]
def get_scaling_factor(self, units):
if self.package == 'pandas':
return NANOSECONDS[units]
else:
return SECONDS[units]
def to_numeric(self, td):
"""
Convert timedelta to a number corresponding to the
appropriate units. The appropriate units are those
determined with the object is initialised.
"""
if self.package == 'pandas':
return td.value/NANOSECONDS[self.units]
else:
return td.total_seconds()/SECONDS[self.units]
class extended_breaks(object):
"""
An extension of Wilkinson's tick position algorithm
Parameters
----------
n : int
Desired number of ticks
Q : list
List of nice numbers
only_inside : bool
If ``True``, then all the ticks will be within the given
range.
w : list
Weights applied to the four optimization components
(simplicity, coverage, density, and legibility). They
should add up to 1.
Examples
--------
>>> limits = (0, 9)
>>> extended_breaks()(limits)
array([ 0. , 2.5, 5. , 7.5, 10. ])
>>> extended_breaks(n=6)(limits)
array([ 0., 2., 4., 6., 8., 10.])
References
----------
- Talbot, J., Lin, S., Hanrahan, P. (2010) An Extension of
Wilkinson's Algorithm for Positioning Tick Labels on Axes,
InfoVis 2010.
Additional Credit to Justin Talbot on whose code this
implementation is almost entirely based.
"""
def __call__(self, limits):
"""
Calculate the breaks
Parameters
----------
limits : array
Minimum and maximum values.
Returns
-------
out : array_like
Sequence of break points.
"""
Q = self.Q
w = self.w
only_inside = self.only_inside
simplicity_max = self.simplicity_max
density_max = self.density_max
coverage_max = self.coverage_max
simplicity = self.simplicity
coverage = self.coverage
density = self.density
legibility = self.legibility
log10 = np.log10
ceil = np.ceil
floor = np.floor
dmin, dmax = limits
if dmin > dmax:
dmin, dmax = dmax, dmin
elif dmin == dmax:
return np.array([dmin])
best_score = -2
j = 1
while j < float('inf'):
for q in Q:
sm = simplicity_max(q, j)
if w[0]*sm + w[1] + w[2] + w[3] < best_score:
j = float('inf')
break
k = 2
while k < float('inf'):
dm = density_max(k)
if w[0]*sm + w[1] + w[2]*dm + w[3] < best_score:
break
delta = (dmax-dmin)/(k+1)/j/q
z = ceil(log10(delta))
while z < float('inf'):
step = j*q*(10**z)
cm = coverage_max(dmin, dmax, step*(k-1))
if w[0]*sm + w[1]*cm + w[2]*dm + w[3] < best_score:
break
min_start = int(floor(dmax/step)*j - (k-1)*j)
max_start = int(ceil(dmin/step)*j)
if min_start > max_start:
z = z+1
break
for start in range(min_start, max_start+1):
lmin = start * (step/j)
lmax = lmin + step*(k-1)
lstep = step
s = simplicity(q, j, lmin, lmax, lstep)
c = coverage(dmin, dmax, lmin, lmax)
d = density(k, dmin, dmax, lmin, lmax)
l = legibility(lmin, lmax, lstep)
score = w[0]*s + w[1]*c + w[2]*d + w[3]*l
if (score > best_score and
(not only_inside or
(lmin >= dmin and lmax <= dmax))):
best_score = score
best = (lmin, lmax, lstep, q, k)
z = z+1
k = k+1
j = j+1
try:
locs = best[0] + np.arange(best[4])*best[2]
except UnboundLocalError:
locs = []
return locs
| 28.793282 | 75 | 0.531634 |
d2d32938d031d59331d2f4a11e7ede6bb4a40fe0 | 2,412 | py | Python | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | null | null | null | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | 1 | 2019-03-02T00:29:12.000Z | 2019-03-02T04:59:54.000Z | examples/04_sweep_wind_directions.py | ElieKadoche/floris | d18f4d263ecabf502242592f9d60815a07c7b89c | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import numpy as np
from floris.tools import FlorisInterface
from floris.tools.visualization import visualize_cut_plane
"""
04_sweep_wind_directions
This example demonstrates vectorization of wind direction.
A vector of wind directions is passed to the intialize function
and the powers of the two simulated turbines is computed for all
wind directions in one call
The power of both turbines for each wind direction is then plotted
"""
# Instantiate FLORIS using either the GCH or CC model
fi = FlorisInterface("inputs/gch.yaml") # GCH model matched to the default "legacy_gauss" of V2
# fi = FlorisInterface("inputs/cc.yaml") # New CumulativeCurl model
# Define a two turbine farm
D = 126.
layout_x = np.array([0, D*6])
layout_y = [0, 0]
fi.reinitialize(layout = [layout_x, layout_y])
# Sweep wind speeds but keep wind direction fixed
wd_array = np.arange(250,291,1.)
fi.reinitialize(wind_directions=wd_array)
# Define a matrix of yaw angles to be all 0
# Note that yaw angles is now specified as a matrix whose dimesions are
# wd/ws/turbine
num_wd = len(wd_array) # Number of wind directions
num_ws = 1 # Number of wind speeds
num_turbine = len(layout_x) # Number of turbines
yaw_angles = np.zeros((num_wd, num_ws, num_turbine))
# Calculate
fi.calculate_wake(yaw_angles=yaw_angles)
# Collect the turbine powers
turbine_powers = fi.get_turbine_powers() / 1E3 # In kW
# Pull out the power values per turbine
pow_t0 = turbine_powers[:,:,0].flatten()
pow_t1 = turbine_powers[:,:,1].flatten()
# Plot
fig, ax = plt.subplots()
ax.plot(wd_array,pow_t0,color='k',label='Upstream Turbine')
ax.plot(wd_array,pow_t1,color='r',label='Downstream Turbine')
ax.grid(True)
ax.legend()
ax.set_xlabel('Wind Direction (deg)')
ax.set_ylabel('Power (kW)')
plt.show()
| 31.736842 | 95 | 0.76534 |
d2d3e9419d90d8f17a71b13f9d3381c03813b4d4 | 623 | py | Python | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
] | null | null | null | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
] | null | null | null | 1.main.py | learning-nn/nn_from_scratch | 8f8f46efd5814a3cca645b644f70ddc07210256f | [
"MIT"
] | null | null | null | import numpy
import numpy as np
# converting to a layer with 4 input and 3 neuron
inputs = [[1.2, 2.1, 3.4, 1.2],
[1.2, 2.1, 3.4, 1.2],
[1.2, 2.1, 3.4, 1.2]]
print(numpy.shape(inputs))
weights = [[4.1, -4.5, 3.1, 2.3],
[-4.1, 4.5, 2.1, 2.3],
[4.1, 4.5, 3.1, -2.3]]
print(numpy.shape(weights))
biases = [1, 2, 3]
weights2 = [[4.1, -4.5, 3.1],
[-4.1, 4.5, 2.1],
[4.1, 4.5, 3.1]]
biases2 = [1, 2, 3]
layer1_outputs = np.dot(inputs, np.array(weights).T) + biases
layer2_outputs = np.dot(layer1_outputs, np.array(weights2).T) + biases2
print(layer2_outputs)
| 23.961538 | 71 | 0.536116 |
d2d3eacc8c8caee95603f50b68c177c406992381 | 83 | py | Python | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
] | 8 | 2019-06-03T16:29:49.000Z | 2021-05-11T20:38:36.000Z | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
] | 342 | 2019-01-15T19:13:58.000Z | 2020-03-24T16:38:13.000Z | backend/grant/task/__init__.py | DSBUGAY2/zcash-grant-system | 729b9edda13bd1eeb3f445d889264230c6470d7e | [
"MIT"
] | 5 | 2019-02-15T09:06:47.000Z | 2022-01-24T21:38:41.000Z | from . import models
from . import views
from . import commands
from . import jobs | 16.6 | 22 | 0.759036 |
d2d415b3f1a1db25737dd9e6b40de2eb5823d384 | 325 | py | Python | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | [
"MIT"
] | null | null | null | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | [
"MIT"
] | null | null | null | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | [
"MIT"
] | null | null | null | """
This is a Django_ integration of Select2_.
The application includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: https://select2.org/
"""
from django import get_version
if get_version() < '3.2':
default_app_config = "django_select2.apps.Select2AppConfig"
| 23.214286 | 71 | 0.750769 |
d2d4cdab7ece6cb0f6e54ac92797ae4e32cdf266 | 673 | py | Python | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | 1 | 2016-08-21T13:01:42.000Z | 2016-08-21T13:01:42.000Z | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | null | null | null | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | null | null | null | # O(n ** 2)
print(bubble_sort([8, 1, 13, 34, 5, 2, 21, 3, 1], False))
print(bubble_sort([1, 2, 3, 4, 5, 6]))
| 32.047619 | 67 | 0.473997 |
d2d55fb8abaabd881c853630310ad6dc464e75ff | 8,247 | py | Python | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | [
"MIT"
] | null | null | null | "A Console-Based Email Client"
#!/usr/local/bin/python
"""
##########################################################################
pymail - a simple console email interface client in Python; uses Python
poplib module to view POP email messages, smtplib to send new mails, and
the email package to extract mail headers and payload and compose mails;
##########################################################################
"""
import poplib, smtplib, email.utils, mailconfig
from email.parser import Parser
from email.message import Message
fetchEncoding = mailconfig.fetchEncoding
def decodeToUnicode(messageBytes, fetchEncoding=fetchEncoding):
"""
4E, Py3.1: decode fetched bytes to str Unicode string for display or parsing;
use global setting (or by platform default, hdrs inspection, intelligent guess);
in Python 3.2/3.3, this step may not be required: if so, return message intact;
"""
return [line.decode(fetchEncoding) for line in messageBytes]
def splitaddrs(field):
"""
4E: split address list on commas, allowing for commas in name parts
"""
pairs = email.utils.getaddresses([field]) # [(name,addr)]
return [email.utils.formataddr(pair) for pair in pairs] # [name <addr>]
helptext = """
Available commands:
i - index display
l n? - list all messages (or just message n)
d n? - mark all messages for deletion (or just message n)
s n? - save all messages to a file (or just message n)
m - compose and send a new mail message
q - quit pymail
? - display this help text
"""
if __name__ == '__main__':
    # Interactive console session: load all mail, let the user browse,
    # then apply any deletions on the way out.
    import getpass, mailconfig
    mailserver = mailconfig.popservername   # ex: 'pop.rmi.net'
    mailuser = mailconfig.popusername       # ex: 'lutz'
    mailfile = mailconfig.savemailfile      # ex: r'c:\stuff\savemail'
    # prompt for the POP password without echoing it to the console
    mailpswd = getpass.getpass('Password for %s?' % mailserver)
    print('[Pymail email client]')
    msgList = loadmessages(mailserver, mailuser, mailpswd)        # load all
    toDelete = interact(msgList, mailfile)
    # deletions are applied only after the interactive session ends
    # (presumably so POP message numbers stay stable while browsing — confirm)
    if toDelete: deletemessages(mailserver, mailuser, mailpswd, toDelete)
    print('Bye.')
| 37.830275 | 84 | 0.547229 |
d2d6774deb12048e5d8199a5f876c5130870f008 | 1,027 | py | Python | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
import typing
import inspect
from .errors import ParameterTypeResolveError
| 31.121212 | 92 | 0.641675 |
d2d69439ae028b8caac841d651293bd86aa4f321 | 639 | py | Python | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | [
"Unlicense"
] | 1 | 2020-08-29T16:51:47.000Z | 2020-08-29T16:51:47.000Z | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | [
"Unlicense"
] | null | null | null | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | [
"Unlicense"
] | null | null | null | import io
import os
from flask import Flask, request, jsonify
from PIL import Image
from resnet_model import MyResnetModel
app = Flask(__name__)
# max filesize 2mb
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
# setup resnet model
model = MyResnetModel(os.path.dirname(os.path.abspath(__file__)))
| 22.034483 | 65 | 0.694836 |
d2d95eb0f80255c257603ed734e875c5ce26b88b | 2,945 | py | Python | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | [
"BSD-3-Clause"
] | 7 | 2021-03-04T09:29:13.000Z | 2021-03-17T17:35:42.000Z | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | [
"BSD-3-Clause"
] | 25 | 2019-04-23T18:51:02.000Z | 2021-06-10T21:22:47.000Z | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | [
"BSD-3-Clause"
] | 7 | 2019-06-29T10:40:38.000Z | 2019-09-23T09:05:45.000Z | from django.urls import reverse
from rest_framework import status
from .base import BaseTestCase
| 50.775862 | 86 | 0.711036 |
d2dbd1807d449ae04403cf686fe2378b35d5fa68 | 6,585 | py | Python | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | null | null | null | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | null | null | null | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | [
"MIT"
] | 1 | 2020-07-02T02:21:10.000Z | 2020-07-02T02:21:10.000Z | # -*- coding: utf-8 -*-
"""
===============================================================================
module __GenericPhase__: Base class for building Phase objects
===============================================================================
"""
from OpenPNM.Network import GenericNetwork
import OpenPNM.Phases.models
from OpenPNM.Base import Core, Tools, logging
import scipy as sp
logger = logging.getLogger(__name__)
| 39.909091 | 82 | 0.577525 |
d2dbe93b08cbd7c9fba4a7da5b0696432c491446 | 2,860 | py | Python | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | [
"BSD-3-Clause"
] | null | null | null | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | [
"BSD-3-Clause"
] | null | null | null | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import copy
from moveit_commander import move_group
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, sqrt, pow
from std_msgs.msg import String
import io
import shutil
import json
#used to convert the points from the gui in a valid message for ros
from geometry_msgs.msg import Pose, PoseStamped
#used to read out the start points
import os
from nav_msgs.msg import Path
#used for publishing the planned path from start to goal
from visualization_msgs.msg import Marker, MarkerArray
#used to make a service request
from moveit_msgs.srv import GetPositionIKRequest, GetPositionIK
from rqt_mypkg import path_planning_interface
from trajectory_msgs.msg import JointTrajectoryPoint
## StatsitcisDefinedPath is used to get the path length of given points/positions generated by the Motion Plan | 39.178082 | 110 | 0.681469 |
d2dbfa2d8a9c4169b00a898c87b761496a338473 | 596 | py | Python | apps/sendmail/admin.py | CasualGaming/studlan | 63daed67c1d309e4d5bd755eb68163e2174d0e00 | [
"MIT"
] | 9 | 2016-03-15T21:03:49.000Z | 2020-12-02T19:45:44.000Z | apps/sendmail/admin.py | piyushd26/studlan | 6eb96ebda182f44759b430cd497a727e0ee5bb63 | [
"MIT"
] | 161 | 2016-02-05T14:11:50.000Z | 2020-10-14T10:13:21.000Z | apps/sendmail/admin.py | piyushd26/studlan | 6eb96ebda182f44759b430cd497a727e0ee5bb63 | [
"MIT"
] | 11 | 2016-07-27T12:20:05.000Z | 2021-04-18T05:49:17.000Z | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Mail
admin.site.register(Mail, MailAdmin)
| 22.074074 | 115 | 0.676174 |
d2dc2ba48e9f74dafb44ffcc8ba8cd1cd50c6109 | 2,922 | py | Python | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | [
"MIT"
] | null | null | null | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | [
"MIT"
] | 4 | 2022-01-14T12:31:33.000Z | 2022-01-28T10:25:44.000Z | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | [
"MIT"
] | null | null | null | from uuid import uuid4
from fastapi.testclient import TestClient
from ..main import app
client = TestClient(app)
| 36.525 | 97 | 0.612936 |
d2dc870265729c9617c1afe744f12af18a12c128 | 24,837 | py | Python | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | [
"Apache-2.0"
] | null | null | null | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | [
"Apache-2.0"
] | null | null | null | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
(C) Copyright 2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. 8F-30005.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
import os
import time
from apricot import TestWithServers
from ior_utils import IorCommand
import slurm_utils
from test_utils_pool import TestPool
from test_utils_container import TestContainer
from ClusterShell.NodeSet import NodeSet
from general_utils import pcmd
import socket
| 39.930868 | 79 | 0.566856 |
d2dcba40eaf1e9db722986c2a78f80438fb6fdb3 | 1,066 | py | Python | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | 2 | 2020-12-06T10:59:52.000Z | 2021-09-29T22:14:03.000Z | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | null | null | null | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | 2 | 2021-09-29T22:14:18.000Z | 2022-01-18T02:20:26.000Z | """This is the Solution for Year 2020 Day 06"""
import re
from aoc.abstracts.solver import Answers, StrLines
| 32.30303 | 81 | 0.641651 |
d2defb686bfc61f23201cb71e5a9d368779c4dfa | 98 | py | Python | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | [
"MIT"
] | null | null | null | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | [
"MIT"
] | 3 | 2020-04-07T12:39:51.000Z | 2020-04-09T22:49:16.000Z | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name="typer", packages=find_packages(),
)
| 16.333333 | 43 | 0.744898 |
d2dfa41f3a05071765ff4e4b5a6aecdae50d42b0 | 7,105 | py | Python | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | [
"MIT"
] | null | null | null | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | [
"MIT"
] | null | null | null | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | [
"MIT"
] | null | null | null | import numpy as np
from numba import jit, prange
from scipy.stats import mode
from sklearn.metrics import accuracy_score
__all__ = ['dtw_distance', 'KnnDTW']
# Modified from https://github.com/markdregan/K-Nearest-Neighbors-with-Dynamic-Time-Warping
class KnnDTW(object):
    """K-nearest neighbor classifier using dynamic time warping
    as the distance measure between pairs of time series arrays

    Arguments
    ---------
    n_neighbors : int, optional (default = 1)
        Number of neighbors to use by default for KNN
    """

    def __init__(self, n_neighbors=1):
        # `n_neighbors` was documented in the class docstring but never
        # stored, which made predict()/evaluate() fail with AttributeError.
        self.n_neighbors = n_neighbors

    def fit(self, x, y):
        """Fit the model using x as training data and y as class labels

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Training data set for input into KNN classifer

        y : array of shape [n_samples]
            Training labels for input into KNN classifier
        """
        # Copy so later mutation of the caller's arrays can't corrupt the model.
        self.x = np.copy(x)
        self.y = np.copy(y)

    def _dist_matrix(self, x, y):
        """Computes the M x N distance matrix between the training
        dataset and testing dataset (y) using the DTW distance measure

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
        y : array of shape [n_samples, n_timepoints]

        Returns
        -------
        Distance matrix between each item of x and y with
        shape [training_n_samples, testing_n_samples]
        """
        return dtw_distance(x, y)

    def predict(self, x):
        """Predict the class labels or probability estimates for
        the provided data

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Array containing the testing data set to be classified

        Returns
        -------
        2 arrays representing:
            (1) the predicted class labels
            (2) the knn label count probability
        """
        np.random.seed(0)
        dm = self._dist_matrix(x, self.x)

        # Identify the k nearest neighbors
        knn_idx = dm.argsort()[:, :self.n_neighbors]

        # Identify k nearest labels
        knn_labels = self.y[knn_idx]

        # Model label: majority vote amongst the k nearest labels
        mode_data = mode(knn_labels, axis=1)
        mode_label = mode_data[0]
        mode_proba = mode_data[1] / self.n_neighbors

        return mode_label.ravel(), mode_proba.ravel()

    def evaluate(self, x, y):
        """
        Predict the class labels or probability estimates for
        the provided data and then evaluates the accuracy score.

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Array containing the testing data set to be classified

        y : array of shape [n_samples]
            Array containing the labels of the testing dataset to be classified

        Returns
        -------
        1 floating point value representing the accuracy of the classifier
        """
        # Predict the labels and the probabilities
        pred_labels, pred_probas = self.predict(x)

        # Ensure labels are integers
        y = y.astype('int32')
        pred_labels = pred_labels.astype('int32')

        # Compute accuracy measure
        accuracy = accuracy_score(y, pred_labels)
        return accuracy

    def predict_proba(self, x):
        """Predict the class labels probability estimates for
        the provided data

        Arguments
        ---------
        x : array of shape [n_samples, n_timepoints]
            Array containing the testing data set to be classified

        Returns
        -------
        2 arrays representing:
            (1) the predicted class probabilities
            (2) the knn labels
        """
        np.random.seed(0)
        dm = self._dist_matrix(x, self.x)

        # Invert the distance matrix: larger values now mean "closer",
        # as required by the softmax below.
        dm = -dm

        classes = np.unique(self.y)
        class_dm = []

        # Partition distance matrix by class
        for i, cls in enumerate(classes):
            idx = np.argwhere(self.y == cls)[:, 0]
            cls_dm = dm[:, idx]  # [N_test, N_train_c]

            # Take maximum distance vector due to softmax probabilities
            cls_dm = np.max(cls_dm, axis=-1)  # [N_test,]

            class_dm.append([cls_dm])

        # Concatenate the classwise distance matrices and transpose
        class_dm = np.concatenate(class_dm, axis=0)  # [C, N_test]
        class_dm = class_dm.transpose()  # [N_test, C]

        # Compute softmax probabilities (shift by the max for stability)
        class_dm_exp = np.exp(class_dm - class_dm.max())
        class_dm = class_dm_exp / np.sum(class_dm_exp, axis=-1, keepdims=True)

        probabilities = class_dm
        knn_labels = np.argmax(class_dm, axis=-1)

        return probabilities, knn_labels
d2dfc266c6056fe94eecb550bf60b54a02eaa933 | 470 | py | Python | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | 1 | 2021-02-04T15:57:03.000Z | 2021-02-04T15:57:03.000Z | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | null | null | null | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | [
"MIT"
] | 2 | 2021-02-04T04:41:08.000Z | 2022-03-01T16:18:14.000Z | from setuptools import setup
setup(
name='uam_simulator',
version='1.0',
description='A tool to simulate different architectures for UAM traffic management',
author='Coline Ramee',
author_email='coline.ramee@gatech.edu',
packages=['uam_simulator'],
install_requires=['numpy', 'scikit-learn', 'gurobipy']
)
# If installing from source the package name is gurobipy, if installing with conda it's gurobi, but when importing it's still gurobipy
| 36.153846 | 134 | 0.734043 |
d2e2156c949fb7522a291e88e911e831ba30e23c | 1,115 | py | Python | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | [
"Apache-2.0"
] | null | null | null |
if __name__ == '__main__':
sol=Solution()
arr = ["un", "iq", "ue"]
# arr = ["cha", "r", "act", "ers"]
# arr = ["abcdefghijklmnopqrstuvwxyz"]
# arr=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"]
print(sol.maxLength(arr))
| 26.547619 | 90 | 0.419731 |
d2e2e8b5aeb34c6ee7b5e4eefd603f0d67226b67 | 419 | py | Python | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | [
"MIT"
] | null | null | null | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | [
"MIT"
] | null | null | null | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-25 22:13
from django.db import migrations, models
| 22.052632 | 92 | 0.606205 |
d2e3431a8ca64567f24a9939424b3256a13d8da9 | 34,809 | py | Python | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | [
"PostgreSQL",
"CC0-1.0"
] | 1 | 2021-07-26T02:07:13.000Z | 2021-07-26T02:07:13.000Z | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | [
"PostgreSQL",
"CC0-1.0"
] | null | null | null | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | [
"PostgreSQL",
"CC0-1.0"
] | null | null | null | from collections import namedtuple
from datetime import timedelta
import itertools
from operator import attrgetter
from pando.utils import utcnow
from psycopg2.extras import execute_batch
from ..constants import SEPA
from ..exceptions import (
AccountSuspended, MissingPaymentAccount, RecipientAccountSuspended,
NoSelfTipping, UserDoesntAcceptTips,
)
from ..i18n.currencies import Money, MoneyBasket
from ..utils import group_by
# An unsaved transfer, computed by the resolution functions below before the
# corresponding `payin_transfers` rows are created.
ProtoTransfer = namedtuple('ProtoTransfer', [
    'amount',       # the amount of money to transfer
    'recipient',    # the Participant who receives the money
    'destination',  # a `payment_accounts` row to send the money to
    'context',      # 'personal-donation' or 'team-donation'
    'unit_amount',  # the periodic amount this transfer funds
    'period',       # the period of the donation
    'team',         # the team's participant id, or None
    'visibility',   # the visibility level of the tip
])
def prepare_payin(db, payer, amount, route, proto_transfers, off_session=False):
    """Prepare to charge a user.

    Args:
        payer (Participant): the user who will be charged
        amount (Money): the presentment amount of the charge
        route (ExchangeRoute): the payment instrument to charge
        proto_transfers ([ProtoTransfer]): the transfers to prepare
        off_session (bool):
            `True` means that the payment is being initiated because it was scheduled,
            `False` means that the payer has initiated the operation just now

    Returns:
        tuple: the row created in the `payins` table, and the list of rows
               created in the `payin_transfers` table

    Raises:
        AccountSuspended: if the payer's account is suspended, or has no
                          email address attached

    """
    assert isinstance(amount, Money), type(amount)
    assert route.participant == payer, (route.participant, payer)
    # Only instruments that haven't been consumed or invalidated can be charged.
    assert route.status in ('pending', 'chargeable')
    # NOTE(review): a missing email address is treated like a suspension here —
    # presumably because the payer must be reachable; confirm.
    if payer.is_suspended or not payer.get_email_address():
        raise AccountSuspended()
    with db.get_cursor() as cursor:
        # Create the payin in the 'pre' state, before the payment processor
        # is contacted.
        payin = cursor.one("""
            INSERT INTO payins
                   (payer, amount, route, status, off_session)
            VALUES (%s, %s, %s, 'pre', %s)
         RETURNING *
        """, (payer.id, amount, route.id, off_session))
        # Record the initial status in the payin's event log.
        cursor.run("""
            INSERT INTO payin_events
                   (payin, status, error, timestamp)
            VALUES (%s, %s, NULL, current_timestamp)
        """, (payin.id, payin.status))
        # Create the transfer rows in the same transaction as the payin, so
        # that either everything or nothing is committed.
        payin_transfers = []
        for t in proto_transfers:
            payin_transfers.append(prepare_payin_transfer(
                cursor, payin, t.recipient, t.destination, t.context, t.amount,
                t.visibility, t.unit_amount, t.period, t.team,
            ))
    return payin, payin_transfers
def update_payin(
    db, payin_id, remote_id, status, error,
    amount_settled=None, fee=None, intent_id=None, refunded_amount=None,
):
    """Update the status and other attributes of a charge.

    Args:
        payin_id (int): the ID of the charge in our database
        remote_id (str): the ID of the charge in the payment processor's database
        status (str): the new status of the charge
        error (str): if the charge failed, an error message to show to the payer
        amount_settled (Money): the amount that was actually settled, if known
        fee (Money): the payment processor's fee, if known
        intent_id (str): the processor-side payment intent ID, if any
        refunded_amount (Money): the new refunded amount, if any

    Returns:
        Record: the row updated in the `payins` table, or `None` if no row
                matched `payin_id`

    """
    with db.get_cursor() as cursor:
        # The `coalesce` calls preserve attributes which have already been
        # set (`remote_id`, `amount_settled`, `fee`, `intent_id`), whereas a
        # new `refunded_amount` takes precedence over the stored one. The
        # subquery returns the status the payin had before this update.
        payin = cursor.one("""
            UPDATE payins
               SET status = %(status)s
                 , error = %(error)s
                 , remote_id = coalesce(remote_id, %(remote_id)s)
                 , amount_settled = coalesce(amount_settled, %(amount_settled)s)
                 , fee = coalesce(fee, %(fee)s)
                 , intent_id = coalesce(intent_id, %(intent_id)s)
                 , refunded_amount = coalesce(%(refunded_amount)s, refunded_amount)
             WHERE id = %(payin_id)s
         RETURNING *
                 , (SELECT status FROM payins WHERE id = %(payin_id)s) AS old_status
        """, locals())
        if not payin:
            return
        # Guard against mixing up two different remote charges.
        if remote_id and payin.remote_id != remote_id:
            raise AssertionError(f"the remote IDs don't match: {payin.remote_id!r} != {remote_id!r}")
        # Log the status change in the payin's event log.
        if status != payin.old_status:
            cursor.run("""
                INSERT INTO payin_events
                       (payin, status, error, timestamp)
                VALUES (%s, %s, %s, current_timestamp)
            """, (payin_id, status, error))
        # A single-use payment instrument is consumed by its first charge
        # that reaches the 'pending' or 'succeeded' state.
        if status in ('pending', 'succeeded'):
            cursor.run("""
                UPDATE exchange_routes
                   SET status = 'consumed'
                 WHERE id = %s
                   AND one_off IS TRUE
            """, (payin.route,))
        # Lock to avoid concurrent updates
        cursor.run("SELECT * FROM participants WHERE id = %s FOR UPDATE",
                   (payin.payer,))
        # Update scheduled payins, if appropriate
        if status in ('pending', 'succeeded'):
            sp = cursor.one("""
                SELECT *
                  FROM scheduled_payins
                 WHERE payer = %s
                   AND payin = %s
            """, (payin.payer, payin.id))
            if not sp:
                # Try to find a scheduled renewal that matches this payin.
                # It doesn't have to be an exact match.
                schedule = cursor.all("""
                    SELECT *
                      FROM scheduled_payins
                     WHERE payer = %s
                       AND payin IS NULL
                       AND mtime < %s
                """, (payin.payer, payin.ctime))
                # Consider the scheduled payins closest to today first.
                today = utcnow().date()
                schedule.sort(key=lambda sp: abs((sp.execution_date - today).days))
                payin_tippees = set(cursor.all("""
                    SELECT coalesce(team, recipient) AS tippee
                      FROM payin_transfers
                     WHERE payer = %s
                       AND payin = %s
                """, (payin.payer, payin.id)))
                # Attach this payin to the first scheduled payin which targets
                # at least one of the same tippees.
                for sp in schedule:
                    if any((tr['tippee_id'] in payin_tippees) for tr in sp.transfers):
                        cursor.run("""
                            UPDATE scheduled_payins
                               SET payin = %s
                                 , mtime = current_timestamp
                             WHERE id = %s
                        """, (payin.id, sp.id))
                        break
    return payin
def adjust_payin_transfers(db, payin, net_amount):
    """Correct a payin's transfers once the net amount is known.

    Args:
        payin (Record): a row from the `payins` table
        net_amount (Money): the amount of money available to transfer

    """
    payer = db.Participant.from_id(payin.payer)
    route = db.ExchangeRoute.from_id(payer, payin.route)
    # e.g. 'stripe-sdd' -> 'stripe'
    provider = route.network.split('-', 1)[0]
    payer_country = route.country
    # We have to update the transfer amounts in a single transaction to
    # avoid ending up in an inconsistent state.
    with db.get_cursor() as cursor:
        payin_transfers = cursor.all("""
            SELECT pt.id, pt.amount, pt.status, pt.remote_id, pt.team, pt.recipient, team_p
              FROM payin_transfers pt
         LEFT JOIN participants team_p ON team_p.id = pt.team
             WHERE pt.payin = %s
          ORDER BY pt.id
               FOR UPDATE OF pt
        """, (payin.id,))
        assert payin_transfers
        if any(pt.status == 'succeeded' for pt in payin_transfers):
            # At least one of the transfers has already been executed, so it's
            # too complicated to adjust the amounts now.
            return
        # Group the transfers by beneficiary (team or individual recipient)
        # and prorate the net amount between the beneficiaries.
        transfers_by_tippee = group_by(
            payin_transfers, lambda pt: (pt.team or pt.recipient)
        )
        prorated_amounts = resolve_amounts(net_amount, {
            tippee: MoneyBasket(pt.amount for pt in grouped).fuzzy_sum(net_amount.currency)
            for tippee, grouped in transfers_by_tippee.items()
        })
        teams = set(pt.team for pt in payin_transfers if pt.team is not None)
        updates = []
        for tippee, prorated_amount in prorated_amounts.items():
            transfers = transfers_by_tippee[tippee]
            if tippee in teams:
                # Recompute how the team's donation should be split between
                # its members, now that the net amount is known.
                team = transfers[0].team_p
                tip = payer.get_tip_to(team)
                try:
                    team_donations = resolve_team_donation(
                        db, team, provider, payer, payer_country,
                        prorated_amount, tip, sepa_only=True,
                    )
                except (MissingPaymentAccount, NoSelfTipping):
                    # Resolution failed; fall back to prorating the existing
                    # transfer amounts.
                    team_amounts = resolve_amounts(prorated_amount, {
                        pt.id: pt.amount.convert(prorated_amount.currency)
                        for pt in transfers
                    })
                    for pt in transfers:
                        if pt.amount != team_amounts.get(pt.id):
                            # Only unexecuted transfers can be modified.
                            assert pt.remote_id is None and pt.status in ('pre', 'pending')
                            updates.append((team_amounts[pt.id], pt.id))
                else:
                    # Sync the transfer rows with the new resolution: adjust
                    # the amounts, delete the rows which are no longer needed,
                    # and create rows for any new recipients.
                    team_donations = {d.recipient.id: d for d in team_donations}
                    for pt in transfers:
                        if pt.status == 'failed':
                            continue
                        d = team_donations.pop(pt.recipient, None)
                        if d is None:
                            assert pt.remote_id is None and pt.status in ('pre', 'pending')
                            cursor.run("""
                                DELETE FROM payin_transfer_events
                                 WHERE payin_transfer = %(pt_id)s
                                   AND status = 'pending';
                                DELETE FROM payin_transfers WHERE id = %(pt_id)s;
                            """, dict(pt_id=pt.id))
                        elif pt.amount != d.amount:
                            assert pt.remote_id is None and pt.status in ('pre', 'pending')
                            updates.append((d.amount, pt.id))
                    # Derive each new transfer's unit amount from how many
                    # periods of the tip this payment funds.
                    n_periods = prorated_amount / tip.periodic_amount.convert(prorated_amount.currency)
                    for d in team_donations.values():
                        unit_amount = (d.amount / n_periods).round(allow_zero=False)
                        prepare_payin_transfer(
                            db, payin, d.recipient, d.destination, 'team-donation',
                            d.amount, tip.visibility, unit_amount, tip.period,
                            team=team.id,
                        )
            else:
                # Single beneficiary: simply update the transfer's amount.
                pt = transfers[0]
                if pt.amount != prorated_amount:
                    assert pt.remote_id is None and pt.status in ('pre', 'pending')
                    updates.append((prorated_amount, pt.id))
        if updates:
            execute_batch(cursor, """
                UPDATE payin_transfers
                   SET amount = %s
                 WHERE id = %s
                   AND status <> 'succeeded';
            """, updates)
def resolve_tip(
    db, tip, tippee, provider, payer, payer_country, payment_amount,
    sepa_only=False, excluded_destinations=(),
):
    """Prepare to fund a tip.

    Args:
        tip (Row): a row from the `tips` table
        tippee (Participant): the intended beneficiary of the donation
        provider (str): the payment processor ('paypal' or 'stripe')
        payer (Participant): the donor
        payer_country (str): the country the money is supposedly coming from
        payment_amount (Money): the amount of money being sent
        sepa_only (bool): only consider destination accounts within SEPA
        excluded_destinations (set): any `payment_accounts.pk` values to exclude

    Returns:
        a list of `ProtoTransfer` objects

    Raises:
        MissingPaymentAccount: if no suitable destination has been found
        NoSelfTipping: if the donor would end up sending money to themself
        RecipientAccountSuspended: if the tippee's account is suspended
        UserDoesntAcceptTips: if the tippee doesn't accept donations

    """
    # Note: the default value of `excluded_destinations` used to be `set()`,
    # a mutable default argument; it's now an empty tuple, consistent with
    # `resolve_destination` and `resolve_team_donation`. The value is only
    # read, so any iterable works.
    assert tip.tipper == payer.id
    assert tip.tippee == tippee.id
    if not tippee.accepts_tips:
        raise UserDoesntAcceptTips(tippee.username)
    if tippee.is_suspended:
        raise RecipientAccountSuspended(tippee)
    if tippee.kind == 'group':
        # A donation to a team is distributed amongst its members.
        return resolve_team_donation(
            db, tippee, provider, payer, payer_country, payment_amount, tip,
            sepa_only=sepa_only, excluded_destinations=excluded_destinations,
        )
    else:
        destination = resolve_destination(
            db, tippee, provider, payer, payer_country, payment_amount,
            sepa_only=sepa_only, excluded_destinations=excluded_destinations,
        )
        return [ProtoTransfer(
            payment_amount, tippee, destination, 'personal-donation',
            tip.periodic_amount, tip.period, None, tip.visibility,
        )]
def resolve_destination(
    db, tippee, provider, payer, payer_country, payin_amount,
    sepa_only=False, excluded_destinations=(),
):
    """Figure out where to send a payment.

    Args:
        tippee (Participant): the intended beneficiary of the payment
        provider (str): the payment processor ('paypal' or 'stripe')
        payer (Participant): the user who wants to pay
        payer_country (str): the country the money is supposedly coming from
        payin_amount (Money): the payment amount
        sepa_only (bool): only consider destination accounts within SEPA
        excluded_destinations (set): any `payment_accounts.pk` values to exclude

    Returns:
        Record: a row from the `payment_accounts` table

    Raises:
        MissingPaymentAccount: if no suitable destination has been found
        NoSelfTipping: if the payer would end up sending money to themself

    """
    tippee_id = tippee.id
    if tippee_id == payer.id:
        raise NoSelfTipping()
    currency = payin_amount.currency
    # Convert to a list so psycopg2 can adapt it to a SQL array.
    excluded_destinations = list(excluded_destinations)
    # Pick the tippee's best account: prefer one whose default currency
    # matches the payment's, then one in the payer's country, and fall back
    # on the oldest connection.
    destination = db.one("""
        SELECT *
          FROM payment_accounts
         WHERE participant = %(tippee_id)s
           AND provider = %(provider)s
           AND is_current
           AND verified
           AND coalesce(charges_enabled, true)
           AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL
           AND ( country IN %(SEPA)s OR NOT %(sepa_only)s )
      ORDER BY default_currency = %(currency)s DESC
             , country = %(payer_country)s DESC
             , connection_ts
         LIMIT 1
    """, dict(locals(), SEPA=SEPA))
    if destination:
        return destination
    else:
        raise MissingPaymentAccount(tippee)
def resolve_team_donation(
    db, team, provider, payer, payer_country, payment_amount, tip,
    sepa_only=False, excluded_destinations=(),
):
    """Figure out how to distribute a donation to a team's members.

    Args:
        team (Participant): the team the donation is for
        provider (str): the payment processor ('paypal' or 'stripe')
        payer (Participant): the donor
        payer_country (str): the country code the money is supposedly coming from
        payment_amount (Money): the amount of money being sent
        tip (Row): the row from the `tips` table
        sepa_only (bool): only consider destination accounts within SEPA
        excluded_destinations (set): any `payment_accounts.pk` values to exclude

    Returns:
        a list of `ProtoTransfer` objects

    Raises:
        MissingPaymentAccount: if no suitable destination has been found
        NoSelfTipping: if the payer would end up sending money to themself
        RecipientAccountSuspended: if the team or all of its members are suspended

    """
    if team.is_suspended:
        raise RecipientAccountSuspended(team)
    currency = payment_amount.currency
    takes = team.get_current_takes_for_payment(currency, tip.amount)
    if all(t.is_suspended for t in takes):
        raise RecipientAccountSuspended(takes)
    # Drop the suspended members.
    takes = [t for t in takes if not t.is_suspended]
    if len(takes) == 1 and takes[0].member == payer.id:
        raise NoSelfTipping()
    member_ids = tuple([t.member for t in takes])
    # Convert to a list so psycopg2 can adapt it to a SQL array.
    excluded_destinations = list(excluded_destinations)
    # For each member, pick their best account: prefer the payment's
    # currency, then the payer's country, then the oldest connection.
    payment_accounts = {row.participant: row for row in db.all("""
        SELECT DISTINCT ON (participant) *
          FROM payment_accounts
         WHERE participant IN %(member_ids)s
           AND provider = %(provider)s
           AND is_current
           AND verified
           AND coalesce(charges_enabled, true)
           AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL
      ORDER BY participant
             , default_currency = %(currency)s DESC
             , country = %(payer_country)s DESC
             , connection_ts
    """, locals())}
    del member_ids
    if not payment_accounts:
        raise MissingPaymentAccount(team)
    takes = [t for t in takes if t.member in payment_accounts and t.member != payer.id]
    if not takes:
        raise NoSelfTipping()
    # Sort the takes: highest take relative to the money already received in
    # advance first, ties broken by the lowest advance, then by seniority.
    takes.sort(key=lambda t: (
        -(t.amount / (t.paid_in_advance + payment_amount)),
        t.paid_in_advance,
        t.ctime
    ))
    # Try to distribute the donation to multiple members.
    if sepa_only or provider == 'stripe':
        sepa_accounts = {a.participant: a for a in db.all("""
            SELECT DISTINCT ON (a.participant) a.*
              FROM payment_accounts a
             WHERE a.participant IN %(member_ids)s
               AND a.provider = %(provider)s
               AND a.is_current
               AND a.verified
               AND coalesce(a.charges_enabled, true)
               AND array_position(%(excluded_destinations)s::bigint[], a.pk) IS NULL
               AND a.country IN %(SEPA)s
          ORDER BY a.participant
                 , a.default_currency = %(currency)s DESC
                 , a.connection_ts
        """, dict(locals(), SEPA=SEPA, member_ids={t.member for t in takes}))}
        # `and` binds tighter than `or` below: a multi-member split is
        # attempted either when it's required (`sepa_only`) or when the
        # top-priority member is in SEPA along with at least one other member.
        if sepa_only or len(sepa_accounts) > 1 and takes[0].member in sepa_accounts:
            selected_takes = [
                t for t in takes if t.member in sepa_accounts and t.amount != 0
            ]
            if selected_takes:
                resolve_take_amounts(payment_amount, selected_takes)
                selected_takes.sort(key=attrgetter('member'))
                # Derive each transfer's unit amount from how many periods of
                # the tip this payment funds.
                n_periods = payment_amount / tip.periodic_amount.convert(currency)
                return [
                    ProtoTransfer(
                        t.resolved_amount,
                        db.Participant.from_id(t.member),
                        sepa_accounts[t.member],
                        'team-donation',
                        (t.resolved_amount / n_periods).round(allow_zero=False),
                        tip.period,
                        team.id,
                        tip.visibility,
                    )
                    for t in selected_takes if t.resolved_amount != 0
                ]
            elif sepa_only:
                raise MissingPaymentAccount(team)
    # Fall back to sending the entire donation to the member who "needs" it most.
    member = db.Participant.from_id(takes[0].member)
    account = payment_accounts[member.id]
    return [ProtoTransfer(
        payment_amount, member, account, 'team-donation',
        tip.periodic_amount, tip.period, team.id, tip.visibility,
    )]
def resolve_take_amounts(payment_amount, takes):
    """Compute team transfer amounts.

    Args:
        payment_amount (Money): the total amount of money to transfer
        takes (list): rows returned by `team.get_current_takes_for_payment(...)`

    This function doesn't return anything; instead it mutates the given takes,
    adding a `resolved_amount` attribute to each one.
    """
    # How many weeks' worth of their take each member has already received.
    for take in takes:
        if take.amount == 0:
            take.weeks_of_advance = 0
        else:
            take.weeks_of_advance = take.paid_in_advance / take.amount
    # The highest advance amongst the members, floored at zero.
    max_weeks_of_advance = max([take.weeks_of_advance for take in takes] + [0])
    # The base amounts mirror the takes themselves; the convergence amounts
    # are the extra money each member would need in order to catch up with
    # the most advanced member.
    base_amounts = {}
    convergence_amounts = {}
    for take in takes:
        base_amounts[take.member] = take.amount
        catch_up = take.amount * (max_weeks_of_advance - take.weeks_of_advance)
        convergence_amounts[take.member] = catch_up.round_up()
    tr_amounts = resolve_amounts(payment_amount, base_amounts, convergence_amounts)
    for take in takes:
        take.resolved_amount = tr_amounts.get(take.member, payment_amount.zero())
def resolve_amounts(available_amount, base_amounts, convergence_amounts=None, payday_id=1):
"""Compute transfer amounts.
Args:
available_amount (Money):
the payin amount to split into transfer amounts
base_amounts (Dict[Any, Money]):
a map of IDs to raw transfer amounts
convergence_amounts (Dict[Any, Money]):
an optional map of IDs to ideal additional amounts
payday_id (int):
the ID of the current or next payday, used to rotate who receives
the remainder when there is a tie
Returns a copy of `base_amounts` with updated values.
"""
min_transfer_amount = Money.MINIMUMS[available_amount.currency]
r = {}
amount_left = available_amount
# Attempt to converge
if convergence_amounts:
convergence_sum = Money.sum(convergence_amounts.values(), amount_left.currency)
if convergence_sum != 0:
convergence_amounts = {k: v for k, v in convergence_amounts.items() if v != 0}
if amount_left == convergence_sum:
# We have just enough money for convergence.
return convergence_amounts
elif amount_left > convergence_sum:
# We have more than enough money for full convergence, the extra
# funds will be allocated in proportion to `base_amounts`.
r.update(convergence_amounts)
amount_left -= convergence_sum
else:
# We only have enough for partial convergence, the funds will be
# allocated in proportion to `convergence_amounts`.
base_amounts = convergence_amounts
# Compute the prorated amounts
base_sum = Money.sum(base_amounts.values(), amount_left.currency)
base_ratio = 0 if base_sum == 0 else amount_left / base_sum
for key, base_amount in sorted(base_amounts.items()):
if base_amount == 0:
continue
assert amount_left >= min_transfer_amount
amount = min((base_amount * base_ratio).round_down(), amount_left)
r[key] = amount + r.get(key, 0)
amount_left -= amount
# Deal with rounding errors
if amount_left > 0:
# Try to distribute in a way that doesn't skew the percentages much.
# If there's a tie, use the payday ID to rotate the winner every week.
i = itertools.count(1)
n = len(r)
for key, amount in sorted(r.items(), key=compute_priority):
r[key] += min_transfer_amount
amount_left -= min_transfer_amount
if amount_left == 0:
break
# Final check and return
assert amount_left == 0, '%r != 0' % amount_left
return r
def prepare_payin_transfer(
db, payin, recipient, destination, context, amount, visibility,
unit_amount=None, period=None, team=None,
):
"""Prepare the allocation of funds from a payin.
Args:
payin (Record): a row from the `payins` table
recipient (Participant): the user who will receive the money
destination (Record): a row from the `payment_accounts` table
amount (Money): the amount of money that will be received
visibility (int): a copy of `tip.visibility`
unit_amount (Money): the `periodic_amount` of a recurrent donation
period (str): the period of a recurrent payment
team (int): the ID of the project this payment is tied to
Returns:
Record: the row created in the `payin_transfers` table
"""
assert recipient.id == destination.participant, (recipient, destination)
if recipient.is_suspended:
raise RecipientAccountSuspended()
if unit_amount:
n_units = int(amount / unit_amount.convert(amount.currency))
else:
n_units = None
return db.one("""
INSERT INTO payin_transfers
(payin, payer, recipient, destination, context, amount,
unit_amount, n_units, period, team, visibility,
status, ctime)
VALUES (%s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
'pre', clock_timestamp())
RETURNING *
""", (payin.id, payin.payer, recipient.id, destination.pk, context, amount,
unit_amount, n_units, period, team, visibility))
def update_payin_transfer(
db, pt_id, remote_id, status, error,
amount=None, fee=None, update_donor=True, reversed_amount=None,
):
"""Update the status and other attributes of a payment.
Args:
pt_id (int): the ID of the payment in our database
remote_id (str): the ID of the transfer in the payment processor's database
status (str): the new status of the payment
error (str): if the payment failed, an error message to show to the payer
Returns:
Record: the row updated in the `payin_transfers` table
"""
with db.get_cursor() as cursor:
pt = cursor.one("""
UPDATE payin_transfers
SET status = %(status)s
, error = %(error)s
, remote_id = coalesce(remote_id, %(remote_id)s)
, amount = COALESCE(%(amount)s, amount)
, fee = COALESCE(%(fee)s, fee)
, reversed_amount = coalesce(%(reversed_amount)s, reversed_amount)
WHERE id = %(pt_id)s
RETURNING *
, (SELECT amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_amount
, (SELECT reversed_amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_reversed_amount
, (SELECT status FROM payin_transfers WHERE id = %(pt_id)s) AS old_status
""", locals())
if not pt:
return
if remote_id and pt.remote_id != remote_id:
raise AssertionError(f"the remote IDs don't match: {pt.remote_id!r} != {remote_id!r}")
if status != pt.old_status:
cursor.run("""
INSERT INTO payin_transfer_events
(payin_transfer, status, error, timestamp)
VALUES (%s, %s, %s, current_timestamp)
""", (pt_id, status, error))
# If the payment has failed or hasn't been settled yet, then stop here.
if status != 'succeeded':
return pt
# Update the `paid_in_advance` value of the donation.
params = pt._asdict()
params['delta'] = pt.amount
if pt.old_status == 'succeeded':
params['delta'] -= pt.old_amount
if pt.reversed_amount:
params['delta'] += -(pt.reversed_amount - (pt.old_reversed_amount or 0))
elif pt.old_reversed_amount:
params['delta'] += pt.old_reversed_amount
if params['delta'] == 0:
return pt
updated_tips = cursor.all("""
WITH latest_tip AS (
SELECT *
FROM tips
WHERE tipper = %(payer)s
AND tippee = COALESCE(%(team)s, %(recipient)s)
ORDER BY mtime DESC
LIMIT 1
)
UPDATE tips t
SET paid_in_advance = (
coalesce_currency_amount(t.paid_in_advance, t.amount::currency) +
convert(%(delta)s, t.amount::currency)
)
, is_funded = true
FROM latest_tip lt
WHERE t.tipper = lt.tipper
AND t.tippee = lt.tippee
AND t.mtime >= lt.mtime
RETURNING t.*
""", params)
if not updated_tips:
# This transfer isn't linked to a tip.
return pt
assert len(updated_tips) < 10, updated_tips
if any(t.paid_in_advance <= 0 for t in updated_tips):
cursor.run("""
UPDATE tips
SET is_funded = false
WHERE tipper = %(payer)s
AND paid_in_advance <= 0
""", params)
# If it's a team donation, update the `paid_in_advance` value of the take.
if pt.context == 'team-donation':
updated_takes = cursor.all("""
WITH latest_take AS (
SELECT *
FROM takes
WHERE team = %(team)s
AND member = %(recipient)s
AND amount IS NOT NULL
ORDER BY mtime DESC
LIMIT 1
)
UPDATE takes t
SET paid_in_advance = (
coalesce_currency_amount(lt.paid_in_advance, lt.amount::currency) +
convert(%(delta)s, lt.amount::currency)
)
FROM latest_take lt
WHERE t.team = lt.team
AND t.member = lt.member
AND t.mtime >= lt.mtime
RETURNING t.id
""", params)
assert 0 < len(updated_takes) < 10, params
# Recompute the cached `receiving` amount of the donee.
cursor.run("""
WITH our_tips AS (
SELECT t.amount
FROM current_tips t
WHERE t.tippee = %(p_id)s
AND t.is_funded
)
UPDATE participants AS p
SET receiving = taking + coalesce_currency_amount(
(SELECT sum(t.amount, p.main_currency) FROM our_tips t),
p.main_currency
)
, npatrons = (SELECT count(*) FROM our_tips)
WHERE p.id = %(p_id)s
""", dict(p_id=(pt.team or pt.recipient)))
# Recompute the donor's cached `giving` amount and payment schedule.
if update_donor:
donor = db.Participant.from_id(pt.payer)
donor.update_giving()
donor.schedule_renewals()
return pt
def abort_payin(db, payin, error='aborted by payer'):
"""Mark a payin as cancelled.
Args:
payin (Record): a row from the `payins` table
error (str): the error message to attach to the payin
Returns:
Record: the row updated in the `payins` table
"""
payin = update_payin(db, payin.id, payin.remote_id, 'failed', error)
db.run("""
WITH updated_transfers as (
UPDATE payin_transfers
SET status = 'failed'
, error = %(error)s
WHERE payin = %(payin_id)s
AND status <> 'failed'
RETURNING *
)
INSERT INTO payin_transfer_events
(payin_transfer, status, error, timestamp)
SELECT pt.id, 'failed', pt.error, current_timestamp
FROM updated_transfers pt
""", dict(error=error, payin_id=payin.id))
return payin
def record_payin_refund(
db, payin_id, remote_id, amount, reason, description, status, error=None, ctime=None,
):
"""Record a charge refund.
Args:
payin_id (int): the ID of the refunded payin in our database
remote_id (int): the ID of the refund in the payment processor's database
amount (Money): the refund amount, must be less or equal to the payin amount
reason (str): why this refund was initiated (`refund_reason` SQL type)
description (str): details of the circumstances of this refund
status (str): the current status of the refund (`refund_status` SQL type)
error (str): error message, if the refund has failed
ctime (datetime): when the refund was initiated
Returns:
Record: the row inserted in the `payin_refunds` table
"""
refund = db.one("""
INSERT INTO payin_refunds
(payin, remote_id, amount, reason, description,
status, error, ctime)
VALUES (%(payin_id)s, %(remote_id)s, %(amount)s, %(reason)s, %(description)s,
%(status)s, %(error)s, coalesce(%(ctime)s, current_timestamp))
ON CONFLICT (payin, remote_id) DO UPDATE
SET amount = excluded.amount
, reason = excluded.reason
, description = excluded.description
, status = excluded.status
, error = excluded.error
RETURNING *
, ( SELECT old.status
FROM payin_refunds old
WHERE old.payin = %(payin_id)s
AND old.remote_id = %(remote_id)s
) AS old_status
""", locals())
notify = (
refund.status in ('pending', 'succeeded') and
refund.status != refund.old_status and
refund.ctime > (utcnow() - timedelta(hours=24))
)
if notify:
payin = db.one("SELECT * FROM payins WHERE id = %s", (refund.payin,))
payer = db.Participant.from_id(payin.payer)
payer.notify(
'payin_refund_initiated',
payin_amount=payin.amount,
payin_ctime=payin.ctime,
refund_amount=refund.amount,
refund_reason=refund.reason,
email_unverified_address=True,
)
return refund
def record_payin_transfer_reversal(
db, pt_id, remote_id, amount, payin_refund_id=None, ctime=None
):
"""Record a transfer reversal.
Args:
pt_id (int): the ID of the reversed transfer in our database
remote_id (int): the ID of the reversal in the payment processor's database
amount (Money): the reversal amount, must be less or equal to the transfer amount
payin_refund_id (int): the ID of the associated payin refund in our database
ctime (datetime): when the refund was initiated
Returns:
Record: the row inserted in the `payin_transfer_reversals` table
"""
return db.one("""
INSERT INTO payin_transfer_reversals
(payin_transfer, remote_id, amount, payin_refund,
ctime)
VALUES (%(pt_id)s, %(remote_id)s, %(amount)s, %(payin_refund_id)s,
coalesce(%(ctime)s, current_timestamp))
ON CONFLICT (payin_transfer, remote_id) DO UPDATE
SET amount = excluded.amount
, payin_refund = excluded.payin_refund
RETURNING *
""", locals())
| 39.964409 | 108 | 0.579965 |
d2e3ae6e131a5fa41bdb17b19d893736dfd4f861 | 4,967 | py | Python | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from vendor.errors.feature import FeatureProcessError
"""
:
**, **
"""
def f_assert_not_null(seq):
""""""
if seq in (None, '', [], {}, ()):
raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
if isinstance(seq, list):
for value in seq:
if value in (None, '', {}, [], ()):
raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
return seq
def f_assert_jsonpath_true(seq):
"""jsonpathtrue seq[]"""
if seq in ([],):
raise FeatureProcessError("jsonpath not find field")
return seq
def f_assert_must_int(value_list):
"""int"""
for value in value_list:
if not isinstance(value, int):
raise FeatureProcessError('%s f_assert_must_int Error' % value_list)
return value_list
def f_assert_must_list(value_list):
"""list"""
for value in value_list:
if not isinstance(value, list):
raise FeatureProcessError('%s f_assert_must_list Error' % value_list)
return value_list
def f_assert_must_dict(value_list):
"""dict"""
for value in value_list:
if not isinstance(value, dict):
raise FeatureProcessError('%s f_assert_must_dict Error' % value_list)
return value_list
def f_assert_must_digit(value_list, args=False):
"""
:param value_list:
:param args: false True
:return:
example
:value_list [-2,'-2', 3]
:args false
return
:value_list [-2,'-2', 3]
:args True
return [-2,'-2', 3]
"""
for value in value_list:
if args:
if not str(value).lstrip('-').isdigit():
raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
else:
if not str(value).isdigit():
raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
return value_list
def f_assert_must_basestring(value_list):
""""""
for value in value_list:
if not isinstance(value, basestring):
raise FeatureProcessError('%s f_assert_must_basestring Error' % value_list)
return value_list
def f_assert_must_digit_or_float(value_list, args=False):
"""
float, args=false True
:param value_list:
:param args: false True
:return:
example
:value_list [-2.0,'-2', 3]
:args false
return
:value_list [-2.0,'-2', 3]
:args True
return [-2.0,'-2', 3]
"""
for value in value_list:
if args:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()):
raise FeatureProcessError(
'%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
else:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').isdigit()):
raise FeatureProcessError(
'%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
return value_list
def f_assert_must_percent(value_list):
"""
"""
for value in value_list:
if not (str(value)[-1] == '%' and (str(value[:-1]).count('.') <= 1 and str(value[:-1]).replace('.', '').isdigit())):
raise FeatureProcessError(
'%s f_assert_must_percent Error' % value_list)
return value_list
def f_assert_must_between(value_list, args):
"""
args
:param value_list:
:param args:
:return:
example
:value_list [2, 2, 3]
:args [1,3]
:value_list ['-2', '-3', 3]
:args ['-5',3]
"""
assert len(args) == 2
for value in value_list:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
and float(args[0]) <= float(value) <= float(args[1])):
raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
return value_list
def f_assert_seq0_gte_seq1(value_list):
""""""
if not value_list[0] >= value_list[1]:
raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
return value_list
if __name__ == '__main__':
print f_assert_must_percent(['7.0%'])
| 29.742515 | 124 | 0.571774 |
d2e3af7e8020910904dd800db879455657d8308e | 4,993 | py | Python | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | [
"MIT"
] | null | null | null | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | [
"MIT"
] | null | null | null | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | [
"MIT"
] | null | null | null | import time
from os import system, walk
from config import CONFIG
from encry import ENCRY
from decry import DECRY
#
#
if __name__ == '__main__':
CONF = object
text = ''
cryptMode = ''
print_menu(cryptMode, CONF, text)
| 32.848684 | 143 | 0.488684 |
d2e4753ddf7c063ce13b4c81cfba0d2c46394e4c | 504 | py | Python | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe.model.document import Document
| 22.909091 | 58 | 0.730159 |
d2e52be160ba41f3c7d6be5212d1c7221d94eb66 | 3,211 | py | Python | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | [
"MIT"
] | null | null | null | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | [
"MIT"
] | 2 | 2021-09-21T11:28:55.000Z | 2021-09-21T12:13:48.000Z | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=unused-argument,pointless-statement
"""Tests for the `PseudoDojoFamily` class."""
import pytest
from aiida_pseudo.data.pseudo import UpfData, Psp8Data, PsmlData, JthXmlData
from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily
def test_type_string(clear_db):
"""Verify the `_type_string` class attribute is correctly set to the corresponding entry point name."""
assert PseudoDojoFamily._type_string == 'pseudo.family.pseudo_dojo' # pylint: disable=protected-access
def test_pseudo_types():
"""Test the `PseudoDojoFamily.pseudo_types` method."""
assert PseudoDojoFamily.pseudo_types == (UpfData, PsmlData, Psp8Data, JthXmlData)
def test_default_configuration():
"""Test the `PseudoDojoFamily.default_configuration` class attribute."""
assert isinstance(PseudoDojoFamily.default_configuration, PseudoDojoConfiguration)
def test_valid_configurations():
"""Test the `PseudoDojoFamily.valid_configurations` class attribute."""
valid_configurations = PseudoDojoFamily.valid_configurations
assert isinstance(valid_configurations, tuple)
for entry in valid_configurations:
assert isinstance(entry, PseudoDojoConfiguration)
def test_get_valid_labels():
"""Test the `PseudoDojoFamily.get_valid_labels` class method."""
valid_labels = PseudoDojoFamily.get_valid_labels()
assert isinstance(valid_labels, tuple)
for entry in valid_labels:
assert isinstance(entry, str)
def test_format_configuration_label():
"""Test the `PseudoDojoFamily.format_configuration_label` class method."""
configuration = PseudoDojoConfiguration('0.4', 'PBE', 'SR', 'standard', 'psp8')
assert PseudoDojoFamily.format_configuration_label(configuration) == 'PseudoDojo/0.4/PBE/SR/standard/psp8'
def test_constructor():
"""Test that the `PseudoDojoFamily` constructor validates the label."""
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily()
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily(label='nc-sr-04_pbe_standard_psp8')
label = PseudoDojoFamily.format_configuration_label(PseudoDojoFamily.default_configuration)
family = PseudoDojoFamily(label=label)
assert isinstance(family, PseudoDojoFamily)
| 40.64557 | 110 | 0.766116 |
d2e5ecb02a9dd4eeeac961445b6d9553ecd7b3a1 | 1,743 | py | Python | converter.py | Poudingue/Max2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | [
"WTFPL"
] | 4 | 2019-10-30T09:18:42.000Z | 2020-06-18T12:50:06.000Z | converter.py | Poudingue/Fbx2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | [
"WTFPL"
] | null | null | null | converter.py | Poudingue/Fbx2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | [
"WTFPL"
] | null | null | null | import sys
import os
if sys.version_info[0] != 3 :
print("Running in python "+sys.version_info[0]+", should be python 3.")
print("Please install python 3.7 from the official site python.org")
print("Exiting now.")
exit()
import shutil
import argparse
import fbx2tree
import builder_fromfbx
import time
# config is useful to keep info for the different modules
import config
parser = argparse.ArgumentParser()
parser.add_argument("file", help="file")
parser.add_argument("-v", "--verbose", help="Print more stuff", action="store_true")
parser.add_argument("-d", "--debug", help="Create intermediate xml files for debug", action="store_true")
parser.add_argument("--closest", help="Try to stick as close to the original materials in 3dsmax, even if it is at the expense of realism", action="store_true")
parser.add_argument("--realist", help="Try to make materials as realist as possible, even if it is at the expense of fidelity to the original scene", action="store_true")
args = parser.parse_args()
if args.closest and args.realist :
print("Incompatible options : --closest and --realist. Choose one, or neither for a balanced result")
exit(0)
fullname = args.file
if fullname.split(".")[-1].lower() != "fbx" :
print("The file is not an fbx file")
exit(0)
config.curr_place = os.path.dirname(os.path.abspath(__file__))
config.filename = ".".join(fullname.split(".")[:-1]).split("\\")[-1]#Remove extension, remove path.
config.filepath = "\\".join(fullname.split("\\")[:-1])+"\\"#Keep only path
config.verbose = args.verbose
config.debug = args.debug
config.closest = args.closest
config.realist = args.realist
fbxtree = fbx2tree.transform()
builder_fromfbx.build(fbxtree)
print("Conversion finished !")
| 34.86 | 176 | 0.724613 |
d2e64e022f433cd3fd044c614f4cd92d7a6f232d | 4,256 | py | Python | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | [
"MIT"
] | null | null | null | import sys
import json
import plotly
from flask import Flask
from flask import render_template, request
from plotly.graph_objects import Heatmap, Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
sys.path.append("common")
from common.nlp_common_utils import *
if len(sys.argv) == 1:
sys.argv.append('./data/DisasterResponse.db')
sys.argv.append('./models/classifier.pkl')
# this requires for joblib and pickle
def tokenize(text):
"""
Used a common utility functions for tokenize text in to cleaned token list.
INPUT:
text - raw message
OUTPUT:
clean_tokens -- cleaned tokenized list
"""
return tokenize_text(text)
# create a flask app
app = Flask(__name__, template_folder='app/templates')
#
database_file_location, model_location = sys.argv[1:]
# load data
engine = create_engine('sqlite:///{}'.format(database_file_location))
df = pd.read_sql_table('DisasterResponse', engine)
# category df
df_categories = df.iloc[:, 4:]
# load model
model = joblib.load(model_location)
def generate_graph_with_template(data, title, yaxis_title, xaxi_title):
"""
This common layout can be used to create Plotly graph layout.
INPUT:
data - a graph required JSON data i.e list
title - a tile of the chart
yaxis_title - Y title
xaxix_title - X title
OUTPUT:
layout for particular graph.
"""
return {
'data': [data],
'layout': {
'title': title,
'yaxis': {
'title': yaxis_title
},
'xaxis': {
'title': xaxi_title
}
}
}
def generate_message_genres_bar_chart():
"""
create a graph using extracted data for `genre`
"""
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
data = Bar(x=genre_names, y=genre_counts)
title = 'Distribution of Message Genres'
y_title = 'Count'
x_title = 'Genre'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_message_categories_distribution_bar_chart():
"""
create a graph for distribution of the messages.
"""
data = Bar(x=df_categories.columns,
y=list(df_categories.sum().sort_values(ascending=False)))
title = 'Distribution of Message Categories'
y_title = 'Count'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_two_cat_relation_heat_map():
"""
A correlation matrix for categories
"""
data = Heatmap(
z=df_categories.corr(),
y=df_categories.columns,
x=df_categories.columns)
title = 'Correlation Distribution of Categories'
y_title = 'Category'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
# index webpage displays cool visuals and receives user input text for model
# web page that handles user query and displays model results
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| 25.95122 | 79 | 0.672227 |
d2e65a9a5236dcdd44347a721b18b12179871e04 | 840 | py | Python | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | [
"MIT"
] | null | null | null | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | [
"MIT"
] | null | null | null | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | [
"MIT"
] | null | null | null | import re
import sys
from nltk.tokenize import word_tokenize
from unidecode import unidecode
from nltk.tokenize import sent_tokenize
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('fin')
parser.add_argument('fout')
args = parser.parse_args()
textproc = TextProc()
tokenizer = Tokenizer()
sentences=set()
with open(args.fin, 'r') as f:
count = 0
for line in f:
count+=1
sentences.add(line.strip())
if count % 100000==0:
print(count)
with open(args.fout, 'w') as f:
count = 0
group = ''
for s in sentences:
count+=1
if s !='':
group+=s+'\n'
if count % 20==0:
try:
p = sent_tokenize(unidecode(group))
f.write('\n'.join(p))
group = ''
except:
print("nltk error")
if count % 10000==0:
print(count)
| 20 | 45 | 0.613095 |
d2e6ff17f08688e760eb2d19c6c6dfcc805a369d | 1,071 | py | Python | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | [
"MIT"
] | 3 | 2020-10-07T03:21:07.000Z | 2020-10-13T14:18:49.000Z | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | [
"MIT"
] | null | null | null | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | [
"MIT"
] | null | null | null | """
Melhore o jogo do DESAFIO 028 onde o computador vai "pensar" em um numero entre 0 e 10.
S que agora o jogador vai tentar adivinhar at acertar, mostrando no final
quantos palpites foram necessrios para vencer
"""
"""
from random import randint
tentativas = 1
computador = randint(0, 10)
jogador = int(input('Informe um numero para jogarmos '))
while jogador != computador:
jogador = int(input('Errou. Tente novamente. '))
tentativas += 1
print(f'Acertou. Pensei no {computador} tambm.')
print(f'Total de tentativas {tentativas}.')
"""
# Corrigido do Professor
from random import randint
computador = randint(0, 10)
print('Pensei em um nmero entre 0 e 10')
acertou = False
palpites = 0
while not acertou:
jogador = int(input('Qual o seu palpite? '))
palpites += 1
if jogador == computador:
acertou = True
else:
if jogador < computador:
print('Mais..Tente mais uma vez.')
elif jogador > computador:
print('Menos. Tente mais uma vez.')
print(f'Acertou com {palpites} tentativas. Parabns!')
| 28.945946 | 87 | 0.687208 |
d2e7114d8d4486671f83a30e7420ce1d69cd65c1 | 1,550 | py | Python | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | [
"MIT"
] | 4 | 2016-08-09T21:25:23.000Z | 2019-08-16T21:55:17.000Z | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | [
"MIT"
] | 10 | 2015-01-25T21:25:22.000Z | 2021-01-28T19:50:22.000Z | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | [
"MIT"
] | 4 | 2015-05-06T21:45:39.000Z | 2018-07-02T16:47:36.000Z | # coding: utf-8
import re
import utility
from commands import Command
| 22.142857 | 100 | 0.623226 |
d2e7cc251d72d1b4b8afa5565221124b4f826ce6 | 457 | py | Python | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | [
"Apache-2.0"
] | 2 | 2019-10-09T06:59:47.000Z | 2019-10-10T03:20:17.000Z | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | [
"Apache-2.0"
] | null | null | null | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | [
"Apache-2.0"
] | 1 | 2021-11-25T06:41:17.000Z | 2021-11-25T06:41:17.000Z | import os
min=512
max=512
| 30.466667 | 96 | 0.68709 |
d2e833d9d9dbd44a801765209ab9f359cdd98029 | 6,770 | py | Python | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | null | null | null | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | null | null | null | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
] | 1 | 2018-11-04T18:09:38.000Z | 2018-11-04T18:09:38.000Z | """This module contains objects for saleorders endpoints"""
from flask import Flask, jsonify, request, abort, make_response
from flask_restful import Resource
from flask_jwt_extended import get_jwt_identity, jwt_required
from . import common_functions
from ..models import products, saleorders
from ..utils import verify
from .. import database
| 39.590643 | 180 | 0.578434 |
d2e9b98d6967be78af6014083084b5dab63e624c | 61 | py | Python | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 384 | 2021-02-24T01:40:40.000Z | 2022-03-30T10:30:59.000Z | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 1,067 | 2021-02-24T00:58:08.000Z | 2022-03-31T23:38:23.000Z | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
] | 128 | 2021-02-24T02:45:16.000Z | 2022-03-20T18:48:36.000Z | default_app_config = "nautobot.circuits.apps.CircuitsConfig"
| 30.5 | 60 | 0.852459 |
d2e9f3e2143b7da446094a72db5befcb7fc0a728 | 54,559 | py | Python | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | 4 | 2019-10-29T13:27:23.000Z | 2020-03-24T11:13:35.000Z | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | null | null | null | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
] | 3 | 2020-02-12T10:29:59.000Z | 2020-03-24T11:13:53.000Z | import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
# noinspection PyAbstractClass
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
    """
    Decompose the convergence of this profile into a multi-Gaussian
    expansion (MGE), fitting ``sersic_2d`` over radii spanning 1/100 to
    20 times the effective radius.

    Returns
    -------
    Tuple[List, List]
        The two lists produced by the underlying MGE decomposition
        (presumably amplitudes and Gaussian widths — confirm against
        ``MassProfileMGE``).
    """
    return self._decompose_convergence_via_mge(
        func=sersic_2d,
        radii_min=self.effective_radius / 100.0,
        radii_max=self.effective_radius * 20.0,
    )
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
    """
    Decompose the convergence of the Sersic profile into cored steep
    elliptical (CSE) profiles.

    The fitting settings (radial range in dex, number of CSEs and number
    of sample points) are chosen by ``cse_settings_from`` based on the
    profile's shape, with a mass-to-light gradient of zero. The
    decomposition itself fits the standard 2D Sersic profile
    ``sersic_2d``.

    Returns
    -------
    Tuple[List, List]
        A list of amplitudes and core radii of every cored steep
        elliptical (cse) the mass profile is decomposed into.
    """
    upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
        effective_radius=self.effective_radius,
        sersic_index=self.sersic_index,
        sersic_constant=self.sersic_constant,
        mass_to_light_gradient=0.0,
    )
    # The effective radius is rescaled to the elliptical geometry before
    # the radial fitting range (in dex) is applied.
    scaled_radius = self.effective_radius / np.sqrt(self.axis_ratio)
    return self._decompose_convergence_via_cse_from(
        func=sersic_2d,
        radii_min=scaled_radius / 10.0 ** lower_dex,
        radii_max=scaled_radius * 10.0 ** upper_dex,
        total_cses=total_cses,
        sample_points=sample_points,
    )
def with_new_normalization(self, normalization):
    """
    Return a shallow copy of this mass profile whose ``mass_to_light_ratio``
    is replaced by ``normalization``; the original profile is unchanged.
    """
    updated_profile = copy.copy(self)
    updated_profile.mass_to_light_ratio = normalization
    return updated_profile
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
def decompose_convergence_via_mge(self):
    """
    Decompose the convergence of this profile into a multi-Gaussian
    expansion (MGE), fitting ``sersic_radial_gradient_2D`` over radii
    spanning 1/100 to 20 times the effective radius.
    """
    return self._decompose_convergence_via_mge(
        func=sersic_radial_gradient_2D,
        radii_min=self.effective_radius / 100.0,
        radii_max=self.effective_radius * 20.0,
    )
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
    """
    Decompose the convergence of the radial-gradient Sersic profile into
    cored steep elliptical (CSE) profiles.

    The fitting settings (radial range in dex, number of CSEs and number
    of sample points) are chosen by ``cse_settings_from`` using this
    profile's mass-to-light gradient. The decomposition fits the 2D
    radial-gradient Sersic profile ``sersic_radial_gradient_2D``.

    Returns
    -------
    Tuple[List, List]
        A list of amplitudes and core radii of every cored steep
        elliptical (cse) the mass profile is decomposed into.
    """
    upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
        effective_radius=self.effective_radius,
        sersic_index=self.sersic_index,
        sersic_constant=self.sersic_constant,
        mass_to_light_gradient=self.mass_to_light_gradient,
    )
    # The effective radius is rescaled to the elliptical geometry before
    # the radial fitting range (in dex) is applied.
    scaled_radius = self.effective_radius / np.sqrt(self.axis_ratio)
    return self._decompose_convergence_via_cse_from(
        func=sersic_radial_gradient_2D,
        radii_min=scaled_radius / 10.0 ** lower_dex,
        radii_max=scaled_radius * 10.0 ** upper_dex,
        total_cses=total_cses,
        sample_points=sample_points,
    )
class SphSersicRadialGradient(EllSersicRadialGradient):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        intensity: float = 0.1,
        effective_radius: float = 0.6,
        sersic_index: float = 0.6,
        mass_to_light_ratio: float = 1.0,
        mass_to_light_gradient: float = 0.0,
    ):
        """
        Set up a spherical Sersic mass and light profile with a radial
        mass-to-light gradient.

        This is the spherical special case of ``EllSersicRadialGradient``,
        obtained by fixing the elliptical components to ``(0.0, 0.0)``.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        intensity
            Overall flux intensity normalisation in the light profiles
            (electrons per second).
        effective_radius
            The circular radius containing half the light of this profile.
        sersic_index
            Controls the concentration of the profile (lower -> less
            concentrated, higher -> more concentrated).
        mass_to_light_ratio
            The mass-to-light ratio of the light profile.
        mass_to_light_gradient
            The mass-to-light radial gradient.
        """
        profile_kwargs = dict(
            centre=centre,
            elliptical_comps=(0.0, 0.0),
            intensity=intensity,
            effective_radius=effective_radius,
            sersic_index=sersic_index,
            mass_to_light_ratio=mass_to_light_ratio,
            mass_to_light_gradient=mass_to_light_gradient,
        )
        super().__init__(**profile_kwargs)
class EllSersicCore(EllSersic):
def __init__(
    self,
    centre: Tuple[float, float] = (0.0, 0.0),
    elliptical_comps: Tuple[float, float] = (0.0, 0.0),
    effective_radius: float = 0.6,
    sersic_index: float = 4.0,
    radius_break: float = 0.01,
    intensity_break: float = 0.05,
    gamma: float = 0.25,
    alpha: float = 3.0,
    mass_to_light_ratio: float = 1.0,
):
    """
    The elliptical cored-Sersic light profile.

    An inner power law of logarithmic slope ``gamma`` transitions, with
    sharpness controlled by ``alpha``, to an outer Sersic profile at
    ``radius_break``.

    Parameters
    ----------
    centre
        The (y,x) arc-second coordinates of the profile centre.
    elliptical_comps
        The first and second ellipticity components of the elliptical
        coordinate system (see the module ``autogalaxy -> convert.py`` for
        the convention).
    effective_radius
        The circular radius containing half the light of this profile.
    sersic_index
        Controls the concentration of the profile (lower -> less
        concentrated, higher -> more concentrated).
    radius_break
        The break radius separating the inner power-law (with logarithmic
        slope ``gamma``) and the outer Sersic function.
    intensity_break
        The intensity at the break radius; it is also passed to the parent
        class as its overall ``intensity`` normalisation.
    gamma
        The logarithmic power-law slope of the inner core profile.
    alpha
        Controls the sharpness of the transition between the inner core and
        outer Sersic profiles.
    mass_to_light_ratio
        The mass-to-light ratio of the light profile.
    """
    super().__init__(
        centre=centre,
        elliptical_comps=elliptical_comps,
        intensity=intensity_break,
        effective_radius=effective_radius,
        sersic_index=sersic_index,
        mass_to_light_ratio=mass_to_light_ratio,
    )
    # Independent attribute assignments; order is not significant.
    self.gamma = gamma
    self.alpha = alpha
    self.radius_break = radius_break
    self.intensity_break = intensity_break
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
    """
    Evaluate the cored-Sersic light profile on a grid of radial coordinates.

    Parameters
    ----------
    grid_radii
        The radial distance from the centre of the profile for each
        coordinate on the grid.
    """
    # Inner core term: (1 + (r_b / r)^alpha)^(gamma / alpha).
    core_term = (
        1 + (self.radius_break / grid_radii) ** self.alpha
    ) ** (self.gamma / self.alpha)
    # Softened radius passed to the outer Sersic exponential:
    # ((r^alpha + r_b^alpha) / r_e^alpha)^(1 / (alpha * n)).
    softened_radius = (
        (grid_radii ** self.alpha + self.radius_break ** self.alpha)
        / self.effective_radius ** self.alpha
    ) ** (1.0 / (self.alpha * self.sersic_index))
    return self.intensity_prime * core_term * np.exp(
        -self.sersic_constant * softened_radius
    )
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
    """
    Evaluate the Chameleon light profile on a grid of radial coordinates.

    The profile is the difference of two cored isothermal-like terms whose
    core radii are rescaled by ``(1 + axis_ratio)^2``.

    Parameters
    ----------
    grid_radii
        The radial distance from the centre of the profile for each
        coordinate on the grid.
    """
    axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0
    inner_term = 1.0 / np.sqrt(
        np.square(grid_radii) + (4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor
    )
    outer_term = 1.0 / np.sqrt(
        np.square(grid_radii) + (4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor
    )
    return (self.intensity / (1 + self.axis_ratio)) * (inner_term - outer_term)
class SphChameleon(EllChameleon):
    def __init__(
        self,
        centre: Tuple[float, float] = (0.0, 0.0),
        intensity: float = 0.1,
        core_radius_0: float = 0.01,
        core_radius_1: float = 0.02,
        mass_to_light_ratio: float = 1.0,
    ):
        """
        The spherical Chameleon mass profile.

        Profile form:

            mass_to_light_ratio * intensity *
            (1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) -
             1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)^2))

        This is the spherical special case of ``EllChameleon``, obtained by
        fixing the elliptical components to ``(0.0, 0.0)``.

        Parameters
        ----------
        centre
            The (y,x) arc-second coordinates of the profile centre.
        intensity
            Overall intensity normalisation of the light profile (units are
            dimensionless and derived from the data the light profile's image
            is compared too, which is expected to be electrons per second).
        core_radius_0
            The core size of the first elliptical cored Isothermal profile.
        core_radius_1
            ``core_radius_0 + core_radius_1`` is the core size of the second
            elliptical cored Isothermal profile; ``core_radius_1`` is used
            here to avoid negative values.
        """
        super().__init__(
            centre=centre,
            elliptical_comps=(0.0, 0.0),
            intensity=intensity,
            core_radius_0=core_radius_0,
            core_radius_1=core_radius_1,
            mass_to_light_ratio=mass_to_light_ratio,
        )
def cse_settings_from(
    effective_radius, sersic_index, sersic_constant, mass_to_light_gradient
):
    """
    Choose the settings used to decompose a Sersic-like convergence profile
    into cored steep elliptical (CSE) components.

    The returned dex values bound the radial fitting range
    (``radius / 10**lower_dex`` to ``radius * 10**upper_dex``), while the
    number of CSE components and sample points are tuned empirically to the
    profile's concentration (``sersic_index``) and mass-to-light gradient.

    Parameters
    ----------
    effective_radius
        The circular radius containing half the light of the profile.
    sersic_index
        Controls the concentration of the profile.
    sersic_constant
        The Sersic constant corresponding to ``sersic_index``.
    mass_to_light_gradient
        The mass-to-light radial gradient; steep gradients (> 0.5) use a
        different set of empirical settings.

    Returns
    -------
    tuple
        ``(upper_dex, lower_dex, total_cses, sample_points)``.
    """
    low_concentration = sersic_index <= 1.2
    high_concentration = sersic_index > 3.8

    if mass_to_light_gradient > 0.5:
        if effective_radius > 0.2:
            upper_dex = np.min(
                [np.log10((18.0 / sersic_constant) ** sersic_index), 1.1]
            )
            if low_concentration:
                lower_dex, total_cses, sample_points = 6.0, 50, 80
            elif high_concentration:
                lower_dex, total_cses, sample_points = 6.5, 40, 50
            else:
                lower_dex, total_cses, sample_points = 6.0, 30, 50
        else:
            if low_concentration:
                upper_dex, lower_dex, total_cses, sample_points = 1.0, 4.5, 50, 80
            elif high_concentration:
                upper_dex, lower_dex, total_cses, sample_points = 1.5, 6.0, 40, 50
            else:
                upper_dex, lower_dex, total_cses, sample_points = 1.1, 6.0, 30, 50
    else:
        upper_dex = np.min(
            [
                np.log10((23.0 / sersic_constant) ** sersic_index),
                0.85 - np.log10(effective_radius),
            ]
        )
        radius_dex = np.log10(effective_radius)
        if 0.8 < sersic_index <= 0.9:
            upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index)
            lower_dex, total_cses, sample_points = 4.3 + radius_dex, 50, 80
        elif sersic_index <= 0.8:
            upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index)
            lower_dex, total_cses, sample_points = 4.0 + radius_dex, 50, 80
        elif high_concentration:
            lower_dex, total_cses, sample_points = 4.5 + radius_dex, 40, 50
        else:
            lower_dex, total_cses, sample_points = 3.5 + radius_dex, 30, 50

    return upper_dex, lower_dex, total_cses, sample_points
| 36.372667 | 129 | 0.560604 |
d2ea517f3b08f633622c54a6e6b06e1d6019f32c | 627 | py | Python | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
] | 1,165 | 2018-10-05T19:07:34.000Z | 2022-03-28T19:34:27.000Z | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
] | 334 | 2018-10-10T14:00:41.000Z | 2022-03-19T16:32:08.000Z | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
] | 268 | 2018-10-05T19:53:25.000Z | 2022-03-31T07:39:47.000Z | from core.terraform.resources import BaseTerraformVariable
| 31.35 | 98 | 0.6874 |
d2eb169f57649820eef340c3a134f871d837dd00 | 887 | py | Python | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
] | null | null | null | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
] | null | null | null | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
] | null | null | null |
node1 = Node("A");
node2 = Node("B");
node3 = Node("C");
node4 = Node("D");
node5 = Node("E");
node1.adjacencyList.append(node2);
node1.adjacencyList.append(node3);
node2.adjacencyList.append(node4);
node4.adjacencyList.append(node5);
bfs = BreadthFirstSearch();
bfs.bfs(node1); | 21.634146 | 83 | 0.626832 |
d2ed017d8f6bd12bbaded9891125e05125930fde | 3,932 | py | Python | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 1 | 2022-02-08T21:32:33.000Z | 2022-02-08T21:32:33.000Z | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 310 | 2020-03-12T16:02:13.000Z | 2022-03-31T06:01:49.000Z | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
] | 2 | 2021-09-22T00:13:58.000Z | 2021-09-22T15:06:27.000Z | """Connection object for Network Manager."""
from ipaddress import ip_address, ip_interface
from typing import Optional
from ...const import ATTR_ADDRESS, ATTR_PREFIX
from ...utils.gdbus import DBus
from ..const import (
DBUS_ATTR_ADDRESS_DATA,
DBUS_ATTR_CONNECTION,
DBUS_ATTR_GATEWAY,
DBUS_ATTR_ID,
DBUS_ATTR_IP4CONFIG,
DBUS_ATTR_IP6CONFIG,
DBUS_ATTR_NAMESERVER_DATA,
DBUS_ATTR_NAMESERVERS,
DBUS_ATTR_STATE,
DBUS_ATTR_TYPE,
DBUS_ATTR_UUID,
DBUS_NAME_CONNECTION_ACTIVE,
DBUS_NAME_IP4CONFIG,
DBUS_NAME_IP6CONFIG,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
)
from ..interface import DBusInterfaceProxy
from .configuration import IpConfiguration
| 33.606838 | 88 | 0.630214 |
d2efe900f19b7e3838e3eb40b9017e440e296e62 | 4,969 | py | Python | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
] | 2 | 2019-02-27T20:51:30.000Z | 2021-05-26T02:35:29.000Z | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
] | null | null | null | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
] | 1 | 2020-05-30T22:59:16.000Z | 2020-05-30T22:59:16.000Z | from __future__ import print_function, absolute_import
from .beats import Beat
from StringIO import StringIO
import sys
import os
import json
import urllib
import webbrowser
try:
    import pycurl
except ImportError:
    # pycurl is a compiled dependency that may be missing from the
    # virtualenv; fail fast with an actionable message instead of crashing
    # later when the qubole deployment platform is used. The previous bare
    # ``except:`` also swallowed unrelated errors such as KeyboardInterrupt.
    print("Need pycurl dependency to use qubole as the deployment platform. Run pip install pycurl in your virtualenv and try this again.")
    sys.exit(1)
| 33.126667 | 141 | 0.602737 |
d2efeac4ab430fe4ec37a8045db0d9bc80676c48 | 9,658 | py | Python | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | [
"MIT"
] | null | null | null | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import fnmatch
import hashlib
import logging
import os
import subprocess
from pathlib import Path
from urllib import request
from appimagebuilder.common import shell
from .package import Package
# External command-line tools the apt deploy code relies on (presumably
# checked for availability before a deploy runs — confirm against the caller).
DEPENDS_ON = ["dpkg-deb", "apt-get", "apt-key", "fakeroot", "apt-cache"]
| 37.003831 | 98 | 0.610685 |
d2f040bef7df7c165fa2e1f80723815e7bebcf83 | 11,453 | py | Python | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | [
"MIT"
] | null | null | null | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | [
"MIT"
] | null | null | null | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 *-*
import logging
from unittest import TestCase
from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat
log = logging.getLogger(__name__)
| 42.895131 | 100 | 0.516546 |
d2f17cb8a3f0726fbc17e46d02f025d7c4a03f17 | 4,322 | py | Python | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-20 16:07
from __future__ import unicode_literals
from django.db import migrations, models
| 65.484848 | 726 | 0.672837 |
d2f1e1f4951c3e0fd8684c1a41e6225fa4a4907c | 100 | py | Python | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | [
"MIT"
] | null | null | null | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | [
"MIT"
] | 8 | 2020-04-25T09:42:25.000Z | 2022-03-12T00:23:32.000Z | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 16.666667 | 35 | 0.73 |
d2f36e17b1fe05c90facefa1af9d3583979040ce | 220 | py | Python | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | [
"MIT"
] | null | null | null | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | [
"MIT"
] | null | null | null | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | [
"MIT"
] | null | null | null | from base_interval import BaseInterval
| 18.333333 | 38 | 0.718182 |
d2f4c426757a6a0f92d35c0788647479f59e49fb | 118,437 | py | Python | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | [
"MIT"
] | null | null | null | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | [
"MIT"
] | null | null | null | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | [
"MIT"
] | null | null | null | #
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Warning: This code was generated by a tool.
#
# Changes to this file may cause incorrect behavior and will be lost if the
# code is regenerated.
from datetime import datetime
import json
from requests import Session, Request
import time
import uuid
try:
from urllib import quote, unquote
except:
from urllib.parse import quote, unquote
from azure.common import AzureHttpError
from azure.mgmt.common import AzureOperationResponse, OperationStatusResponse, OperationStatus, Service
from azure.mgmt.common.arm import ResourceBase, ResourceBaseExtended
def parse_account_type(self, value):
    """
    Parse enum values for type AccountType.

    Args:
        value (string): The value to parse (case-insensitive).

    Returns:
        AccountType: The enum value.

    Raises:
        IndexError: If ``value`` does not name a known account type.
    """
    mapping = {
        'standard_lrs': AccountType.StandardLRS,
        'standard_zrs': AccountType.StandardZRS,
        'standard_grs': AccountType.StandardGRS,
        'standard_ragrs': AccountType.StandardRAGRS,
        'premium_lrs': AccountType.PremiumLRS,
    }
    normalized = value.lower()
    if normalized in mapping:
        return mapping[normalized]
    raise IndexError('value is outside the valid range.')
def account_type_to_string(self, value):
    """
    Convert an enum of type AccountType to a string.

    Args:
        value (AccountType): The value to convert to a string.

    Returns:
        string: The enum value as a string.

    Raises:
        IndexError: If ``value`` is not a known AccountType member.
    """
    # Equality-based table lookup keeps the original ``==`` comparison
    # semantics for the enum values.
    for member, text in (
        (AccountType.StandardLRS, 'Standard_LRS'),
        (AccountType.StandardZRS, 'Standard_ZRS'),
        (AccountType.StandardGRS, 'Standard_GRS'),
        (AccountType.StandardRAGRS, 'Standard_RAGRS'),
        (AccountType.PremiumLRS, 'Premium_LRS'),
    ):
        if value == member:
            return text
    raise IndexError('value is outside the valid range.')
def parse_key_name(self, value):
    """
    Parse enum values for type KeyName.

    Args:
        value (string): The value to parse (case-insensitive).

    Returns:
        KeyName: The enum value.

    Raises:
        IndexError: If ``value`` does not name a known key.
    """
    mapping = {'key1': KeyName.Key1, 'key2': KeyName.Key2}
    normalized = value.lower()
    if normalized in mapping:
        return mapping[normalized]
    raise IndexError('value is outside the valid range.')
def key_name_to_string(self, value):
    """
    Convert an enum of type KeyName to a string.

    Args:
        value (KeyName): The value to convert to a string.

    Returns:
        string: The enum value as a string.

    Raises:
        IndexError: If ``value`` is not a known KeyName member.
    """
    # Equality-based table lookup keeps the original ``==`` comparison
    # semantics for the enum values.
    for member, text in ((KeyName.Key1, 'key1'), (KeyName.Key2, 'key2')):
        if value == member:
            return text
    raise IndexError('value is outside the valid range.')
def get_create_operation_status(self, operation_status_link):
    """
    The Get Create Operation Status operation returns the status of the
    specified create operation. After calling the asynchronous Begin
    Create operation, you can call Get Create Operation Status to
    determine whether the operation has succeeded, failed, or is still in
    progress.

    Args:
        operation_status_link (string): The URL where the status of the
            long-running create operation can be checked.

    Returns:
        StorageAccountCreateResponse: The Create storage account operation
            response.

    Raises:
        ValueError: If ``operation_status_link`` is None.
        AzureHttpError: If the service responds with a status code other
            than 200, 202 or 500.
    """
    # Validate
    if operation_status_link is None:
        raise ValueError('operation_status_link cannot be None.')

    # Tracing

    # Construct URL — the link is used verbatim, with spaces percent-encoded.
    url = ''
    url = url + operation_status_link
    url = url.replace(' ', '%20')

    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'GET'

    # Set Headers — a fresh client request id for service-side correlation.
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())

    # Send Request
    response = self.send_request(http_request)
    body = response.content
    status_code = response.status_code
    # Only 200 (done), 202 (accepted) and 500 (treated as in-progress below)
    # are expected while polling; anything else is surfaced as an error.
    if status_code != 200 and status_code != 202 and status_code != 500:
        error = AzureHttpError(body, response.status_code)
        raise error

    # Create Result
    result = None
    # Deserialize Response — every field is optional in the JSON payload, so
    # each is copied onto the model only when present.
    if status_code == 200 or status_code == 202 or status_code == 500:
        response_content = body
        result = StorageAccountCreateResponse()
        response_doc = None
        if response_content:
            response_doc = json.loads(response_content.decode())

        if response_doc is not None:
            storage_account_instance = StorageAccount(tags={})
            result.storage_account = storage_account_instance

            id_value = response_doc.get('id', None)
            if id_value is not None:
                id_instance = id_value
                storage_account_instance.id = id_instance

            name_value = response_doc.get('name', None)
            if name_value is not None:
                name_instance = name_value
                storage_account_instance.name = name_instance

            type_value = response_doc.get('type', None)
            if type_value is not None:
                type_instance = type_value
                storage_account_instance.type = type_instance

            location_value = response_doc.get('location', None)
            if location_value is not None:
                location_instance = location_value
                storage_account_instance.location = location_instance

            # NOTE(review): the loop variable shadows the ``property`` builtin;
            # harmless here but worth renaming if this generated code is edited.
            tags_sequence_element = response_doc.get('tags', None)
            if tags_sequence_element is not None:
                for property in tags_sequence_element:
                    tags_key = property
                    tags_value = tags_sequence_element[property]
                    storage_account_instance.tags[tags_key] = tags_value

            properties_value = response_doc.get('properties', None)
            if properties_value is not None:
                provisioning_state_value = properties_value.get('provisioningState', None)
                if provisioning_state_value is not None:
                    provisioning_state_instance = provisioning_state_value
                    storage_account_instance.provisioning_state = provisioning_state_instance

                account_type_value = properties_value.get('accountType', None)
                if account_type_value is not None:
                    account_type_instance = account_type_value
                    storage_account_instance.account_type = account_type_instance

                primary_endpoints_value = properties_value.get('primaryEndpoints', None)
                if primary_endpoints_value is not None:
                    primary_endpoints_instance = Endpoints()
                    storage_account_instance.primary_endpoints = primary_endpoints_instance

                    blob_value = primary_endpoints_value.get('blob', None)
                    if blob_value is not None:
                        blob_instance = blob_value
                        primary_endpoints_instance.blob = blob_instance

                    queue_value = primary_endpoints_value.get('queue', None)
                    if queue_value is not None:
                        queue_instance = queue_value
                        primary_endpoints_instance.queue = queue_instance

                    table_value = primary_endpoints_value.get('table', None)
                    if table_value is not None:
                        table_instance = table_value
                        primary_endpoints_instance.table = table_instance

                primary_location_value = properties_value.get('primaryLocation', None)
                if primary_location_value is not None:
                    primary_location_instance = primary_location_value
                    storage_account_instance.primary_location = primary_location_instance

                status_of_primary_value = properties_value.get('statusOfPrimary', None)
                if status_of_primary_value is not None:
                    status_of_primary_instance = status_of_primary_value
                    storage_account_instance.status_of_primary = status_of_primary_instance

                last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None)
                if last_geo_failover_time_value is not None:
                    last_geo_failover_time_instance = last_geo_failover_time_value
                    storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance

                secondary_location_value = properties_value.get('secondaryLocation', None)
                if secondary_location_value is not None:
                    secondary_location_instance = secondary_location_value
                    storage_account_instance.secondary_location = secondary_location_instance

                status_of_secondary_value = properties_value.get('statusOfSecondary', None)
                if status_of_secondary_value is not None:
                    status_of_secondary_instance = status_of_secondary_value
                    storage_account_instance.status_of_secondary = status_of_secondary_instance

                creation_time_value = properties_value.get('creationTime', None)
                if creation_time_value is not None:
                    creation_time_instance = creation_time_value
                    storage_account_instance.creation_time = creation_time_instance

                custom_domain_value = properties_value.get('customDomain', None)
                if custom_domain_value is not None:
                    custom_domain_instance = CustomDomain()
                    storage_account_instance.custom_domain = custom_domain_instance

                    name_value2 = custom_domain_value.get('name', None)
                    if name_value2 is not None:
                        name_instance2 = name_value2
                        custom_domain_instance.name = name_instance2

                    use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
                    if use_sub_domain_value is not None:
                        use_sub_domain_instance = use_sub_domain_value
                        custom_domain_instance.use_sub_domain = use_sub_domain_instance

                secondary_endpoints_value = properties_value.get('secondaryEndpoints', None)
                if secondary_endpoints_value is not None:
                    secondary_endpoints_instance = Endpoints()
                    storage_account_instance.secondary_endpoints = secondary_endpoints_instance

                    blob_value2 = secondary_endpoints_value.get('blob', None)
                    if blob_value2 is not None:
                        blob_instance2 = blob_value2
                        secondary_endpoints_instance.blob = blob_instance2

                    queue_value2 = secondary_endpoints_value.get('queue', None)
                    if queue_value2 is not None:
                        queue_instance2 = queue_value2
                        secondary_endpoints_instance.queue = queue_instance2

                    table_value2 = secondary_endpoints_value.get('table', None)
                    if table_value2 is not None:
                        table_instance2 = table_value2
                        secondary_endpoints_instance.table = table_instance2

    # Missing/absent 'retryafter' header defaults to a zero-second backoff.
    result.status_code = status_code
    result.retry_after = int(response.headers.get('retryafter', '0'))
    result.request_id = response.headers.get('x-ms-request-id')

    # Map HTTP status to the long-running-operation status.
    # NOTE(review): the 409 branch appears unreachable — a 409 response
    # raises AzureHttpError above before reaching this point; confirm.
    if status_code == 409:
        result.status = OperationStatus.Failed
    if status_code == 500:
        result.status = OperationStatus.InProgress
    if status_code == 202:
        result.status = OperationStatus.InProgress
    if status_code == 200:
        result.status = OperationStatus.Succeeded

    return result
class StorageAccountOperations(object):
"""
Operations for managing storage accounts.
__NOTE__: An instance of this class is automatically created for an
instance of the [StorageManagementClient]
"""
def begin_create(self, resource_group_name, account_name, parameters):
    """
    Asynchronously creates a new storage account with the specified
    parameters. Existing accounts cannot be updated with this API and
    should instead use the Update Storage Account API. If an account is
    already created and a subsequent PUT request is issued with the exact
    same set of properties, then HTTP 200 would be returned.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.
        parameters (StorageAccountCreateParameters): The parameters to provide
        for the created account.

    Returns:
        StorageAccountCreateResponse: The Create storage account operation
        response.

    Raises:
        ValueError: If a required argument is None.
        IndexError: If account_name violates the length/character rules.
        AzureHttpError: If the service responds with a status other than
        200 or 202.
    """
    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if len(account_name) < 3:
        raise IndexError('account_name is outside the valid range.')
    if len(account_name) > 24:
        raise IndexError('account_name is outside the valid range.')
    for account_name_char in account_name:
        # Only lower-case letters and digits are permitted.
        if not account_name_char.islower() and not account_name_char.isdigit():
            raise IndexError('account_name is outside the valid range.')
    if parameters is None:
        raise ValueError('parameters cannot be None.')
    if parameters.account_type is None:
        raise ValueError('parameters.account_type cannot be None.')
    if parameters.location is None:
        raise ValueError('parameters.location cannot be None.')
    # Construct URL
    url = ''
    url = url + '/subscriptions/'
    if self.client.credentials.subscription_id is not None:
        url = url + quote(self.client.credentials.subscription_id)
    url = url + '/resourceGroups/'
    url = url + quote(resource_group_name)
    url = url + '/providers/Microsoft.Storage/storageAccounts/'
    url = url + quote(account_name)
    query_parameters = []
    query_parameters.append('api-version=2015-05-01-preview')
    if len(query_parameters) > 0:
        url = url + '?' + '&'.join(query_parameters)
    base_url = self.client.base_uri
    # Trim '/' character from the end of baseUrl and beginning of url.
    if base_url[len(base_url) - 1] == '/':
        base_url = base_url[0: len(base_url) - 1]
    if url[0] == '/':
        url = url[1:]
    url = base_url + '/' + url
    url = url.replace(' ', '%20')
    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'PUT'
    # Set Headers
    http_request.headers['Content-Type'] = 'application/json'
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
    # Serialize Request
    request_doc = {}
    request_doc['location'] = parameters.location
    if parameters.tags is not None:
        tags_dictionary = {}
        for tags_key in parameters.tags:
            tags_dictionary[tags_key] = parameters.tags[tags_key]
        request_doc['tags'] = tags_dictionary
    properties_value = {}
    request_doc['properties'] = properties_value
    # account_type was validated non-None above, so no fallback is needed.
    properties_value['accountType'] = str(parameters.account_type)
    request_content = json.dumps(request_doc)
    http_request.data = request_content
    http_request.headers['Content-Length'] = len(request_content)
    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200 and status_code != 202:
        raise AzureHttpError(body, response.status_code)

    def _assign(doc, target, mapping):
        # Copy each present (non-None) JSON field onto the model object.
        for json_key, attr in mapping:
            value = doc.get(json_key, None)
            if value is not None:
                setattr(target, attr, value)

    # Create Result / Deserialize Response
    result = None
    if status_code == 200 or status_code == 202:
        result = StorageAccountCreateResponse()
        response_doc = None
        if body:
            response_doc = json.loads(body.decode())
        if response_doc is not None:
            account = StorageAccount(tags={})
            result.storage_account = account
            _assign(response_doc, account,
                    (('id', 'id'), ('name', 'name'), ('type', 'type'),
                     ('location', 'location')))
            tags_sequence_element = response_doc.get('tags', None)
            if tags_sequence_element is not None:
                for tags_key in tags_sequence_element:
                    account.tags[tags_key] = tags_sequence_element[tags_key]
            props = response_doc.get('properties', None)
            if props is not None:
                _assign(props, account,
                        (('provisioningState', 'provisioning_state'),
                         ('accountType', 'account_type'),
                         ('primaryLocation', 'primary_location'),
                         ('statusOfPrimary', 'status_of_primary'),
                         ('lastGeoFailoverTime', 'last_geo_failover_time'),
                         ('secondaryLocation', 'secondary_location'),
                         ('statusOfSecondary', 'status_of_secondary'),
                         ('creationTime', 'creation_time')))
                for json_key, attr in (('primaryEndpoints', 'primary_endpoints'),
                                       ('secondaryEndpoints', 'secondary_endpoints')):
                    endpoints_doc = props.get(json_key, None)
                    if endpoints_doc is not None:
                        endpoints = Endpoints()
                        setattr(account, attr, endpoints)
                        _assign(endpoints_doc, endpoints,
                                (('blob', 'blob'), ('queue', 'queue'),
                                 ('table', 'table')))
                domain_doc = props.get('customDomain', None)
                if domain_doc is not None:
                    custom_domain = CustomDomain()
                    account.custom_domain = custom_domain
                    _assign(domain_doc, custom_domain,
                            (('name', 'name'),
                             ('useSubDomain', 'use_sub_domain')))
        result.status_code = status_code
        result.operation_status_link = response.headers.get('location')
        result.retry_after = int(response.headers.get('retryafter', '0'))
        result.request_id = response.headers.get('x-ms-request-id')
        if status_code == 409 or status_code == 400:
            result.status = OperationStatus.Failed
        if status_code == 202:
            # BUGFIX: a 202 response means the create is still running.
            # Previously result.status was left unset on 202, so pollers
            # could not detect the in-progress state; the sibling operation
            # in this file maps 202 to InProgress the same way.
            result.status = OperationStatus.InProgress
        if status_code == 200:
            result.status = OperationStatus.Succeeded
    return result
def check_name_availability(self, account_name):
    """
    Checks that account name is valid and is not in use.

    Args:
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.

    Returns:
        CheckNameAvailabilityResponse: The CheckNameAvailability operation
        response.
    """
    # Validate
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    # Construct URL
    pieces = ['/subscriptions/']
    subscription_id = self.client.credentials.subscription_id
    if subscription_id is not None:
        pieces.append(quote(subscription_id))
    pieces.append('/providers/Microsoft.Storage/checkNameAvailability')
    url = ''.join(pieces)
    query_parameters = ['api-version=2015-05-01-preview']
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    base_url = self.client.base_uri
    # Join base and relative parts with exactly one '/' between them.
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = (base_url + '/' + url).replace(' ', '%20')
    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'POST'
    # Set Headers
    http_request.headers['Content-Type'] = 'application/json'
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
    # Serialize Request
    request_doc = {
        'name': account_name,
        'type': 'Microsoft.Storage/storageAccounts',
    }
    request_content = json.dumps(request_doc)
    http_request.data = request_content
    http_request.headers['Content-Length'] = len(request_content)
    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, response.status_code)
    # Deserialize Response (only a 200 reaches this point)
    result = CheckNameAvailabilityResponse()
    response_doc = json.loads(body.decode()) if body else None
    if response_doc is not None:
        for json_key, attr in (('nameAvailable', 'name_available'),
                               ('reason', 'reason'),
                               ('message', 'message')):
            value = response_doc.get(json_key, None)
            if value is not None:
                setattr(result, attr, value)
    result.status_code = status_code
    result.request_id = response.headers.get('x-ms-request-id')
    return result
def create(self, resource_group_name, account_name, parameters):
    """
    Asynchronously creates a new storage account with the specified
    parameters, then polls the create-operation status until it leaves the
    InProgress state. Existing accounts cannot be updated with this API and
    should instead use the Update Storage Account API. If an account is
    already created and a subsequent create request is issued with the exact
    same set of properties, the request succeeds. The max number of storage
    accounts that can be created per subscription is limited to 20.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.
        parameters (StorageAccountCreateParameters): The parameters to provide
        for the created account.

    Returns:
        StorageAccountCreateResponse: The Create storage account operation
        response.
    """
    client2 = self.client
    response = client2.storage_accounts.begin_create(
        resource_group_name, account_name, parameters)
    # BUGFIX: this method previously compared against
    # OperationStatus.succeeded / OperationStatus.in_progress, but every
    # other use in this file is OperationStatus.Succeeded / .InProgress /
    # .Failed, so the lower-case attribute lookups raised AttributeError.
    if response.status == OperationStatus.Succeeded:
        return response
    result = client2.get_create_operation_status(response.operation_status_link)
    delay_in_seconds = response.retry_after
    if delay_in_seconds == 0:
        delay_in_seconds = 25  # default polling interval when no Retry-After
    if client2.long_running_operation_initial_timeout >= 0:
        delay_in_seconds = client2.long_running_operation_initial_timeout
    # Poll until the long-running operation is no longer in progress.
    while result.status == OperationStatus.InProgress:
        time.sleep(delay_in_seconds)
        result = client2.get_create_operation_status(response.operation_status_link)
        delay_in_seconds = result.retry_after
        if delay_in_seconds == 0:
            delay_in_seconds = 25
        if client2.long_running_operation_retry_timeout >= 0:
            delay_in_seconds = client2.long_running_operation_retry_timeout
    return result
def delete(self, resource_group_name, account_name):
    """
    Deletes a storage account in Microsoft Azure.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.

    Returns:
        AzureOperationResponse: A standard service response including an HTTP
        status code and request ID.
    """
    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if len(account_name) < 3 or len(account_name) > 24:
        raise IndexError('account_name is outside the valid range.')
    for character in account_name:
        # Only lower-case letters and digits are permitted.
        if not (character.islower() or character.isdigit()):
            raise IndexError('account_name is outside the valid range.')
    # Construct URL
    pieces = ['/subscriptions/']
    subscription_id = self.client.credentials.subscription_id
    if subscription_id is not None:
        pieces.append(quote(subscription_id))
    pieces.append('/resourceGroups/')
    pieces.append(quote(resource_group_name))
    pieces.append('/providers/Microsoft.Storage/storageAccounts/')
    pieces.append(quote(account_name))
    url = ''.join(pieces)
    query_parameters = ['api-version=2015-05-01-preview']
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    base_url = self.client.base_uri
    # Join base and relative parts with exactly one '/' between them.
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = (base_url + '/' + url).replace(' ', '%20')
    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'DELETE'
    # Set Headers
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
    # Send Request
    response = self.client.send_request(http_request)
    status_code = response.status_code
    if status_code != 200 and status_code != 204:
        raise AzureHttpError(response.content, status_code)
    # Create Result
    result = AzureOperationResponse()
    result.status_code = status_code
    result.request_id = response.headers.get('x-ms-request-id')
    return result
def get_properties(self, resource_group_name, account_name):
    """
    Returns the properties for the specified storage account including but
    not limited to name, account type, location, and account status. The
    ListKeys operation should be used to retrieve storage keys.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.

    Returns:
        StorageAccountGetPropertiesResponse: The Get storage account operation
        response.
    """
    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if len(account_name) < 3 or len(account_name) > 24:
        raise IndexError('account_name is outside the valid range.')
    for character in account_name:
        # Only lower-case letters and digits are permitted.
        if not (character.islower() or character.isdigit()):
            raise IndexError('account_name is outside the valid range.')
    # Construct URL
    pieces = ['/subscriptions/']
    subscription_id = self.client.credentials.subscription_id
    if subscription_id is not None:
        pieces.append(quote(subscription_id))
    pieces.append('/resourceGroups/')
    pieces.append(quote(resource_group_name))
    pieces.append('/providers/Microsoft.Storage/storageAccounts/')
    pieces.append(quote(account_name))
    url = ''.join(pieces)
    query_parameters = ['api-version=2015-05-01-preview']
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    base_url = self.client.base_uri
    # Join base and relative parts with exactly one '/' between them.
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = (base_url + '/' + url).replace(' ', '%20')
    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'GET'
    # Set Headers
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, status_code)

    def _assign(doc, target, mapping):
        # Copy each present (non-None) JSON field onto the model object.
        for json_key, attr in mapping:
            value = doc.get(json_key, None)
            if value is not None:
                setattr(target, attr, value)

    # Deserialize Response (only a 200 reaches this point)
    result = StorageAccountGetPropertiesResponse()
    response_doc = json.loads(body.decode()) if body else None
    if response_doc is not None:
        account = StorageAccount(tags={})
        result.storage_account = account
        _assign(response_doc, account,
                (('id', 'id'), ('name', 'name'), ('type', 'type'),
                 ('location', 'location')))
        tags = response_doc.get('tags', None)
        if tags is not None:
            for tags_key in tags:
                account.tags[tags_key] = tags[tags_key]
        props = response_doc.get('properties', None)
        if props is not None:
            _assign(props, account,
                    (('provisioningState', 'provisioning_state'),
                     ('accountType', 'account_type'),
                     ('primaryLocation', 'primary_location'),
                     ('statusOfPrimary', 'status_of_primary'),
                     ('lastGeoFailoverTime', 'last_geo_failover_time'),
                     ('secondaryLocation', 'secondary_location'),
                     ('statusOfSecondary', 'status_of_secondary'),
                     ('creationTime', 'creation_time')))
            for json_key, attr in (('primaryEndpoints', 'primary_endpoints'),
                                   ('secondaryEndpoints', 'secondary_endpoints')):
                endpoints_doc = props.get(json_key, None)
                if endpoints_doc is not None:
                    endpoints = Endpoints()
                    setattr(account, attr, endpoints)
                    _assign(endpoints_doc, endpoints,
                            (('blob', 'blob'), ('queue', 'queue'),
                             ('table', 'table')))
            domain_doc = props.get('customDomain', None)
            if domain_doc is not None:
                custom_domain = CustomDomain()
                account.custom_domain = custom_domain
                _assign(domain_doc, custom_domain,
                        (('name', 'name'),
                         ('useSubDomain', 'use_sub_domain')))
    result.status_code = status_code
    result.request_id = response.headers.get('x-ms-request-id')
    return result
def list(self):
    """
    Lists all the storage accounts available under the subscription. Note
    that storage keys are not returned; use the ListKeys operation for
    this.

    Returns:
        StorageAccountListResponse: The list storage accounts operation
        response.
    """
    # Construct URL
    pieces = ['/subscriptions/']
    subscription_id = self.client.credentials.subscription_id
    if subscription_id is not None:
        pieces.append(quote(subscription_id))
    pieces.append('/providers/Microsoft.Storage/storageAccounts')
    url = ''.join(pieces)
    query_parameters = ['api-version=2015-05-01-preview']
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    base_url = self.client.base_uri
    # Join base and relative parts with exactly one '/' between them.
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = (base_url + '/' + url).replace(' ', '%20')
    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'GET'
    # Set Headers
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, status_code)

    def _assign(doc, target, mapping):
        # Copy each present (non-None) JSON field onto the model object.
        for json_key, attr in mapping:
            value = doc.get(json_key, None)
            if value is not None:
                setattr(target, attr, value)

    # Deserialize Response (only a 200 reaches this point)
    result = StorageAccountListResponse(storage_accounts=[])
    response_doc = json.loads(body.decode()) if body else None
    if response_doc is not None:
        value_array = response_doc.get('value', None)
        if value_array is not None:
            for value_value in value_array:
                account = StorageAccount(tags={})
                result.storage_accounts.append(account)
                _assign(value_value, account,
                        (('id', 'id'), ('name', 'name'), ('type', 'type'),
                         ('location', 'location')))
                tags = value_value.get('tags', None)
                if tags is not None:
                    for tags_key in tags:
                        account.tags[tags_key] = tags[tags_key]
                props = value_value.get('properties', None)
                if props is not None:
                    _assign(props, account,
                            (('provisioningState', 'provisioning_state'),
                             ('accountType', 'account_type'),
                             ('primaryLocation', 'primary_location'),
                             ('statusOfPrimary', 'status_of_primary'),
                             ('lastGeoFailoverTime', 'last_geo_failover_time'),
                             ('secondaryLocation', 'secondary_location'),
                             ('statusOfSecondary', 'status_of_secondary'),
                             ('creationTime', 'creation_time')))
                    for json_key, attr in (('primaryEndpoints', 'primary_endpoints'),
                                           ('secondaryEndpoints', 'secondary_endpoints')):
                        endpoints_doc = props.get(json_key, None)
                        if endpoints_doc is not None:
                            endpoints = Endpoints()
                            setattr(account, attr, endpoints)
                            _assign(endpoints_doc, endpoints,
                                    (('blob', 'blob'), ('queue', 'queue'),
                                     ('table', 'table')))
                    domain_doc = props.get('customDomain', None)
                    if domain_doc is not None:
                        custom_domain = CustomDomain()
                        account.custom_domain = custom_domain
                        _assign(domain_doc, custom_domain,
                                (('name', 'name'),
                                 ('useSubDomain', 'use_sub_domain')))
        next_link_value = response_doc.get('nextLink', None)
        if next_link_value is not None:
            result.next_link = next_link_value
    result.status_code = status_code
    result.request_id = response.headers.get('x-ms-request-id')
    return result
def list_by_resource_group(self, resource_group_name):
    """
    Lists all the storage accounts available under the given resource
    group. Note that storage keys are not returned; use the ListKeys
    operation for this.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.

    Returns:
        StorageAccountListResponse: The list storage accounts operation
        response.

    Raises:
        ValueError: If resource_group_name is None.
        AzureHttpError: If the service responds with a status other than 200.
    """
    def _copy_storage_account_fields(account, doc):
        # Copy every field that is present in the JSON document onto the
        # model object; absent fields leave the model attribute untouched.
        for json_key, attr in (('id', 'id'), ('name', 'name'),
                               ('type', 'type'), ('location', 'location')):
            value = doc.get(json_key, None)
            if value is not None:
                setattr(account, attr, value)
        tags = doc.get('tags', None)
        if tags is not None:
            for tags_key in tags:
                account.tags[tags_key] = tags[tags_key]
        properties = doc.get('properties', None)
        if properties is None:
            return
        for json_key, attr in (
                ('provisioningState', 'provisioning_state'),
                ('accountType', 'account_type'),
                ('primaryLocation', 'primary_location'),
                ('statusOfPrimary', 'status_of_primary'),
                ('lastGeoFailoverTime', 'last_geo_failover_time'),
                ('secondaryLocation', 'secondary_location'),
                ('statusOfSecondary', 'status_of_secondary'),
                ('creationTime', 'creation_time')):
            value = properties.get(json_key, None)
            if value is not None:
                setattr(account, attr, value)
        for json_key, attr in (('primaryEndpoints', 'primary_endpoints'),
                               ('secondaryEndpoints', 'secondary_endpoints')):
            endpoints_doc = properties.get(json_key, None)
            if endpoints_doc is not None:
                # An Endpoints instance is attached even when all of its
                # sub-fields are absent, matching the wire-format contract.
                endpoints = Endpoints()
                setattr(account, attr, endpoints)
                for field in ('blob', 'queue', 'table'):
                    value = endpoints_doc.get(field, None)
                    if value is not None:
                        setattr(endpoints, field, value)
        custom_domain_doc = properties.get('customDomain', None)
        if custom_domain_doc is not None:
            custom_domain = CustomDomain()
            account.custom_domain = custom_domain
            name = custom_domain_doc.get('name', None)
            if name is not None:
                custom_domain.name = name
            use_sub_domain = custom_domain_doc.get('useSubDomain', None)
            if use_sub_domain is not None:
                custom_domain.use_sub_domain = use_sub_domain

    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')

    # Construct URL
    url = '/subscriptions/'
    if self.client.credentials.subscription_id is not None:
        url = url + quote(self.client.credentials.subscription_id)
    url = url + '/resourceGroups/'
    url = url + quote(resource_group_name)
    url = url + '/providers/Microsoft.Storage/storageAccounts'
    query_parameters = []
    query_parameters.append('api-version=2015-05-01-preview')
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    # Trim '/' character from the end of baseUrl and beginning of url.
    base_url = self.client.base_uri
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = base_url + '/' + url
    url = url.replace(' ', '%20')

    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'GET'

    # Set Headers
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())

    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, response.status_code)

    # Deserialize Response
    result = None
    if status_code == 200:
        result = StorageAccountListResponse(storage_accounts=[])
        response_doc = None
        if body:
            response_doc = json.loads(body.decode())
        if response_doc is not None:
            value_array = response_doc.get('value', None)
            if value_array is not None:
                for value_value in value_array:
                    storage_account = StorageAccount(tags={})
                    result.storage_accounts.append(storage_account)
                    _copy_storage_account_fields(storage_account, value_value)
            next_link_value = response_doc.get('nextLink', None)
            if next_link_value is not None:
                result.next_link = next_link_value
        result.status_code = status_code
        result.request_id = response.headers.get('x-ms-request-id')
    return result
def list_keys(self, resource_group_name, account_name):
    """
    Lists the access keys for the specified storage account.

    Args:
        resource_group_name (string): The name of the resource group.
        account_name (string): The name of the storage account. Must be
        3-24 characters long, using lower-case letters and digits only.

    Returns:
        StorageAccountListKeysResponse: The ListKeys operation response.

    Raises:
        ValueError: If a required argument is None.
        IndexError: If account_name violates the length/character rules.
        AzureHttpError: If the service responds with a status other than 200.
    """
    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if not 3 <= len(account_name) <= 24:
        raise IndexError('account_name is outside the valid range.')
    for account_name_char in account_name:
        # Storage account names allow only lower-case letters and digits.
        if not (account_name_char.islower() or account_name_char.isdigit()):
            raise IndexError('account_name is outside the valid range.')

    # Construct URL
    url = '/subscriptions/'
    if self.client.credentials.subscription_id is not None:
        url = url + quote(self.client.credentials.subscription_id)
    url = url + '/resourceGroups/'
    url = url + quote(resource_group_name)
    url = url + '/providers/Microsoft.Storage/storageAccounts/'
    url = url + quote(account_name)
    url = url + '/listKeys'
    query_parameters = []
    query_parameters.append('api-version=2015-05-01-preview')
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    # Trim '/' character from the end of baseUrl and beginning of url.
    base_url = self.client.base_uri
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = base_url + '/' + url
    url = url.replace(' ', '%20')

    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'POST'

    # Set Headers
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())

    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, response.status_code)

    # Deserialize Response
    result = None
    if status_code == 200:
        result = StorageAccountListKeysResponse()
        response_doc = None
        if body:
            response_doc = json.loads(body.decode())
        if response_doc is not None:
            storage_account_keys = StorageAccountKeys()
            result.storage_account_keys = storage_account_keys
            key1_value = response_doc.get('key1', None)
            if key1_value is not None:
                storage_account_keys.key1 = key1_value
            key2_value = response_doc.get('key2', None)
            if key2_value is not None:
                storage_account_keys.key2 = key2_value
        result.status_code = status_code
        result.request_id = response.headers.get('x-ms-request-id')
    return result
def regenerate_key(self, resource_group_name, account_name, regenerate_key):
    """
    Regenerates the access keys for the specified storage account.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.
        regenerate_key (KeyName): Specifies name of the key which should be
        regenerated.

    Returns:
        StorageAccountRegenerateKeyResponse: The RegenerateKey operation
        response.

    Raises:
        ValueError: If a required argument is None.
        IndexError: If account_name violates the length/character rules.
        AzureHttpError: If the service responds with a status other than 200.
    """
    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if not 3 <= len(account_name) <= 24:
        raise IndexError('account_name is outside the valid range.')
    for account_name_char in account_name:
        # Storage account names allow only lower-case letters and digits.
        if not (account_name_char.islower() or account_name_char.isdigit()):
            raise IndexError('account_name is outside the valid range.')
    if regenerate_key is None:
        raise ValueError('regenerate_key cannot be None.')

    # Construct URL
    url = '/subscriptions/'
    if self.client.credentials.subscription_id is not None:
        url = url + quote(self.client.credentials.subscription_id)
    url = url + '/resourceGroups/'
    url = url + quote(resource_group_name)
    url = url + '/providers/Microsoft.Storage/storageAccounts/'
    url = url + quote(account_name)
    url = url + '/regenerateKey'
    query_parameters = []
    query_parameters.append('api-version=2015-05-01-preview')
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    # Trim '/' character from the end of baseUrl and beginning of url.
    base_url = self.client.base_uri
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = base_url + '/' + url
    url = url.replace(' ', '%20')

    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'POST'

    # Set Headers
    http_request.headers['Content-Type'] = 'application/json'
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())

    # Serialize Request
    # regenerate_key was validated non-None above, so it is stringified
    # directly (the generated fallback default 'Key1' was dead code).
    request_doc = {'keyName': str(regenerate_key)}
    request_content = json.dumps(request_doc)
    http_request.data = request_content
    http_request.headers['Content-Length'] = len(request_content)

    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, response.status_code)

    # Deserialize Response
    result = None
    if status_code == 200:
        result = StorageAccountRegenerateKeyResponse()
        response_doc = None
        if body:
            response_doc = json.loads(body.decode())
        if response_doc is not None:
            storage_account_keys = StorageAccountKeys()
            result.storage_account_keys = storage_account_keys
            key1_value = response_doc.get('key1', None)
            if key1_value is not None:
                storage_account_keys.key1 = key1_value
            key2_value = response_doc.get('key2', None)
            if key2_value is not None:
                storage_account_keys.key2 = key2_value
        result.status_code = status_code
        result.request_id = response.headers.get('x-ms-request-id')
    return result
def update(self, resource_group_name, account_name, parameters):
    """
    Updates the account type or tags for a storage account. It can also be
    used to add a custom domain (note that custom domains cannot be added
    via the Create operation). Only one custom domain is supported per
    storage account. This API can only be used to update one of tags,
    accountType, or customDomain per call. To update multiple of these
    properties, call the API multiple times with one change per call.
    This call does not change the storage keys for the account. If you
    want to change storage account keys, use the RegenerateKey operation.
    The location and name of the storage account cannot be changed after
    creation.

    Args:
        resource_group_name (string): The name of the resource group within
        the users subscription.
        account_name (string): The name of the storage account within the
        specified resource group. Storage account names must be between 3 and
        24 characters in length and use numbers and lower-case letters only.
        parameters (StorageAccountUpdateParameters): The parameters to update
        on the account. Note that only one property can be changed at a time
        using this API.

    Returns:
        StorageAccountUpdateResponse: The Update storage account operation
        response.

    Raises:
        ValueError: If a required argument (or custom domain name) is None.
        IndexError: If account_name violates the length/character rules.
        AzureHttpError: If the service responds with a status other than 200.
    """
    def _copy_storage_account_fields(account, doc):
        # Copy every field that is present in the JSON document onto the
        # model object; absent fields leave the model attribute untouched.
        for json_key, attr in (('id', 'id'), ('name', 'name'),
                               ('type', 'type'), ('location', 'location')):
            value = doc.get(json_key, None)
            if value is not None:
                setattr(account, attr, value)
        tags = doc.get('tags', None)
        if tags is not None:
            for tags_key in tags:
                account.tags[tags_key] = tags[tags_key]
        properties = doc.get('properties', None)
        if properties is None:
            return
        for json_key, attr in (
                ('provisioningState', 'provisioning_state'),
                ('accountType', 'account_type'),
                ('primaryLocation', 'primary_location'),
                ('statusOfPrimary', 'status_of_primary'),
                ('lastGeoFailoverTime', 'last_geo_failover_time'),
                ('secondaryLocation', 'secondary_location'),
                ('statusOfSecondary', 'status_of_secondary'),
                ('creationTime', 'creation_time')):
            value = properties.get(json_key, None)
            if value is not None:
                setattr(account, attr, value)
        for json_key, attr in (('primaryEndpoints', 'primary_endpoints'),
                               ('secondaryEndpoints', 'secondary_endpoints')):
            endpoints_doc = properties.get(json_key, None)
            if endpoints_doc is not None:
                # An Endpoints instance is attached even when all of its
                # sub-fields are absent, matching the wire-format contract.
                endpoints = Endpoints()
                setattr(account, attr, endpoints)
                for field in ('blob', 'queue', 'table'):
                    value = endpoints_doc.get(field, None)
                    if value is not None:
                        setattr(endpoints, field, value)
        custom_domain_doc = properties.get('customDomain', None)
        if custom_domain_doc is not None:
            custom_domain = CustomDomain()
            account.custom_domain = custom_domain
            name = custom_domain_doc.get('name', None)
            if name is not None:
                custom_domain.name = name
            use_sub_domain = custom_domain_doc.get('useSubDomain', None)
            if use_sub_domain is not None:
                custom_domain.use_sub_domain = use_sub_domain

    # Validate
    if resource_group_name is None:
        raise ValueError('resource_group_name cannot be None.')
    if account_name is None:
        raise ValueError('account_name cannot be None.')
    if not 3 <= len(account_name) <= 24:
        raise IndexError('account_name is outside the valid range.')
    for account_name_char in account_name:
        # Storage account names allow only lower-case letters and digits.
        if not (account_name_char.islower() or account_name_char.isdigit()):
            raise IndexError('account_name is outside the valid range.')
    if parameters is None:
        raise ValueError('parameters cannot be None.')
    if parameters.custom_domain is not None:
        if parameters.custom_domain.name is None:
            raise ValueError('parameters.custom_domain.name cannot be None.')

    # Construct URL
    url = '/subscriptions/'
    if self.client.credentials.subscription_id is not None:
        url = url + quote(self.client.credentials.subscription_id)
    url = url + '/resourceGroups/'
    url = url + quote(resource_group_name)
    url = url + '/providers/Microsoft.Storage/storageAccounts/'
    url = url + quote(account_name)
    query_parameters = []
    query_parameters.append('api-version=2015-05-01-preview')
    if query_parameters:
        url = url + '?' + '&'.join(query_parameters)
    # Trim '/' character from the end of baseUrl and beginning of url.
    base_url = self.client.base_uri
    if base_url.endswith('/'):
        base_url = base_url[:-1]
    if url.startswith('/'):
        url = url[1:]
    url = base_url + '/' + url
    url = url.replace(' ', '%20')

    # Create HTTP transport objects
    http_request = Request()
    http_request.url = url
    http_request.method = 'PATCH'

    # Set Headers
    http_request.headers['Content-Type'] = 'application/json'
    http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())

    # Serialize Request
    request_doc = {}
    if parameters.tags is not None:
        request_doc['tags'] = dict(parameters.tags)
    properties_value = {}
    request_doc['properties'] = properties_value
    if parameters.account_type is not None:
        # account_type is guarded non-None above, so the generated fallback
        # default 'StandardLRS' was dead code and has been dropped.
        properties_value['accountType'] = str(parameters.account_type)
    if parameters.custom_domain is not None:
        custom_domain_value = {'name': parameters.custom_domain.name}
        if parameters.custom_domain.use_sub_domain is not None:
            custom_domain_value['useSubDomain'] = parameters.custom_domain.use_sub_domain
        properties_value['customDomain'] = custom_domain_value
    request_content = json.dumps(request_doc)
    http_request.data = request_content
    http_request.headers['Content-Length'] = len(request_content)

    # Send Request
    response = self.client.send_request(http_request)
    body = response.content
    status_code = response.status_code
    if status_code != 200:
        raise AzureHttpError(body, response.status_code)

    # Deserialize Response
    result = None
    if status_code == 200:
        result = StorageAccountUpdateResponse()
        response_doc = None
        if body:
            response_doc = json.loads(body.decode())
        if response_doc is not None:
            storage_account = StorageAccount(tags={})
            result.storage_account = storage_account
            _copy_storage_account_fields(storage_account, response_doc)
        result.status_code = status_code
        result.request_id = response.headers.get('x-ms-request-id')
    return result
| 44.012263 | 133 | 0.558753 |
d2f56951f340d9aa264e8c54df9fedc28d30df30 | 1,832 | py | Python | src/nucleotide/component/linux/gcc/atom/rtl.py | dmilos/nucleotide | aad5d60508c9e4baf4888069284f2cb5c9fd7c55 | [
"Apache-2.0"
] | 1 | 2020-09-04T13:00:04.000Z | 2020-09-04T13:00:04.000Z | src/nucleotide/component/linux/gcc/atom/rtl.py | dmilos/nucleotide | aad5d60508c9e4baf4888069284f2cb5c9fd7c55 | [
"Apache-2.0"
] | 1 | 2020-04-10T01:52:32.000Z | 2020-04-10T09:11:29.000Z | src/nucleotide/component/linux/gcc/atom/rtl.py | dmilos/nucleotide | aad5d60508c9e4baf4888069284f2cb5c9fd7c55 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import nucleotide
import nucleotide.component
import nucleotide.component.function
atom_linux_RTL = {
'platform' : {
'host' : 'Linux',
'guest' : 'Linux'
},
'cc' : {
'vendor': 'FSF',
'name' : 'gcc',
'version': 'X'
},
'config' : {
'LINKFLAGS' : _linux_RTL_LINKFLAGS
},
'name' :'RTL',
'class': [ 'RTL', 'linux:RTL' ]
}
| 27.343284 | 104 | 0.60917 |
d2f5d91da9ad5c16c7e8d867f33c570f4ad80d87 | 1,127 | py | Python | notebooks/denerator_tests/actions/config.py | Collen-Roller/Rasa-Denerator | 728d21d93f21a18c9de7be303ceae59392de9a41 | [
"MIT"
] | 11 | 2019-09-11T13:48:53.000Z | 2021-11-26T00:48:57.000Z | notebooks/denerator_tests/actions/config.py | Collen-Roller/Rasa-Denerator | 728d21d93f21a18c9de7be303ceae59392de9a41 | [
"MIT"
] | 2 | 2019-10-18T17:21:54.000Z | 2021-10-08T06:45:11.000Z | notebooks/denerator_tests/actions/config.py | Collen-Roller/Rasa-Denerator | 728d21d93f21a18c9de7be303ceae59392de9a41 | [
"MIT"
] | 4 | 2019-10-04T14:43:06.000Z | 2021-06-16T21:23:23.000Z | import os
policy_model_dir = os.environ.get("POLICY_MODEL_DIR", "models/dialogue/")
rasa_nlu_config = os.environ.get("RASA_NLU_CONFIG", "nlu_config.yml")
account_sid = os.environ.get("ACCOUNT_SID", "")
auth_token = os.environ.get("AUTH_TOKEN", "")
twilio_number = os.environ.get("TWILIO_NUMBER", "")
platform_api = os.environ.get("RASA_API_ENDPOINT_URL", "")
self_port = int(os.environ.get("SELF_PORT", "5001"))
core_model_dir = os.environ.get("CORE_MODEL_DIR", "models/dialogue/")
remote_core_endpoint = os.environ.get("RASA_REMOTE_CORE_ENDPOINT_URL", "")
rasa_core_token = os.environ.get("RASA_CORE_TOKEN", "")
mailchimp_api_key = os.environ.get("MAILCHIMP_API_KEY", "")
mailchimp_list = os.environ.get("MAILCHIMP_LIST", "")
gdrive_credentials = os.environ.get("GDRIVE_CREDENTIALS", "")
access_token = os.environ.get("TELEGRAM_TOKEN", "")
verify = os.environ.get("TELEGRAM_VERIFY", "rasas_bot")
webhook_url = os.environ.get("WEBHOOK_URL", "https://website-demo.rasa.com/webhook")
rasa_platform_token = os.environ.get("RASA_PLATFORM_TOKEN", "")
rasa_nlg_endpoint = os.environ.get("RASA_NLG_ENDPOINT_URL", "") | 30.459459 | 84 | 0.747116 |
d2f65b3512d928c10cc32ae1efdfb3cff693d569 | 876 | py | Python | python/moderation_text_token_demo.py | huaweicloud/huaweicloud-sdk-moderation | fa7cfda017a71ec8abf3afc57a0e476dd7508167 | [
"Apache-2.0"
] | 8 | 2019-06-04T06:24:54.000Z | 2022-01-29T13:16:53.000Z | python/moderation_text_token_demo.py | huaweicloud/huaweicloud-sdk-moderation | fa7cfda017a71ec8abf3afc57a0e476dd7508167 | [
"Apache-2.0"
] | 4 | 2021-12-14T21:21:03.000Z | 2022-01-04T16:34:33.000Z | python/moderation_text_token_demo.py | huaweicloud/huaweicloud-sdk-moderation | fa7cfda017a71ec8abf3afc57a0e476dd7508167 | [
"Apache-2.0"
] | 8 | 2019-08-12T02:18:03.000Z | 2021-11-30T10:39:23.000Z | # -*- coding:utf-8 -*-
from moderation_sdk.gettoken import get_token
from moderation_sdk.moderation_text import moderation_text
from moderation_sdk.utils import init_global_env
if __name__ == '__main__':
# Services currently support North China-Beijing(cn-north-4),China East-Shanghai1(cn-east-3), CN-Hong Kong(ap-southeast-1),AP-Singapore(ap-southeast-3)
init_global_env('cn-north-4')
#
# access moderation text enhance,posy data by token
#
user_name = '******'
password = '******'
account_name = '******' # the same as user_name in commonly use
token = get_token(user_name, password, account_name)
# call interface use the text
result = moderation_text(token, '666666luo+110qqfuck666666666666666', 'content',
['ad', 'politics', 'porn', 'abuse', 'contraband', 'flood'])
print(result)
| 38.086957 | 155 | 0.680365 |
d2f6c77eeb49683e8ab27570e5b6c4f101091a5b | 2,195 | py | Python | tests/system/action/test_general.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | tests/system/action/test_general.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | tests/system/action/test_general.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | from .base import BaseActionTestCase
| 35.403226 | 79 | 0.625968 |
d2f71173ca42ab7fa57a0943b698ed9189ef93d3 | 2,897 | py | Python | src/thead/cls/amsart.py | jakub-oprsal/thead | df175adf6ad0b3b16ec0703a31e7020327df4c92 | [
"MIT"
] | null | null | null | src/thead/cls/amsart.py | jakub-oprsal/thead | df175adf6ad0b3b16ec0703a31e7020327df4c92 | [
"MIT"
] | null | null | null | src/thead/cls/amsart.py | jakub-oprsal/thead | df175adf6ad0b3b16ec0703a31e7020327df4c92 | [
"MIT"
] | null | null | null | from .common import *
HEADER = r'''\usepackage{tikz}
\definecolor{purple}{cmyk}{0.55,1,0,0.15}
\definecolor{darkblue}{cmyk}{1,0.58,0,0.21}
\usepackage[colorlinks,
linkcolor=black,
urlcolor=darkblue,
citecolor=purple]{hyperref}
\urlstyle{same}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{claim}[theorem]{Claim}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
'''
| 27.074766 | 75 | 0.643079 |
d2f89e6b57c9a1b93947576a30ec79f4c0bc634e | 88 | py | Python | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride-Connect-Group-2 | fa95d38174ffd85461bf66f923c38a3908a469a7 | [
"MIT"
] | null | null | null | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride-Connect-Group-2 | fa95d38174ffd85461bf66f923c38a3908a469a7 | [
"MIT"
] | 2 | 2021-04-12T20:30:48.000Z | 2021-05-24T14:07:24.000Z | Workflow/packages/__init__.py | MATS64664-2021-Group-2/Hydride_Connection | fa95d38174ffd85461bf66f923c38a3908a469a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 11:31:06 2021
@author: a77510jm
"""
| 11 | 35 | 0.579545 |
d2f90e2105f715bfa385ede947f0041c8746e8c3 | 6,133 | py | Python | in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | 1 | 2021-12-13T05:51:18.000Z | 2021-12-13T05:51:18.000Z | in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | null | null | null | in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py | fedelopezar/nrpytutorial | 753acd954be4a2f99639c9f9fd5e623689fc7493 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# <a id='top'></a>
#
#
# # $\texttt{GiRaFFEfood}$: Initial data for $\texttt{GiRaFFE}$
#
# ## Aligned Rotator
#
# $$\label{top}$$
#
# This module provides another initial data option for $\texttt{GiRaFFE}$. This is a flat-spacetime test with initial data $$A_{\phi} = \frac{\mu \varpi}{r^3},$$ where $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$ is the cylindrical radius. We let $A_r = A_\theta = 0$.
#
# Additionally, the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$.
# <a id='preliminaries'></a>
#
# ### Steps 0-1: Preliminaries
# $$\label{preliminaries}$$
#
# \[Back to [top](#top)\]
#
# Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet.
# Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian
import NRPy_param_funcs as par
import indexedexp as ixp
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import reference_metric as rfm
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
B_p_aligned_rotator,R_NS_aligned_rotator = par.Cparameters("REAL",thismodule,
# B_p_aligned_rotator = the intensity of the magnetic field and
# R_NS_aligned_rotator= "Neutron star" radius
["B_p_aligned_rotator","R_NS_aligned_rotator"],
[1e-5, 1.0])
# The angular velocity of the "neutron star"
Omega_aligned_rotator = par.Cparameters("REAL",thismodule,"Omega_aligned_rotator",1e3)
# <a id='step2'></a>
#
# ### Step 2: Set the vectors A in Spherical coordinates
# $$\label{step2}$$
#
# \[Back to [top](#top)\]
#
# We will first build the fundamental vector $A_i$ in spherical coordinates (see [Table 3](https://arxiv.org/pdf/1704.00599.pdf)). Note that we use reference_metric.py to set $r$ and $\theta$ in terms of Cartesian coordinates; this will save us a step later when we convert to Cartesian coordinates. So, we set
# \begin{align}
# A_{\phi} &= \frac{\mu \varpi}{r^3}, \\
# \end{align}
# with $\mu = B_p R_{\rm NS} / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$
| 43.807143 | 334 | 0.633458 |
d2fa22173570793bad17191d495756a260b18a45 | 803 | py | Python | deploys/call_httpx.py | vic9527/ViClassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | 1 | 2021-11-03T05:05:34.000Z | 2021-11-03T05:05:34.000Z | deploys/call_httpx.py | vic9527/viclassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | null | null | null | deploys/call_httpx.py | vic9527/viclassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | null | null | null | """
requestspython
https://mp.weixin.qq.com/s/jqGx-4t4ytDDnXxDkzbPqw
HTTPX
https://zhuanlan.zhihu.com/p/103824900
"""
if __name__ == '__main__':
post_url = "http://127.0.0.1:8888"
post_data = {"image": 112, "name": 1}
response = interface(post_url, post_data)
print('status_code: ', response.status_code) #
# print('url: ', response.url) # url
# print('headers: ', response.headers) #
# print('cookies: ', response.cookies) # cookie
print('text: ', response.text) #
# print('content: ', response.content) #
| 27.689655 | 62 | 0.651308 |
d2fb4e383d869720b16333431cb622b5be807b1f | 9,034 | py | Python | src/rgt/THOR/THOR.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/rgt/THOR/THOR.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | src/rgt/THOR/THOR.py | mguo123/pan_omics | e1cacd543635b398fb08c0b31d08fa6b7c389658 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
THOR detects differential peaks in multiple ChIP-seq profiles associated
with two distinct biological conditions.
Copyright (C) 2014-2016 Manuel Allhoff (allhoff@aices.rwth-aachen.de)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Manuel Allhoff
"""
# Python
from __future__ import print_function
import sys
# Internal
from .dpc_help import get_peaks, _fit_mean_var_distr, initialize, merge_output, handle_input
from .tracker import Tracker
from .postprocessing import _output_BED, _output_narrowPeak
from ..THOR.neg_bin_rep_hmm import NegBinRepHMM, get_init_parameters, _get_pvalue_distr
from ..THOR.RegionGiver import RegionGiver
from ..THOR.postprocessing import filter_by_pvalue_strand_lag
from .. import __version__
# External
TEST = False #enable to test THOR locally
def _write_info(tracker, report, **data):
"""Write information to tracker"""
tracker.write(text=data['func_para'][0], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (a)")
tracker.write(text=data['func_para'][1], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (c)")
#tracker.write(text=data['init_mu'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (mu)")
#tracker.write(text=data['init_alpha'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (alpha)")
#tracker.write(text=data['m'].mu, header="Final HMM's Neg. Bin. Emission distribution (mu)")
#tracker.write(text=data['m'].alpha, header="Final HMM's Neg. Bin. Emission distribution (alpha)")
#tracker.write(text=data['m']._get_transmat(), header="Transmission matrix")
if report:
tracker.make_html()
def train_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker):
"""Train HMM"""
while True:
train_regions = region_giver.get_training_regionset()
exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=train_regions,
stepsize=options.stepsize, binsize=options.binsize, bamfiles=bamfiles,
exts=options.exts, inputs=inputs, exts_inputs=options.exts_inputs,
debug=options.debug, verbose=options.verbose, no_gc_content=options.no_gc_content,
factors_inputs=options.factors_inputs, chrom_sizes=chrom_sizes,
tracker=tracker, norm_regions=options.norm_regions,
scaling_factors_ip=options.scaling_factors_ip, save_wig=options.save_wig,
housekeeping_genes=options.housekeeping_genes, test=TEST, report=options.report,
chrom_sizes_dict=region_giver.get_chrom_dict(), end=True, counter=0, output_bw=False,
save_input=options.save_input, m_threshold=options.m_threshold,
a_threshold=options.a_threshold, rmdup=options.rmdup)
if exp_data.count_positive_signal() > len(train_regions.sequences[0]) * 0.00001:
tracker.write(text=" ".join(map(lambda x: str(x), exp_data.exts)), header="Extension size (rep1, rep2, input1, input2)")
tracker.write(text=map(lambda x: str(x), exp_data.scaling_factors_ip), header="Scaling factors")
break
func, func_para = _fit_mean_var_distr(exp_data.overall_coverage, options.name, options.debug,
verbose=options.verbose, outputdir=options.outputdir,
report=options.report, poisson=options.poisson)
exp_data.compute_putative_region_index()
print('Compute HMM\'s training set', file=sys.stderr)
training_set, s0, s1, s2 = exp_data.get_training_set(TEST, exp_data, options.name, options.foldchange,
options.threshold, options.size_ts, 3)
init_alpha, init_mu = get_init_parameters(s0, s1, s2)
m = NegBinRepHMM(alpha=init_alpha, mu=init_mu, dim_cond_1=dims[0], dim_cond_2=dims[1], func=func)
training_set_obs = exp_data.get_observation(training_set)
print('Train HMM', file=sys.stderr)
m.fit([training_set_obs], options.hmm_free_para)
distr = _get_pvalue_distr(m.mu, m.alpha, tracker)
return m, exp_data, func_para, init_mu, init_alpha, distr
def run_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker, exp_data, m, distr):
"""Run trained HMM chromosome-wise on genomic signal and call differential peaks"""
output, pvalues, ratios, no_bw_files = [], [], [], []
print("Compute HMM's posterior probabilities and Viterbi path to call differential peaks", file=sys.stderr)
for i, r in enumerate(region_giver):
end = True if i == len(region_giver) - 1 else False
print("- taking into account %s" % r.sequences[0].chrom, file=sys.stderr)
exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=r,
stepsize=options.stepsize, binsize=options.binsize,
bamfiles=bamfiles, exts=exp_data.exts, inputs=inputs,
exts_inputs=exp_data.exts_inputs, debug=options.debug,
verbose=False, no_gc_content=options.no_gc_content,
factors_inputs=exp_data.factors_inputs, chrom_sizes=chrom_sizes,
tracker=tracker, norm_regions=options.norm_regions,
scaling_factors_ip=exp_data.scaling_factors_ip, save_wig=options.save_wig,
housekeeping_genes=options.housekeeping_genes, test=TEST, report=False,
chrom_sizes_dict=region_giver.get_chrom_dict(), gc_content_cov=exp_data.gc_content_cov,
avg_gc_content=exp_data.avg_gc_content, gc_hist=exp_data.gc_hist,
end=end, counter=i, m_threshold=options.m_threshold, a_threshold=options.a_threshold,
rmdup=options.rmdup)
if exp_data.no_data:
continue
no_bw_files.append(i)
exp_data.compute_putative_region_index()
if exp_data.indices_of_interest is None:
continue
states = m.predict(exp_data.get_observation(exp_data.indices_of_interest))
inst_ratios, inst_pvalues, inst_output = get_peaks(name=options.name, states=states, DCS=exp_data,
distr=distr, merge=options.merge, exts=exp_data.exts,
pcutoff=options.pcutoff, debug=options.debug, p=options.par,
no_correction=options.no_correction,
merge_bin=options.merge_bin, deadzones=options.deadzones)
# if not inst_output:
output += inst_output
pvalues += inst_pvalues
ratios += inst_ratios
res_output, res_pvalues, res_filter_pass = filter_by_pvalue_strand_lag(ratios, options.pcutoff, pvalues, output,
options.no_correction, options.name,
options.singlestrand)
_output_BED(options.name, res_output, res_pvalues, res_filter_pass)
_output_narrowPeak(options.name, res_output, res_pvalues, res_filter_pass)
merge_output(bamfiles, dims, options, no_bw_files, chrom_sizes)
| 55.765432 | 132 | 0.649878 |
d2fb7b436323415834f7a74459e3f1d624c2d737 | 5,864 | py | Python | web/api/classroom.py | bbougon/crm-pilates | 47de4bad3d48208f9b499139fcddb7f8955b2509 | [
"MIT"
] | null | null | null | web/api/classroom.py | bbougon/crm-pilates | 47de4bad3d48208f9b499139fcddb7f8955b2509 | [
"MIT"
] | 2 | 2021-05-26T20:47:29.000Z | 2021-07-11T23:18:55.000Z | web/api/classroom.py | bbougon/crm-pilates | 47de4bad3d48208f9b499139fcddb7f8955b2509 | [
"MIT"
] | 1 | 2021-06-30T15:20:54.000Z | 2021-06-30T15:20:54.000Z | from http import HTTPStatus
from typing import Tuple
from uuid import UUID
from fastapi import status, APIRouter, Response, Depends, HTTPException
from command.command_handler import Status
from domain.classroom.classroom_creation_command_handler import ClassroomCreated
from domain.classroom.classroom_type import ClassroomSubject
from domain.commands import ClassroomCreationCommand, ClassroomPatchCommand
from domain.exceptions import DomainException, AggregateNotFoundException
from infrastructure.command_bus_provider import CommandBusProvider
from web.presentation.domain.detailed_classroom import DetailedClassroom
from web.presentation.service.classroom_service import get_detailed_classroom
from web.schema.classroom_response import ClassroomReadResponse, ClassroomCreatedResponse
from web.schema.classroom_schemas import ClassroomCreation, ClassroomPatch
router = APIRouter()
| 45.8125 | 120 | 0.603342 |
d2fb7f6e9f85db6c80048daaef30c307b92d98da | 2,145 | py | Python | community_codebook/eda.py | etstieber/ledatascifi-2022 | 67bc56a60ec498c62ceba03e0b6b9ae8f3fc7fd9 | [
"MIT"
] | null | null | null | community_codebook/eda.py | etstieber/ledatascifi-2022 | 67bc56a60ec498c62ceba03e0b6b9ae8f3fc7fd9 | [
"MIT"
] | 3 | 2022-01-30T18:34:22.000Z | 2022-02-10T15:48:48.000Z | community_codebook/eda.py | etstieber/ledatascifi-2022 | 67bc56a60ec498c62ceba03e0b6b9ae8f3fc7fd9 | [
"MIT"
] | 14 | 2022-01-26T10:45:19.000Z | 2022-03-28T15:59:56.000Z | ###############################################################
#
# This function is... INSUFFICIENT. It was developed as an
# illustration of EDA lessons in the 2021 class. It's quick and
# works well.
#
# Want a higher grade version of me? Then try pandas-profiling:
# https://github.com/pandas-profiling/pandas-profiling
#
###############################################################
def insufficient_but_starting_eda(df,cat_vars_list=None):
'''
Parameters
----------
df : DATAFRAME
cat_vars_list : LIST, optional
A list of strings containing variable names in the dataframe
for variables where you want to see the number of unique values
and the 10 most common values. Likely used for categorical values.
Returns
-------
None. It simply prints.
Description
-------
This function will print a MINIMUM amount of info about a new dataframe.
You should ****look**** at all this output below and consider the data
exploration and cleaning questions from
https://ledatascifi.github.io/ledatascifi-2021/content/03/02e_eda_golden.html#member
Also LOOK at more of the data manually.
Then write up anything notable you observe.
TIP: put this function in your codebook to reuse easily.
PROTIP: Improve this function (better outputs, better formatting).
FEATURE REQUEST: optionally print the nunique and top 10 values under the describe matrix
FEATURE REQUEST: optionally print more stats (percentiles)
'''
print(df.head(), '\n---')
print(df.tail(), '\n---')
print(df.columns, '\n---')
print("The shape is: ",df.shape, '\n---')
print("Info:",df.info(), '\n---') # memory usage, name, dtype, and # of non-null obs (--> # of missing obs) per variable
print(df.describe(), '\n---') # summary stats, and you can customize the list!
if cat_vars_list != None:
for var in cat_vars_list:
print(var,"has",df[var].nunique(),"values and its top 10 most common are:")
print(df[var].value_counts().head(10), '\n---')
| 35.75 | 124 | 0.607459 |
d2fd24c8d34e5c25a5210eb1ab2a18308730ef2b | 2,778 | py | Python | angr/codenode.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 2 | 2018-05-02T17:41:36.000Z | 2020-05-18T02:49:16.000Z | angr/codenode.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | null | null | null | angr/codenode.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2019-08-07T01:42:01.000Z | 2019-08-07T01:42:01.000Z | import logging
l = logging.getLogger("angr.codenode")
from .block import Block
| 27.78 | 92 | 0.62527 |
d2fd57ba506b050706da4ce9ab6b0a547ce3b622 | 806 | py | Python | 第12章/program/Requester/Launcher.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 274 | 2018-10-01T11:07:25.000Z | 2022-03-17T13:48:45.000Z | 第12章/program/Requester/Launcher.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 6 | 2019-02-28T14:18:21.000Z | 2022-03-02T14:57:39.000Z | 第12章/program/Requester/Launcher.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 110 | 2018-10-16T06:08:37.000Z | 2022-03-16T08:19:29.000Z | import os
scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path) #
os.system('scrapyd-deploy')
import json
import time
import requests
start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
'spider': 'Example'}
end_url = 'http://45.76.110.210:6800/cancel.json'
end_data = {'project': 'DeploySpider'}
result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text
# result_dict = json.loads(result)
# job_id = result_dict['jobid']
# print(f'jobid{job_id}')
#
# time.sleep(5)
# end_data['job'] = job_id
# result = requests.post(end_url, data=end_data).text
# print(result)
| 26.866667 | 84 | 0.719603 |
d2fdf4a6c5371384e165ae59f3bd959f997c90d9 | 511 | py | Python | unittest_example/mathfunc.py | RobinCPC/experiment_code | 0d3791a97815651945ad7787ba4e6c7df037740b | [
"MIT"
] | null | null | null | unittest_example/mathfunc.py | RobinCPC/experiment_code | 0d3791a97815651945ad7787ba4e6c7df037740b | [
"MIT"
] | null | null | null | unittest_example/mathfunc.py | RobinCPC/experiment_code | 0d3791a97815651945ad7787ba4e6c7df037740b | [
"MIT"
] | null | null | null | """
Simple math operating functions for unit test
"""
def add(a, b):
"""
Adding to parameters and return result
:param a:
:param b:
:return:
"""
return a + b
def minus(a, b):
"""
subtraction
:param a:
:param b:
:return:
"""
return a - b
def multi(a, b):
"""
multiple
:param a:
:param b:
:return:
"""
return a * b
def divide(a, b):
"""
division
:param a:
:param b:
:return:
"""
return a // b
| 11.613636 | 45 | 0.473581 |
d2ff009598eedc70cbe497c5d19827bdffd07954 | 144,055 | py | Python | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | [
"MIT"
] | 1 | 2019-10-25T17:43:20.000Z | 2019-10-25T17:43:20.000Z | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | [
"MIT"
] | null | null | null | test/test_parameters.py | HubukiNinten/imgaug | 2570c5651ed1c90addbaffc0f8be226646c55334 | [
"MIT"
] | null | null | null | from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def test___init__(self):
param = iap.Beta(0.5, 0.5)
assert (
param.__str__()
== param.__repr__()
== "Beta("
"Deterministic(float 0.50000000), "
"Deterministic(float 0.50000000)"
")"
)
def test_draw_sample(self):
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
assert samples.shape == (100, 1000)
assert np.all(
np.logical_and(
0 - _eps(samples) <= samples,
samples <= 1.0 + _eps(samples)
)
)
def test_via_comparison_to_np_beta(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).beta(
a=0.5, b=0.5, size=(100, 1000))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
class TestDeterministic(unittest.TestCase):
class TestFromLowerResolution(unittest.TestCase):
class TestClip(unittest.TestCase):
class TestDiscretize(unittest.TestCase):
# TODO why are these tests applied to DiscreteUniform instead of Uniform?
class TestMultiply(unittest.TestCase):
class TestDivide(unittest.TestCase):
class TestAdd(unittest.TestCase):
class TestSubtract(unittest.TestCase):
class TestPower(unittest.TestCase):
class TestAbsolute(unittest.TestCase):
class TestRandomSign(unittest.TestCase):
class TestForceSign(unittest.TestCase):
class TestPositive(unittest.TestCase):
class TestNegative(unittest.TestCase):
class TestIterativeNoiseAggregator(unittest.TestCase):
class TestSigmoid(unittest.TestCase):
| 34.712048 | 83 | 0.557204 |
9600225ca5edde94d999985a5e32bc3c498cea99 | 1,731 | py | Python | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | null | null | null | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | 13 | 2019-12-25T21:04:49.000Z | 2020-01-04T20:25:05.000Z | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | [
"MIT"
] | null | null | null | """
jsnek_saved_games_dataset that returns flat (vectorized) data
"""
from .jsnek_base_dataset import JSnekBaseDataset
from .. import utils
| 26.630769 | 82 | 0.622184 |
960075d5d481ca0949f159a6dd4c4e2e599c3197 | 391 | py | Python | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | [
"MIT"
] | null | null | null | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | [
"MIT"
] | null | null | null | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-15 15:40
from django.db import migrations, models
| 20.578947 | 58 | 0.595908 |
96021a52c512a37d56b88bb769ca1d2cad4e3a5c | 490 | py | Python | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | [
"MIT"
] | 2 | 2021-06-25T00:24:13.000Z | 2021-07-10T13:00:39.000Z | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | [
"MIT"
] | null | null | null | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | [
"MIT"
] | 2 | 2021-05-16T01:40:39.000Z | 2021-07-10T12:59:07.000Z | import os
import databases
import sqlalchemy
DB_CONNECTOR = os.getenv('APP_DB_CONNECTOR')
DB_USERNAME = os.getenv('APP_DB_USERNAME')
DB_PASSWORD = os.getenv('APP_DB_PASSWORD')
DB_HOST = os.getenv('APP_DB_HOST')
DB_PORT = os.getenv('APP_DB_PORT')
DB_DATABASE = os.getenv('APP_DB_DATABASE')
DB_URL = f'{DB_CONNECTOR}://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}'
db: databases.Database = databases.Database(DB_URL)
metadata: sqlalchemy.MetaData = sqlalchemy.MetaData()
| 28.823529 | 90 | 0.777551 |
96023217ef1c244003018d7cd3aa5cc748e1d708 | 7,631 | py | Python | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | [
"MIT"
] | null | null | null | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | [
"MIT"
] | null | null | null | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | [
"MIT"
] | null | null | null | import os
import time
import argparse
import torchvision
import torch
import torch.nn as nn
from util import AverageMeter, TwoAugUnsupervisedDataset
from encoder import SmallAlexNet
from align_uniform import align_loss, uniform_loss
import json
if __name__ == '__main__':
main()
| 43.357955 | 129 | 0.657974 |
96024e0d78c0a224ad13e044ee7fc8d5953df2e6 | 259 | py | Python | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | [
"MIT"
] | null | null | null | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | [
"MIT"
] | null | null | null | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | [
"MIT"
] | 1 | 2018-04-11T00:34:09.000Z | 2018-04-11T00:34:09.000Z | from flask import Flask
""" 1. Creating a flask application instance, the name argument is passed to flask
application constructor. It's used to determine the root path"""
app = Flask(__name__)
app.config.from_object('config')
from app import views, models
| 28.777778 | 82 | 0.776062 |
96037b162a17a26e6138061ce184f323626f7486 | 5,305 | py | Python | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | [
"Apache-2.0"
] | null | null | null | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | [
"Apache-2.0"
] | 13 | 2021-12-15T18:39:52.000Z | 2022-03-31T00:08:21.000Z | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2021 Open Networking Foundation
import time
from ipaddress import IPv4Address
from pprint import pprint
from trex_test import TrexTest
from grpc_test import *
from trex_stl_lib.api import (
STLVM,
STLPktBuilder,
STLStream,
STLTXCont,
)
import ptf.testutils as testutils
UPF_DEST_MAC = "0c:c4:7a:19:6d:ca"
# Port setup
TREX_SENDER_PORT = 0
TREX_RECEIVER_PORT = 1
BESS_SENDER_PORT = 2
BESS_RECEIVER_PORT = 3
# Test specs
DURATION = 10
RATE = 100_000 # 100 Kpps
UE_COUNT = 10_000 # 10k UEs
GTPU_PORT = 2152
PKT_SIZE = 64
| 30.314286 | 96 | 0.535344 |
9604a31aa1a2fd0161bb919247c6389804233e2e | 6,209 | py | Python | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | [
"MIT"
] | 1 | 2021-08-22T13:39:56.000Z | 2021-08-22T13:39:56.000Z | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | [
"MIT"
] | 36 | 2021-09-01T19:12:17.000Z | 2022-03-18T23:43:13.000Z | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | [
"MIT"
] | 5 | 2021-09-10T21:01:07.000Z | 2021-09-17T16:35:21.000Z | from rest_framework import serializers
from archives_app.documents_models import (FrequencyRelation, BoxArchiving,
AdministrativeProcess, OriginBox,
FrequencySheet, DocumentTypes)
| 29.995169 | 82 | 0.573522 |
960632beca7334764b877e64f50cf461743b9b2b | 7,132 | py | Python | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 33 | 2017-08-18T16:31:27.000Z | 2022-03-28T09:43:50.000Z | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 319 | 2017-01-12T14:22:07.000Z | 2022-03-23T20:53:25.000Z | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 17 | 2017-10-13T07:12:28.000Z | 2022-02-11T14:42:18.000Z | # -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2017 Science and Technology Facilities Council
#
# All rights reserved.
#
# Modifications made as part of the fparser project are distributed
# under the following license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
# Modified M.Hambley, UK Met Office
##############################################################################
'''
Test battery associated with fparser.common.base_classes package.
'''
import re
import pytest
import fparser.common.base_classes
import fparser.common.readfortran
import fparser.common.sourceinfo
import fparser.common.utils
from fparser import api
def test_statement_logging(log, monkeypatch):
'''
Tests the Statement class' logging methods.
'''
reader = fparser.common.readfortran.FortranStringReader("dummy = 1")
parser = DummyParser(reader)
monkeypatch.setattr(fparser.common.base_classes.Statement,
'process_item', lambda x: None, raising=False)
unit_under_test = fparser.common.base_classes.Statement(parser, None)
unit_under_test.error('Scary biscuits')
assert(log.messages == {'critical': [],
'debug': [],
'error': ['Scary biscuits'],
'info': [],
'warning': []})
log.reset()
unit_under_test.warning('Trepidacious Cetations')
assert(log.messages == {'critical': [],
'debug': [],
'error': [],
'info': [],
'warning': ['Trepidacious Cetations']})
log.reset()
unit_under_test.info('Hilarious Ontologies')
assert(log.messages == {'critical': [],
'debug': [],
'error': [],
'info': ['Hilarious Ontologies'],
'warning': []})
def test_log_comment_mix(log):
'''
Tests that unexpected Fortran 90 comment in fixed format source is logged.
'''
code = ' x=1 ! Cheese'
parent = fparser.common.readfortran.FortranStringReader(
code, ignore_comments=False)
parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
item = fparser.common.readfortran.Line(code, (1, 1), None, None, parent)
with pytest.raises(fparser.common.utils.AnalyzeError):
__ = BeginHarness(parent, item)
expected = ' 1: x=1 ! Cheese <== ' \
+ 'no parse pattern found for "x=1 ! cheese" ' \
+ "in 'BeginHarness' block, " \
+ 'trying to remove inline comment (not in Fortran 77).'
result = log.messages['warning'][0].split('\n')[1]
assert result == expected
def test_log_unexpected(log):
'''
Tests that an unexpected thing between begin and end statements logs an
event.
'''
code = [' jumper', ' end thing']
parent = fparser.common.readfortran.FortranStringReader('\n'.join(code))
parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
item = fparser.common.readfortran.Line(code[0], (1, 1), None, None, parent)
with pytest.raises(fparser.common.utils.AnalyzeError):
__ = BeginThing(parent, item)
expected = ' 1: jumper <== no parse pattern found for "jumper" ' \
"in 'BeginThing' block."
result = log.messages['warning'][0].split('\n')[1]
assert result == expected
def test_space_after_enddo():
'''Make sure that there is no space after an 'END DO' without name,
but there is a space if there is a name after 'END DO'.
'''
# Unnamed loop:
source_str = '''\
subroutine foo
integer i, r
do i = 1,100
r = r + 1
end do
end subroutine foo
'''
tree = api.parse(source_str, isfree=True, isstrict=False)
assert "END DO " not in tree.tofortran()
# Named loop:
source_str = '''\
subroutine foo
integer i, r
loop1: do i = 1,100
r = r + 1
end do loop1
end subroutine foo
'''
tree = api.parse(source_str, isfree=True, isstrict=False)
assert "END DO loop1" in tree.tofortran()
| 35.839196 | 79 | 0.599411 |
96065ad383494de22a076bf5a911760ad23ad0e8 | 87 | py | Python | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | [
"MIT"
] | 3 | 2016-09-08T09:28:02.000Z | 2019-08-25T11:56:26.000Z | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | [
"MIT"
] | 97 | 2016-08-20T17:11:34.000Z | 2022-03-29T07:52:13.000Z | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | [
"MIT"
] | 7 | 2016-11-26T20:38:29.000Z | 2021-08-20T11:11:47.000Z | from elsa import cli
from pyvecorg import app
cli(app, base_url='http://pyvec.org')
| 12.428571 | 37 | 0.735632 |
96072e15a870bb0da5695f16be671c56e832f75e | 10,397 | py | Python | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | [
"MIT"
] | 1 | 2016-06-15T17:21:22.000Z | 2016-06-15T17:21:22.000Z | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | [
"MIT"
] | null | null | null | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | [
"MIT"
] | null | null | null | import curtsies.events as ev
import sys
DELIMITERS = ' .'
WHITESPACE = ' '
def print_console(txt, npadding=0, newline=False, flush=True):
"""
Prints txt without newline, cursor positioned at the end.
:param txt: The text to print
:param length: The txt will be padded with spaces to fit this length
:param newline: If True, a newline character will be appended
:return:
"""
sys.stdout.write('\r{0}{1}'.format(txt, WHITESPACE * npadding))
if newline:
sys.stdout.write('\n')
if flush:
sys.stdout.flush()
def find_next_in_list(lst, what, start=0, reverse=False):
"""
Finds the next occurrence of what in lst starting at start.
:param lst: The list to search
:param what: The item to find, should be an iterable
:param start: The starting position in the list
:param reverse: Set this to True in order to traverse the list towards 0
:return: False if no occurrence found, index otherwise
"""
if start < 0 or start >= len(lst):
return False
end = -1 if reverse else len(lst)
step = -1 if reverse else 1
for i in range(start, end, step):
if lst[i] in what:
return i
return False
| 28.1 | 101 | 0.541406 |
960742a391af9a30c0acaaa433fd60815de5da1f | 1,601 | py | Python | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
] | 1 | 2019-02-10T12:35:14.000Z | 2019-02-10T12:35:14.000Z | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
] | null | null | null | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
] | 1 | 2019-02-10T15:02:30.000Z | 2019-02-10T15:02:30.000Z | from django.core.exceptions import ValidationError
from django.utils import timezone
from django.test import TestCase
from events.models import Event, Invitee
from users.tests.factories import UserFactory
from users.models import get_sentinel_user
| 36.386364 | 83 | 0.67208 |
9607844773359aa6aa0c7976c01c1f1c73d9292a | 145 | py | Python | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | [
"MIT"
] | null | null | null | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | [
"MIT"
] | null | null | null | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | [
"MIT"
] | null | null | null | cryptolist = ['ETH','BTC','XRP','EOS','ADA','NEO','STEEM',
'BTS','ZEC','XMR','XVG','XEM','OMG','MIOTA','XTZ','SC',
'CVC','BAT','XLM','ZRX','VEN'] | 48.333333 | 58 | 0.524138 |
96085c19f88d75b4448b45a1368f150dc76f3edb | 2,615 | py | Python | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | [
"Apache-2.0"
] | null | null | null | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | [
"Apache-2.0"
] | null | null | null | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import numpy as np
from nnabla.utils.data_source_loader import load_image
from nnabla.utils.data_iterator import data_iterator_simple
from .test_data_iterator import check_data_iterator_result
| 34.866667 | 94 | 0.66348 |
96090a33ab17b3ef5237b33e54e263f6d813f39f | 819 | py | Python | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | [
"MIT"
] | null | null | null | # n
#
# b < c(c, d)(a, b)
#
#
#
# :
#
# : [[1,2], [2,3], [3,4]]
# : 2
# : [1,2] -> [3,4]
#
#
# [1, 1000]
#
# LeetCode
# https://leetcode-cn.com/problems/maximum-length-of-pair-chain
#
from typing import List
if __name__ == '__main__':
s = Solution()
assert s.findLongestChain([[1, 2], [2, 3], [3, 4]]) == 2 | 22.75 | 70 | 0.57265 |
960b4705f7f9212fc6fe401b9f516bcb627b27a2 | 19,044 | py | Python | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | [
"MIT"
] | null | null | null | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | [
"MIT"
] | null | null | null | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | [
"MIT"
] | null | null | null | from FactorTest.FactorTestPara import *
from FactorTest.FactorTestBox import *
| 45.342857 | 313 | 0.56548 |
960b6014f14f9123b0ec09ae60429c45aaf956f5 | 3,094 | py | Python | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | [
"MIT"
] | null | null | null | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | [
"MIT"
] | null | null | null | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | [
"MIT"
] | null | null | null | from __future__ import division
from qm.qm_calculator import QM_calculator
from misc import call_name
import os
| 43.577465 | 108 | 0.61894 |
960c00e5d06118cad7de3e170d517ce0e7416494 | 11,668 | py | Python | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows Registry Module 'module.reg'
:platform: Windows
:maturity: develop
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
versionadded:: 2016.11.0
'''
# Import Python future libs
from __future__ import absolute_import
from __future__ import unicode_literals
# Import Python Libs
import sys
import time
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.modules.reg as win_mod_reg
from salt.ext import six
try:
from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module
NO_WINDOWS_MODULES = False
except ImportError:
NO_WINDOWS_MODULES = True
PY2 = sys.version_info[0] == 2
# The following used to make sure we are not
# testing already existing data
# Note strftime retunrns a str, so we need to make it unicode
TIMEINT = int(time.time())
if PY2:
TIME_INT_UNICODE = six.text_type(TIMEINT)
TIMESTR = time.strftime('%X %x %Z').decode('utf-8')
else:
TIMESTR = time.strftime('%X %x %Z')
TIME_INT_UNICODE = str(TIMEINT) # pylint: disable=R0204
# we do not need to prefix this with u, as we are
# using from __future__ import unicode_literals
UNICODETEST_WITH_SIGNS = 'Testing Unicode \N{COPYRIGHT SIGN},\N{TRADE MARK SIGN},\N{REGISTERED SIGN} '+TIMESTR
UNICODETEST_WITHOUT_SIGNS = 'Testing Unicode'+TIMESTR
UNICODE_TEST_KEY = 'UnicodeKey \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
UNICODE_TEST_KEY_DEL = 'Delete Me \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
| 38.508251 | 110 | 0.573706 |
960c27eda1d8cb31a885faeca6a1d05da5d1bc43 | 9,197 | py | Python | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | [
"MIT"
] | 166 | 2015-01-01T14:14:56.000Z | 2022-02-20T21:59:45.000Z | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | [
"MIT"
] | 31 | 2015-01-04T13:18:02.000Z | 2022-01-10T18:40:52.000Z | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | [
"MIT"
] | 75 | 2015-01-03T10:33:41.000Z | 2022-02-22T21:21:47.000Z | import traceback
import re
import sys
import logging
"""
**********
Note by wvmarle:
This file contains the complete code from chained_exception.py plus the
error handling code from GlacierWrapper.py, allowing it to be used in other
modules like glaciercorecalls as well.
**********
"""
if __name__ == '__main__':
try:
try:
raise ChildrenException("parent")
except ChildrenException, e:
raise ParentException("children", cause=e)
except ParentException, e:
e.write(indentation='|| ')
| 38.805907 | 100 | 0.577145 |
960d016ae24c4293c672a990c11ba81afe431984 | 29,912 | py | Python | modes/import_corpus.py | freingruber/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | 91 | 2022-01-24T07:32:34.000Z | 2022-03-31T23:37:15.000Z | modes/import_corpus.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | null | null | null | modes/import_corpus.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | [
"Apache-2.0"
] | 11 | 2022-01-24T14:21:12.000Z | 2022-03-31T23:37:23.000Z | # Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This mode can be started by passing the "--import_corpus_mode" flag to the fuzzer
# or by starting the fuzzer the first time (when no OUTPUT directory exists yet).
#
# The script imports new testcases into the current corpus.
# Please note that the progress of the script is not linear (especially when creating an initial corpus).
# The script will start slow (because it will find a lot of testcases with new behavior and this requires
# standardization, minimization & state creation.
# These operations are slow because they require to restart the JS engine multiple times,
# and therefore it will take a longer time. After some time, the import-mode will be faster because it finds less files
# with new coverage. At the end, the mode will again be slow (or maybe very slow) because it's processing the
# bigger testcases (testcases are sorted based on file size and handled from small files to big files).
# State creation for big input files is extremely slow.
# It's maybe better to skip these big testcases and continue because later testcases can maybe further be
# minimized (which would then be again fast). => I created my initial corpus with a different script,
# skipping the big testcases is therefore not implemented here yet (and must manually be done).
# TODO: In my original code I also removed v8 native functions because they quickly lead to crashes
# But I couldn't find the code anymore. I guess this should be implemented in this file somewhere at the end?
# This affect at least the functions:
# %ProfileCreateSnapshotDataBlob
# %LiveEditPatchScript
# %IsWasmCode
# %IsAsmWasmCode
# %ConstructConsString
# %HaveSameMap
# %IsJSReceiver
# %HasSmiElements
# %HasObjectElements
# %HasDoubleElements
# %HasDictionaryElements
# %HasHoleyElements
# %HasSloppyArgumentsElements
# %HaveSameMap
# %HasFastProperties
# %HasPackedElements
#
# More information can be found in my master thesis page 115.
import utils
import os
import config as cfg
import native_code.speed_optimized_functions as speed_optimized_functions
from native_code.executor import Execution_Status
import sys
import random
import string
import re
code_prefix = "function my_opt_func() {\n"
code_suffix1 = """
}
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix2 = """
}
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix3 = """
}
my_opt_func();
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
# These are just used for debugging
debugging_number_exceptions = 0
debugging_number_success = 0
debugging_number_new_coverage = 0
# This is a debug version of the above one.
# The above one does all the required calculations (standardization, minimization, state creation)
# which is very slow. But If I just want to quickly check how many files I can import,
# then I'm using this debugging versions (which skips all these steps)
# This version does also not restart the exec engine.
# To use it, just replace the call with this function
# TODO: This is pretty old code and needs a lot of refactoring/improvement ...
# TODO: Also better implement these whole "\t" and " " and "\" checking...
# One testcase file can contain multiple testcases
# That's why this function returns a list of samples
| 46.7375 | 233 | 0.688587 |
960d83d2c94c5959a98a0bd8469e0e2f1a880ff6 | 5,590 | py | Python | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | [
"MIT"
] | null | null | null | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | [
"MIT"
] | null | null | null | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
""" Simple occupancy-grid-based mapping without localization.
Subscribed topics:
/scan
Published topics:
/map
/map_metadata
Author: Nathan Sprague
Version: 2/13/14
"""
import rospy
from nav_msgs.msg import OccupancyGrid, MapMetaData
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import LaserScan
import numpy as np
if __name__ == '__main__':
try:
m = Mapper()
except rospy.ROSInterruptException:
pass
| 33.878788 | 75 | 0.581038 |
960dcc8a44c5847743443e7deb1bcd0169e59d72 | 469 | py | Python | flags.py | oaxiom/glbase3 | 9d3fc1efaad58ffb97e5b8126c2a96802daf9bac | [
"MIT"
] | 8 | 2019-06-11T02:13:20.000Z | 2022-02-22T09:27:23.000Z | flags.py | JackNg88/glbase3 | 4af190d06b89ef360dcba201d9e4e81f41ef8379 | [
"MIT"
] | 6 | 2020-12-18T15:08:14.000Z | 2021-05-22T00:31:57.000Z | flags.py | JackNg88/glbase3 | 4af190d06b89ef360dcba201d9e4e81f41ef8379 | [
"MIT"
] | 2 | 2020-05-06T04:27:03.000Z | 2022-02-22T09:28:25.000Z | """
flags.py
. should be renamed helpers...
. This file is scheduled for deletion
"""
"""
valid accessory tags:
"any_tag": {"code": "code_insert_as_string"} # execute arbitrary code to construct this key.
"dialect": csv.excel_tab # dialect of the file, default = csv, set this to use tsv. or sniffer
"skip_lines": number # number of lines to skip at the head of the file.
"skiptill": skip until I see the first instance of <str>
"""
# lists of format-specifiers.
| 23.45 | 94 | 0.712154 |
960deebf26b738896cbcd2ee2bd2d46605e19141 | 2,106 | py | Python | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | [
"MIT"
] | 2 | 2020-04-18T14:34:44.000Z | 2020-04-18T14:34:47.000Z | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | [
"MIT"
] | null | null | null | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | [
"MIT"
] | null | null | null | import os
import tornado.ioloop
import tornado.web
from jet_bridge.handlers.temporary_redirect import TemporaryRedirectHandler
from jet_bridge_base import settings as base_settings
from jet_bridge_base.views.api import ApiView
from jet_bridge_base.views.image_resize import ImageResizeView
from jet_bridge_base.views.file_upload import FileUploadView
from jet_bridge_base.views.message import MessageView
from jet_bridge_base.views.model import ModelViewSet
from jet_bridge_base.views.model_description import ModelDescriptionView
from jet_bridge_base.views.register import RegisterView
from jet_bridge_base.views.reload import ReloadView
from jet_bridge_base.views.sql import SqlView
from jet_bridge import settings, media
from jet_bridge.handlers.view import view_handler
from jet_bridge.handlers.not_found import NotFoundHandler
from jet_bridge.router import Router
| 39 | 99 | 0.738367 |
960e05f94b044cbb96eace708beb765aa68c9708 | 1,553 | py | Python | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
] | null | null | null | import requests
from ...shared.exceptions import MediaServiceException
from ...shared.interfaces.logging import LoggingModule
from .interface import MediaService
| 37.878049 | 79 | 0.647778 |
960faa636c63399c1988c58ce0e7c98b90dc797e | 169 | py | Python | Lib/async/test/test_echoupper.py | pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | Lib/async/test/test_echoupper.py | tpn/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | Lib/async/test/test_echoupper.py | tpn/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | import async
from async.services import EchoUpperData
server = async.server('10.211.55.3', 20007)
async.register(transport=server, protocol=EchoUpperData)
async.run()
| 21.125 | 56 | 0.792899 |
960fe6f4df41a131c506151d154738d3ea6e3c53 | 533 | py | Python | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z | from ..alert_code import AlertCode
| 38.071429 | 74 | 0.806754 |
96106fecaab4ad8d3cfef08e2a652f7ab8fec921 | 422 | py | Python | blaze/compute/tests/test_pmap.py | jdmcbr/blaze | 79515a8f0d25a0ff7f87a4cfbed615858241c832 | [
"BSD-3-Clause"
] | 1 | 2015-05-17T23:17:12.000Z | 2015-05-17T23:17:12.000Z | blaze/compute/tests/test_pmap.py | jreback/blaze | 85c39335cac4ef7f2921a7f621bc13525880fc44 | [
"BSD-3-Clause"
] | null | null | null | blaze/compute/tests/test_pmap.py | jreback/blaze | 85c39335cac4ef7f2921a7f621bc13525880fc44 | [
"BSD-3-Clause"
] | null | null | null | from blaze import compute, resource, symbol, discover
from blaze.utils import example
flag = [False]
| 21.1 | 53 | 0.637441 |
9610832f6a592c17ec9781319d909b5b964100ab | 15,186 | py | Python | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | [
"BSD-3-Clause-Clear"
] | 7 | 2018-02-02T07:50:20.000Z | 2021-03-14T22:46:58.000Z | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | [
"BSD-3-Clause-Clear"
] | 2 | 2019-02-14T08:38:54.000Z | 2020-02-19T08:08:02.000Z | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | [
"BSD-3-Clause-Clear"
] | 1 | 2019-10-12T23:38:44.000Z | 2019-10-12T23:38:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mwtab.mwschema
~~~~~~~~~~~~~~
This module provides schema definitions for different sections of the
``mwTab`` Metabolomics Workbench format.
"""
import sys
from schema import Schema, Optional, Or
if sys.version_info.major == 2:
str = unicode
metabolomics_workbench_schema = Schema(
{
"VERSION": str,
"CREATED_ON": str,
Optional("STUDY_ID"): str,
Optional("ANALYSIS_ID"): str,
Optional("PROJECT_ID"): str,
Optional("HEADER"): str,
Optional("DATATRACK_ID"): str
}
)
project_schema = Schema(
{
"PROJECT_TITLE": str,
Optional("PROJECT_TYPE"): str,
"PROJECT_SUMMARY": str,
"INSTITUTE": str,
Optional("DEPARTMENT"): str,
Optional("LABORATORY"): str,
"LAST_NAME": str,
"FIRST_NAME": str,
"ADDRESS": str,
"EMAIL": str,
"PHONE": str,
Optional("FUNDING_SOURCE"): str,
Optional("PROJECT_COMMENTS"): str,
Optional("PUBLICATIONS"): str,
Optional("CONTRIBUTORS"): str,
Optional("DOI"): str
}
)
study_schema = Schema(
{
"STUDY_TITLE": str,
Optional("STUDY_TYPE"): str,
"STUDY_SUMMARY": str,
"INSTITUTE": str,
Optional("DEPARTMENT"): str,
Optional("LABORATORY"): str,
"LAST_NAME": str,
"FIRST_NAME": str,
"ADDRESS": str,
"EMAIL": str,
"PHONE": str,
Optional("NUM_GROUPS"): str,
Optional("TOTAL_SUBJECTS"): str,
Optional("NUM_MALES"): str,
Optional("NUM_FEMALES"): str,
Optional("STUDY_COMMENTS"): str,
Optional("PUBLICATIONS"): str, # assumed
Optional("SUBMIT_DATE"): str # assumed
}
)
subject_schema = Schema(
{
"SUBJECT_TYPE": str,
"SUBJECT_SPECIES": str,
Optional("TAXONOMY_ID"): str,
Optional("GENOTYPE_STRAIN"): str,
Optional("AGE_OR_AGE_RANGE"): str,
Optional("WEIGHT_OR_WEIGHT_RANGE"): str,
Optional("HEIGHT_OR_HEIGHT_RANGE"): str,
Optional("GENDER"): str,
Optional("HUMAN_RACE"): str,
Optional("HUMAN_ETHNICITY"): str,
Optional("HUMAN_TRIAL_TYPE"): str,
Optional("HUMAN_LIFESTYLE_FACTORS"): str,
Optional("HUMAN_MEDICATIONS"): str,
Optional("HUMAN_PRESCRIPTION_OTC"): str,
Optional("HUMAN_SMOKING_STATUS"): str,
Optional("HUMAN_ALCOHOL_DRUG_USE"): str,
Optional("HUMAN_NUTRITION"): str,
Optional("HUMAN_INCLUSION_CRITERIA"): str,
Optional("HUMAN_EXCLUSION_CRITERIA"): str,
Optional("ANIMAL_ANIMAL_SUPPLIER"): str,
Optional("ANIMAL_HOUSING"): str,
Optional("ANIMAL_LIGHT_CYCLE"): str,
Optional("ANIMAL_FEED"): str,
Optional("ANIMAL_WATER"): str,
Optional("ANIMAL_INCLUSION_CRITERIA"): str,
Optional("CELL_BIOSOURCE_OR_SUPPLIER"): str,
Optional("CELL_STRAIN_DETAILS"): str,
Optional("SUBJECT_COMMENTS"): str,
Optional("CELL_PRIMARY_IMMORTALIZED"): str,
Optional("CELL_PASSAGE_NUMBER"): str,
Optional("CELL_COUNTS"): str,
Optional("SPECIES_GROUP"): str
}
)
subject_sample_factors_schema = Schema(
[
{
"Subject ID": str,
"Sample ID": str,
"Factors": dict,
Optional("Additional sample data"): {
Optional("RAW_FILE_NAME"): str,
Optional(str): str
}
}
]
)
collection_schema = Schema(
{
"COLLECTION_SUMMARY": str,
Optional("COLLECTION_PROTOCOL_ID"): str,
Optional("COLLECTION_PROTOCOL_FILENAME"): str,
Optional("COLLECTION_PROTOCOL_COMMENTS"): str,
Optional("SAMPLE_TYPE"): str, # assumed optional due to large number of files without
Optional("COLLECTION_METHOD"): str,
Optional("COLLECTION_LOCATION"): str,
Optional("COLLECTION_FREQUENCY"): str,
Optional("COLLECTION_DURATION"): str,
Optional("COLLECTION_TIME"): str,
Optional("VOLUMEORAMOUNT_COLLECTED"): str,
Optional("STORAGE_CONDITIONS"): str,
Optional("COLLECTION_VIALS"): str,
Optional("STORAGE_VIALS"): str,
Optional("COLLECTION_TUBE_TEMP"): str,
Optional("ADDITIVES"): str,
Optional("BLOOD_SERUM_OR_PLASMA"): str,
Optional("TISSUE_CELL_IDENTIFICATION"): str,
Optional("TISSUE_CELL_QUANTITY_TAKEN"): str
}
)
treatment_schema = Schema(
{
"TREATMENT_SUMMARY": str,
Optional("TREATMENT_PROTOCOL_ID"): str,
Optional("TREATMENT_PROTOCOL_FILENAME"): str,
Optional("TREATMENT_PROTOCOL_COMMENTS"): str,
Optional("TREATMENT"): str,
Optional("TREATMENT_COMPOUND"): str,
Optional("TREATMENT_ROUTE"): str,
Optional("TREATMENT_DOSE"): str,
Optional("TREATMENT_DOSEVOLUME"): str,
Optional("TREATMENT_DOSEDURATION"): str,
Optional("TREATMENT_VEHICLE"): str,
Optional("ANIMAL_VET_TREATMENTS"): str,
Optional("ANIMAL_ANESTHESIA"): str,
Optional("ANIMAL_ACCLIMATION_DURATION"): str,
Optional("ANIMAL_FASTING"): str,
Optional("ANIMAL_ENDP_EUTHANASIA"): str,
Optional("ANIMAL_ENDP_TISSUE_COLL_LIST"): str,
Optional("ANIMAL_ENDP_TISSUE_PROC_METHOD"): str,
Optional("ANIMAL_ENDP_CLINICAL_SIGNS"): str,
Optional("HUMAN_FASTING"): str,
Optional("HUMAN_ENDP_CLINICAL_SIGNS"): str,
Optional("CELL_STORAGE"): str,
Optional("CELL_GROWTH_CONTAINER"): str,
Optional("CELL_GROWTH_CONFIG"): str,
Optional("CELL_GROWTH_RATE"): str,
Optional("CELL_INOC_PROC"): str,
Optional("CELL_MEDIA"): str,
Optional("CELL_ENVIR_COND"): str,
Optional("CELL_HARVESTING"): str,
Optional("PLANT_GROWTH_SUPPORT"): str,
Optional("PLANT_GROWTH_LOCATION"): str,
Optional("PLANT_PLOT_DESIGN"): str,
Optional("PLANT_LIGHT_PERIOD"): str,
Optional("PLANT_HUMIDITY"): str,
Optional("PLANT_TEMP"): str,
Optional("PLANT_WATERING_REGIME"): str,
Optional("PLANT_NUTRITIONAL_REGIME"): str,
Optional("PLANT_ESTAB_DATE"): str,
Optional("PLANT_HARVEST_DATE"): str,
Optional("PLANT_GROWTH_STAGE"): str,
Optional("PLANT_METAB_QUENCH_METHOD"): str,
Optional("PLANT_HARVEST_METHOD"): str,
Optional("PLANT_STORAGE"): str,
Optional("CELL_PCT_CONFLUENCE"): str,
Optional("CELL_MEDIA_LASTCHANGED"): str
}
)
sampleprep_schema = Schema(
{
"SAMPLEPREP_SUMMARY": str,
Optional("SAMPLEPREP_PROTOCOL_ID"): str,
Optional("SAMPLEPREP_PROTOCOL_FILENAME"): str,
Optional("SAMPLEPREP_PROTOCOL_COMMENTS"): str,
Optional("PROCESSING_METHOD"): str,
Optional("PROCESSING_STORAGE_CONDITIONS"): str,
Optional("EXTRACTION_METHOD"): str,
Optional("EXTRACT_CONCENTRATION_DILUTION"): str,
Optional("EXTRACT_ENRICHMENT"): str,
Optional("EXTRACT_CLEANUP"): str,
Optional("EXTRACT_STORAGE"): str,
Optional("SAMPLE_RESUSPENSION"): str,
Optional("SAMPLE_DERIVATIZATION"): str,
Optional("SAMPLE_SPIKING"): str,
Optional("ORGAN"): str,
Optional("ORGAN_SPECIFICATION"): str,
Optional("CELL_TYPE"): str,
Optional("SUBCELLULAR_LOCATION"): str
}
)
chromatography_schema = Schema(
{
Optional("CHROMATOGRAPHY_SUMMARY"): str,
"CHROMATOGRAPHY_TYPE": str,
"INSTRUMENT_NAME": str,
"COLUMN_NAME": str,
Optional("FLOW_GRADIENT"): str,
Optional("FLOW_RATE"): str,
Optional("COLUMN_TEMPERATURE"): str,
Optional("METHODS_FILENAME"): str,
Optional("SOLVENT_A"): str,
Optional("SOLVENT_B"): str,
Optional("METHODS_ID"): str,
Optional("COLUMN_PRESSURE"): str,
Optional("INJECTION_TEMPERATURE"): str,
Optional("INTERNAL_STANDARD"): str,
Optional("INTERNAL_STANDARD_MT"): str,
Optional("RETENTION_INDEX"): str,
Optional("RETENTION_TIME"): str,
Optional("SAMPLE_INJECTION"): str,
Optional("SAMPLING_CONE"): str,
Optional("ANALYTICAL_TIME"): str,
Optional("CAPILLARY_VOLTAGE"): str,
Optional("MIGRATION_TIME"): str,
Optional("OVEN_TEMPERATURE"): str,
Optional("PRECONDITIONING"): str,
Optional("RUNNING_BUFFER"): str,
Optional("RUNNING_VOLTAGE"): str,
Optional("SHEATH_LIQUID"): str,
Optional("TIME_PROGRAM"): str,
Optional("TRANSFERLINE_TEMPERATURE"): str,
Optional("WASHING_BUFFER"): str,
Optional("WEAK_WASH_SOLVENT_NAME"): str,
Optional("WEAK_WASH_VOLUME"): str,
Optional("STRONG_WASH_SOLVENT_NAME"): str,
Optional("STRONG_WASH_VOLUME"): str,
Optional("TARGET_SAMPLE_TEMPERATURE"): str,
Optional("SAMPLE_LOOP_SIZE"): str,
Optional("SAMPLE_SYRINGE_SIZE"): str,
Optional("RANDOMIZATION_ORDER"): str,
Optional("CHROMATOGRAPHY_COMMENTS"): str
}
)
analysis_schema = Schema(
{
"ANALYSIS_TYPE": str,
Optional("LABORATORY_NAME"): str,
Optional("OPERATOR_NAME"): str,
Optional("DETECTOR_TYPE"): str,
Optional("SOFTWARE_VERSION"): str,
Optional("ACQUISITION_DATE"): str,
Optional("ANALYSIS_PROTOCOL_FILE"): str,
Optional("ACQUISITION_PARAMETERS_FILE"): str,
Optional("PROCESSING_PARAMETERS_FILE"): str,
Optional("DATA_FORMAT"): str,
# not specified in mwTab specification (assumed)
Optional("ACQUISITION_ID"): str,
Optional("ACQUISITION_TIME"): str,
Optional("ANALYSIS_COMMENTS"): str,
Optional("ANALYSIS_DISPLAY"): str,
Optional("INSTRUMENT_NAME"): str,
Optional("INSTRUMENT_PARAMETERS_FILE"): str,
Optional("NUM_FACTORS"): str,
Optional("NUM_METABOLITES"): str,
Optional("PROCESSED_FILE"): str,
Optional("RANDOMIZATION_ORDER"): str,
Optional("RAW_FILE"): str,
}
)
ms_schema = Schema(
{
"INSTRUMENT_NAME": str,
"INSTRUMENT_TYPE": str,
"MS_TYPE": str,
"ION_MODE": str,
"MS_COMMENTS": str, # changed to required
Optional("CAPILLARY_TEMPERATURE"): str,
Optional("CAPILLARY_VOLTAGE"): str,
Optional("COLLISION_ENERGY"): str,
Optional("COLLISION_GAS"): str,
Optional("DRY_GAS_FLOW"): str,
Optional("DRY_GAS_TEMP"): str,
Optional("FRAGMENT_VOLTAGE"): str,
Optional("FRAGMENTATION_METHOD"): str,
Optional("GAS_PRESSURE"): str,
Optional("HELIUM_FLOW"): str,
Optional("ION_SOURCE_TEMPERATURE"): str,
Optional("ION_SPRAY_VOLTAGE"): str,
Optional("IONIZATION"): str,
Optional("IONIZATION_ENERGY"): str,
Optional("IONIZATION_POTENTIAL"): str,
Optional("MASS_ACCURACY"): str,
Optional("PRECURSOR_TYPE"): str,
Optional("REAGENT_GAS"): str,
Optional("SOURCE_TEMPERATURE"): str,
Optional("SPRAY_VOLTAGE"): str,
Optional("ACTIVATION_PARAMETER"): str,
Optional("ACTIVATION_TIME"): str,
Optional("ATOM_GUN_CURRENT"): str,
Optional("AUTOMATIC_GAIN_CONTROL"): str,
Optional("BOMBARDMENT"): str,
Optional("CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
Optional("CDL_TEMPERATURE"): str,
Optional("DATAFORMAT"): str,
Optional("DESOLVATION_GAS_FLOW"): str,
Optional("DESOLVATION_TEMPERATURE"): str,
Optional("INTERFACE_VOLTAGE"): str,
Optional("IT_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
Optional("LASER"): str,
Optional("MATRIX"): str,
Optional("NEBULIZER"): str,
Optional("OCTPOLE_VOLTAGE"): str,
Optional("PROBE_TIP"): str,
Optional("RESOLUTION_SETTING"): str,
Optional("SAMPLE_DRIPPING"): str,
Optional("SCAN_RANGE_MOVERZ"): str,
Optional("SCANNING"): str,
Optional("SCANNING_CYCLE"): str,
Optional("SCANNING_RANGE"): str,
Optional("SKIMMER_VOLTAGE"): str,
Optional("TUBE_LENS_VOLTAGE"): str,
Optional("MS_RESULTS_FILE"): Or(str, dict)
}
)
nmr_schema = Schema(
{
"INSTRUMENT_NAME": str,
"INSTRUMENT_TYPE": str,
"NMR_EXPERIMENT_TYPE": str,
Optional("NMR_COMMENTS"): str,
Optional("FIELD_FREQUENCY_LOCK"): str,
Optional("STANDARD_CONCENTRATION"): str,
"SPECTROMETER_FREQUENCY": str,
Optional("NMR_PROBE"): str,
Optional("NMR_SOLVENT"): str,
Optional("NMR_TUBE_SIZE"): str,
Optional("SHIMMING_METHOD"): str,
Optional("PULSE_SEQUENCE"): str,
Optional("WATER_SUPPRESSION"): str,
Optional("PULSE_WIDTH"): str,
Optional("POWER_LEVEL"): str,
Optional("RECEIVER_GAIN"): str,
Optional("OFFSET_FREQUENCY"): str,
Optional("PRESATURATION_POWER_LEVEL"): str,
Optional("CHEMICAL_SHIFT_REF_CPD"): str,
Optional("TEMPERATURE"): str,
Optional("NUMBER_OF_SCANS"): str,
Optional("DUMMY_SCANS"): str,
Optional("ACQUISITION_TIME"): str,
Optional("RELAXATION_DELAY"): str,
Optional("SPECTRAL_WIDTH"): str,
Optional("NUM_DATA_POINTS_ACQUIRED"): str,
Optional("REAL_DATA_POINTS"): str,
Optional("LINE_BROADENING"): str,
Optional("ZERO_FILLING"): str,
Optional("APODIZATION"): str,
Optional("BASELINE_CORRECTION_METHOD"): str,
Optional("CHEMICAL_SHIFT_REF_STD"): str,
Optional("BINNED_INCREMENT"): str,
Optional("BINNED_DATA_NORMALIZATION_METHOD"): str,
Optional("BINNED_DATA_PROTOCOL_FILE"): str,
Optional("BINNED_DATA_CHEMICAL_SHIFT_RANGE"): str,
Optional("BINNED_DATA_EXCLUDED_RANGE"): str
}
)
data_schema = Schema(
[
{
Or("Metabolite", "Bin range(ppm)", only_one=True): str,
Optional(str): str,
},
]
)
extended_schema = Schema(
[
{
"Metabolite": str,
Optional(str): str,
"sample_id": str
},
]
)
ms_metabolite_data_schema = Schema(
{
"Units": str,
"Data": data_schema,
"Metabolites": data_schema,
Optional("Extended"): extended_schema
}
)
nmr_binned_data_schema = Schema(
{
"Units": str,
"Data": data_schema
}
)
section_schema_mapping = {
"METABOLOMICS WORKBENCH": metabolomics_workbench_schema,
"PROJECT": project_schema,
"STUDY": study_schema,
"ANALYSIS": analysis_schema,
"SUBJECT": subject_schema,
"SUBJECT_SAMPLE_FACTORS": subject_sample_factors_schema,
"COLLECTION": collection_schema,
"TREATMENT": treatment_schema,
"SAMPLEPREP": sampleprep_schema,
"CHROMATOGRAPHY": chromatography_schema,
"MS": ms_schema,
"NM": nmr_schema,
"MS_METABOLITE_DATA": ms_metabolite_data_schema,
"NMR_METABOLITE_DATA": ms_metabolite_data_schema,
"NMR_BINNED_DATA": nmr_binned_data_schema,
}
| 34.049327 | 94 | 0.61965 |