hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3df75b6d600c5367171502575bab8ca0ea22ae60 | 4,101 | py | Python | azure/mgmt/storage/v2016_01_01/operations/usage_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | azure/mgmt/storage/v2016_01_01/operations/usage_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/storage/v2016_01_01/operations/usage_operations.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsageOperations(object):
"""UsageOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-01-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-01-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the current usage count and the limit for the resources under the
subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Usage
<azure.mgmt.storage.v2016_01_01.models.Usage>`
:rtype: :class:`UsagePaged
<azure.mgmt.storage.v2016_01_01.models.UsagePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
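# A minimal usage sketch, assuming a configured management client whose
# `usage` attribute is an instance of this class; the `client` name and
# the Usage model attributes shown are illustrative assumptions, not
# verified API:
#   for usage in client.usage.list():
#       print(usage.name.value, usage.current_value, usage.limit)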
| 39.815534 | 144 | 0.626433 |
6002d8d83b0f814a16bfe9b4c302d7fb50a07452 | 421 | py | Python | tests/h/sentry_test.py | discodavey/h | 7bff8478b3a5b936de82ac9fcd89b355f4afd3aa | [
"MIT"
] | 2 | 2021-11-07T23:14:54.000Z | 2021-11-17T10:11:55.000Z | tests/h/sentry_test.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | [
"MIT"
] | 16 | 2018-03-14T21:23:46.000Z | 2019-04-29T18:55:28.000Z | tests/h/sentry_test.py | 0b01/h | d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14 | [
"MIT"
] | 1 | 2021-03-12T09:45:04.000Z | 2021-03-12T09:45:04.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from h.sentry import get_client
class TestGetClient(object):
@pytest.mark.parametrize('settings,env', [
({}, 'dev'),
({'h.env': 'qa'}, 'qa'),
({'h.env': 'prod'}, 'prod'),
])
def test_set_environment(self, settings, env):
client = get_client(settings)
assert client.environment == env
| 20.047619 | 50 | 0.598575 |
93fc61c05cdf576126d528bff275c0e4c4217699 | 596 | py | Python | hatefull/apps/answers/migrations/0002_auto_20160214_0028.py | MauricioDinki/hatefull | fdefd69251ea136798ff483bfa90a3b08a871ec7 | [
"BSD-3-Clause"
] | null | null | null | hatefull/apps/answers/migrations/0002_auto_20160214_0028.py | MauricioDinki/hatefull | fdefd69251ea136798ff483bfa90a3b08a871ec7 | [
"BSD-3-Clause"
] | null | null | null | hatefull/apps/answers/migrations/0002_auto_20160214_0028.py | MauricioDinki/hatefull | fdefd69251ea136798ff483bfa90a3b08a871ec7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 06:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('answers', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='answer',
name='question',
),
migrations.RemoveField(
model_name='answer',
name='question_owner',
),
migrations.RemoveField(
model_name='answer',
name='tests',
),
]
| 21.285714 | 47 | 0.557047 |
9ac57f41efede0102f0ec9445abc23a70563cc91 | 1,693 | py | Python | pre-process.py | yelinyun123/Image-Captioning-v2 | b37c7e21dc7a2323ade3217ff4de3a1aa8fe6dfc | [
"Apache-2.0"
] | 80 | 2020-01-09T08:53:53.000Z | 2022-03-11T08:41:47.000Z | pre_process.py | snowflowersnowflake/Image-Captioning-PyTorch | 89276fa520e85fa25b603900a8f24a2d926b55bb | [
"Apache-2.0"
] | 19 | 2020-02-05T11:01:41.000Z | 2022-02-17T02:48:03.000Z | pre_process.py | snowflowersnowflake/Image-Captioning-PyTorch | 89276fa520e85fa25b603900a8f24a2d926b55bb | [
"Apache-2.0"
] | 26 | 2020-03-26T03:30:44.000Z | 2022-03-08T04:48:49.000Z | import json
import zipfile
from collections import Counter
import jieba
from tqdm import tqdm
from config import *
from utils import ensure_folder
def extract(folder):
filename = '{}.zip'.format(folder)
print('Extracting {}...'.format(filename))
with zipfile.ZipFile(filename, 'r') as zip_ref:
zip_ref.extractall('data')
def create_input_files():
json_path = train_annotations_filename
# Read JSON
with open(json_path, 'r') as j:
samples = json.load(j)
# Read image paths and captions for each image
word_freq = Counter()
for sample in tqdm(samples):
caption = sample['caption']
for c in caption:
seg_list = jieba.cut(c, cut_all=True)
# Update word frequency
word_freq.update(seg_list)
# Create word map
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
word_map = {k: v + 1 for v, k in enumerate(words)}
word_map['<unk>'] = len(word_map) + 1
word_map['<start>'] = len(word_map) + 1
word_map['<end>'] = len(word_map) + 1
word_map['<pad>'] = 0
print(len(word_map))
print(words[:10])
# Save word map to a JSON
with open(os.path.join(data_folder, 'WORDMAP.json'), 'w') as j:
json.dump(word_map, j)
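# A sketch of the resulting WORDMAP.json, assuming N words survive the
# frequency cutoff (ids follow insertion order; the tokens are made up):
#   {"word_1": 1, ..., "word_N": N,
#    "<unk>": N+1, "<start>": N+2, "<end>": N+3, "<pad>": 0}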
if __name__ == '__main__':
# parameters
ensure_folder('data')
if not os.path.isdir(train_image_folder):
extract(train_folder)
if not os.path.isdir(valid_image_folder):
extract(valid_folder)
if not os.path.isdir(test_a_image_folder):
extract(test_a_folder)
if not os.path.isdir(test_b_image_folder):
extract(test_b_folder)
create_input_files()
| 24.536232 | 73 | 0.645009 |
8d0ece4fd2b2c7d381607b86c9e4bbc9c6bf55f5 | 6,022 | py | Python | bamboo/unit_tests/test_unit_layer_softmax.py | naoyam/lbann | d30e053b6f86d1cf8cca1d61c94bbbdbfc4945c4 | [
"Apache-2.0"
] | null | null | null | bamboo/unit_tests/test_unit_layer_softmax.py | naoyam/lbann | d30e053b6f86d1cf8cca1d61c94bbbdbfc4945c4 | [
"Apache-2.0"
] | 66 | 2018-04-04T22:24:42.000Z | 2020-10-23T01:50:34.000Z | bamboo/unit_tests/test_unit_layer_softmax.py | naoyam/lbann | d30e053b6f86d1cf8cca1d61c94bbbdbfc4945c4 | [
"Apache-2.0"
] | null | null | null | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
np.random.seed(201910142)
_num_samples = 19
_sample_size = 7
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# NumPy softmax
# ==============================================
def numpy_softmax(x):
"""NumPy implementation of softmax.
The computation is performed with 64-bit floats. There is also an
implementation of softmax in SciPy 1.2.0 (scipy.special.softmax).
"""
# Compare dtypes with != (an `is not` identity test against the
# np.float64 type is always True here, forcing a cast even for
# float64 input).
if x.dtype != np.float64:
x = x.astype(np.float64)
y = np.exp(x - np.max(x))
return y / np.sum(y)
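# Quick sanity check of the helper above (illustrative, not executed by
# the test): a uniform input yields a uniform distribution summing to 1.
#   numpy_softmax(np.array([0.0, 0.0])) -> array([0.5, 0.5])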
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
trainer = lbann.Trainer()
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(),
dims=tools.str_list(_sample_size)),
lbann.WeightsLayer(weights=x_weights,
dims=tools.str_list(_sample_size)))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Softmax(x, data_layout='data_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
y = numpy_softmax(x)
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Model-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Softmax(x, data_layout='model_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='model-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
y = numpy_softmax(x)
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
mini_batch_size = num_samples() // 2
num_epochs = 0
return lbann.Model(mini_batch_size,
num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for test in tools.create_tests(setup_experiment, __file__):
globals()[test.__name__] = test
| 28.951923 | 80 | 0.549817 |
f466673f81791645d4b633c77140ecff6350ffee | 836 | py | Python | rio_color/workers.py | kylebarron/rio-color | 2a7aabf107d1779ae19442c84c92d8f31b4ddba8 | [
"MIT"
] | 104 | 2016-04-22T15:40:04.000Z | 2022-02-07T19:12:25.000Z | rio_color/workers.py | vincentsarago/rio-color | 979eb50dec98319e45022f642f0d8edc546bceae | [
"MIT"
] | 63 | 2015-08-03T20:05:12.000Z | 2022-02-03T18:37:54.000Z | rio_color/workers.py | vincentsarago/rio-color | 979eb50dec98319e45022f642f0d8edc546bceae | [
"MIT"
] | 23 | 2017-02-21T09:21:30.000Z | 2021-11-15T18:15:28.000Z | """Color functions for use with rio-mucho."""
from .operations import parse_operations, simple_atmo
from .utils import to_math_type, scale_dtype
# Rio workers
def atmos_worker(srcs, window, ij, args):
"""A simple atmospheric correction user function."""
src = srcs[0]
rgb = src.read(window=window)
rgb = to_math_type(rgb)
atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"])
# should be scaled 0 to 1, scale to outtype
return scale_dtype(atmos, args["out_dtype"])
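# A sketch of the args dict these workers expect; the keys are the ones
# read above/below, while the values (and the ops_string syntax, assumed
# from rio-color's operations parser) are made-up examples:
#   atmos_worker: {"atmo": 0.03, "contrast": 10, "bias": 15,
#                  "out_dtype": "uint8"}
#   color_worker: {"ops_string": "gamma rgb 1.05 sigmoidal rgb 6 0.5",
#                  "out_dtype": "uint8"}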
def color_worker(srcs, window, ij, args):
"""A user function."""
src = srcs[0]
arr = src.read(window=window)
arr = to_math_type(arr)
for func in parse_operations(args["ops_string"]):
arr = func(arr)
# scaled 0 to 1, now scale to outtype
return scale_dtype(arr, args["out_dtype"])
| 26.125 | 74 | 0.671053 |
be2b12bb66cdee0ff1059399234f518ba2badd4e | 2,684 | py | Python | django/contrib/gis/db/backends/postgis/schema.py | hackerbot/DjangoDevelopmentVersion | e7e39d32fd3aa53cf2861f083c24f9d1b38572d6 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/db/backends/postgis/schema.py | hackerbot/DjangoDevelopmentVersion | e7e39d32fd3aa53cf2861f083c24f9d1b38572d6 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/db/backends/postgis/schema.py | hackerbot/DjangoDevelopmentVersion | e7e39d32fd3aa53cf2861f083c24f9d1b38572d6 | [
"BSD-3-Clause"
] | null | null | null | from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
class PostGISSchemaEditor(DatabaseSchemaEditor):
geom_index_type = 'GIST'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
sql_add_spatial_index = "CREATE INDEX %(index)s ON %(table)s USING %(index_type)s (%(column)s %(ops)s)"
sql_clear_geometry_columns = "DELETE FROM geometry_columns WHERE f_table_name = %(table)s"
def __init__(self, *args, **kwargs):
super(PostGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
from django.contrib.gis.db.models.fields import GeometryField
if not isinstance(field, GeometryField):
return super(PostGISSchemaEditor, self).column_sql(model, field, include_default)
column_sql = super(PostGISSchemaEditor, self).column_sql(model, field, include_default)
if field.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
if field.geography:
index_ops = ''
else:
# Use either "nd" ops which are fast on multidimensional cases
# or just plain gist index for the 2d case.
if field.dim > 2:
index_ops = self.geom_index_ops_nd
else:
index_ops = ''
self.geometry_sql.append(
self.sql_add_spatial_index % {
"index": self.quote_name('%s_%s_id' % (model._meta.db_table, field.column)),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"index_type": self.geom_index_type,
"ops": index_ops,
}
)
return column_sql
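# For a field with dim > 2 (and geography=False), the deferred statement
# built above renders roughly as follows; the table/column identifiers
# are illustrative:
#   CREATE INDEX "myapp_city_point_id" ON "myapp_city"
#       USING GIST ("point" GIST_GEOMETRY_OPS_ND)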
def create_model(self, model):
super(PostGISSchemaEditor, self).create_model(model)
# Create geometry columns
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def delete_model(self, model):
super(PostGISSchemaEditor, self).delete_model(model)
self.execute(self.sql_clear_geometry_columns % {
"table": self.geo_quote_name(model._meta.db_table),
})
def add_field(self, model, field):
super(PostGISSchemaEditor, self).add_field(model, field)
# Create geometry columns
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
| 39.470588 | 107 | 0.620343 |
7f75d06e3efc8e1f02c9129aea3b2dfc9793ab0a | 8,202 | py | Python | egs/babel/s5b/g2p/myg2p_format_dictionary.py | Shuang777/kaldi | 3df67141b55cfd5e2ba7305a72e795e7706d8d30 | [
"Apache-2.0"
] | 1 | 2019-02-06T09:31:59.000Z | 2019-02-06T09:31:59.000Z | egs/babel/s5b/g2p/myg2p_format_dictionary.py | Shuang777/kaldi | 3df67141b55cfd5e2ba7305a72e795e7706d8d30 | [
"Apache-2.0"
] | null | null | null | egs/babel/s5b/g2p/myg2p_format_dictionary.py | Shuang777/kaldi | 3df67141b55cfd5e2ba7305a72e795e7706d8d30 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# $Id: lex2news.py 336 2013-01-18 18:22:37Z arlo $
DESCRIPTION="""
This converts from a lexicon format to the 'news' format used by
m2maligner. This borrows heavily from the hescii script, and is
used by the Swordfish team fo the Babel Project.
Encoding: UTF8 -> hescii
A UTF8 byte string is encoded to hescii as follows:
1. A Unicode character string is decoded from a UTF-8 byte string
2. Non-ASCII whitespace (> \u007F) maps to plain space (\u0020)
3. Split into substrings delimited by whitespace separators
4. Non-whitespace substrings are mapped as follows:
- First encode Unicode characters -> UTF8 bytes
- Then encode UTF8 bytes -> hexadecimal (ASCII) characters
- Optionally prepend a prefix character (default: 'x')
5. Join the substrings, retaining whitespace separators
Decoding: hescii -> byte string
A hescii string (ASCII-encoded text) is decoded to UTF8 as follows:
1. Search left-to-right for prefix followed by pairs of hexadecimals
2. For each match, substitute as follows:
- Discard the prefix, if any
- Convert the hexadecimal to a byte string
Note that encoding is stricter than decoding, which may not return
a valid UTF8 byte string.
When run as a command-line tool, this tool reads from stdin and writes
to stdout, encoding or decoding the streams as specified by options.
"""
import sys
import re
def dump(utf8Str, fileObj, prefix='x'):
"""
Dumps the hescii encoding of a utf-8 byte string to fileObj.
Args:
utf8Str: A utf-8 byte string
fileObj: A python file open for writing
prefix: string to prefix all encoded hex values. Default 'x'
Returns:
Nothing
"""
fileObj.write(dumps(utf8Str, prefix))
fileObj.flush()
def dumps(utf8Str, prefix='x'):
"""
Returns the hescii encoding of a utf-8 byte string.
Args:
utf8Str: A utf-8 byte string
prefix: string to prefix all encoded hex values. Default 'x'
Returns:
A hescii string
"""
unicodeStr = utf8Str.decode('utf8')
def repl_whitespace(char):
if char.isspace() and char > u'\u007F':
return u'\u0020'
else:
return char
asciiWhitespaceStr = ''.join(map(repl_whitespace, unicodeStr))
def repl_encode(match):
s = match.group()
return prefix + s.encode('utf8').encode('hex')
hesciiStr = re.sub(r'\S+', repl_encode, asciiWhitespaceStr)
return hesciiStr.encode('ascii')
def load(fileObj, prefix='x'):
"""
Loads a hescii-encoded file and returns the utf-8 byte string.
Args:
fileObj: A python file open for reading
prefix: string used to prefix all encoded hex values. Default 'x'
Returns:
A utf-8 byte string
"""
return loads(fileObj.read(), prefix)
def loads(hesciiStr, prefix='x'):
"""
Takes a hescii-encoded string and returns the utf-8 byte string.
Args:
hesciiStr: a hescii-encoded string
prefix: string used to prefix all encoded hex values. Default 'x'
Returns:
A utf-8 byte string
"""
def repl(match):
s = match.group()
return s[len(prefix):].decode('hex')
pattern = prefix + r'([0123456789abcdefABCDEF][0123456789abcdefABCDEF])+'
return re.sub(pattern, repl, hesciiStr)
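# Round-trip sketch with the default prefix 'x' ('caf\xc3\xa9' is the
# UTF-8 byte string for u'caf\xe9'):
#   dumps('caf\xc3\xa9')  -> 'x636166c3a9'
#   loads('x636166c3a9')  -> 'caf\xc3\xa9'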
if __name__ == '__main__':
"""
Take an input file and return the hex encoding of its contents,
leaving ascii whitespace unchanged
"""
# Parse commandline arguments
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION)
parser.add_argument('-c', '--column', default=3, type=int,
help='Column to use for pronunciation\n' + \
'Default: 3')
parser.add_argument('-p', '--prefix', default='x',
help='Specify prefix for hex values.\n' + \
'Default: x')
parser.add_argument('-d', '--decode', action='store_true', help="Decode from hescii (not yet working)")
parser.add_argument('-s', '--syllable_prediction', action='store_true', help='Predict syllables (must have . syllable separator)')
parser.add_argument('-t', '--syllable_phone_separator', default='_', help='When predicting syllables, concatenate phones with this separator')
parser.add_argument('-m', '--syllable_marks', action='store_true', help='Keep syllable marks')
parser.add_argument('--version', action='version', version='$Id: hescii.py 336 2013-01-18 18:22:37Z arlo $')
args = parser.parse_args()
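# Typical invocation sketch (filenames are illustrative); the script
# filters a tab-separated lexicon on stdin to 'news'-format stdout:
#   cat lexicon.txt | python myg2p_format_dictionary.py -c 2 -s -t '_' > out.news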
# Set params from commandline
prefix = args.prefix
decode = args.decode
column = args.column-1;
syllable_marks = args.syllable_marks;
syllable_separator = args.syllable_phone_separator;
syllable_prediction = args.syllable_prediction;
# read from stdin
for line in sys.stdin:
sline=line.rstrip().split('\t')
uword = sline[0].decode('utf8').lower()
# remove lines that start with < (e.g. <hes>)
# TODO: HANDLE MORPHS
#morphs=uword.split('__')
if (len(sline)>1):
if (syllable_prediction):
# merge phones into syllables
pron1 = re.sub(r'[%] ',"",sline[column].strip())
pparts=pron1.split(' # ')
for pindex,p in enumerate(pparts):
syls=p.split(' . ')
for sindex,s in enumerate(syls):
syls[sindex]=re.sub(r' +',syllable_separator,s)  # ' +', not ' *': zero-width matches would interleave the separator between characters
if (syllable_marks):
pparts[pindex]=' . '.join(syls)
else:
pparts[pindex]=' '.join(syls)
pron1=' # '.join(pparts)
else:
if (syllable_marks):
pron1 = re.sub(r'[%] ',"",sline[column].strip())
else:
pron1 = re.sub(r'[%\.] ',"",sline[column].strip())
pron = re.sub(r' +'," ",pron1)  # collapse runs of spaces (' *' would also match between characters)
else:
pron = ''
if (re.search(r'(^__|__$)',uword)):
subwords=[uword]
subprons=[pron]
else:
subwords = uword.split('_')
subprons = pron.split('#')
def repl_encode(match):
s = match.group()
return 'x' + s.encode('utf8').encode('hex')
if (len(subwords) == len(subprons)):
for swindex,subword in enumerate(subwords):
if (len(subwords)>1 and len(subword)==1):
if (subword > u'\u007F'):
subword=re.sub(r'\S+', repl_encode, subword)
print(subword+'+\t'+subprons[swindex].strip())
else:
letters=list(subword)
for index,l in enumerate(letters):
if (l > u'\u007F'):
hesciiStr = re.sub(r'\S+', repl_encode, l)
letters[index]=hesciiStr.encode('ascii')
print(' '.join(letters)+'\t'+subprons[swindex].strip())
elif (len(subprons)==1):
output=[]
for swindex,subword in enumerate(subwords):
if (len(subword)==1 and
subword>='a' and
subword<='z'):
swout=''
if (swindex>0):
swout='_ '
swout+=subword+'+'
# if (swindex<len(subwords)-1):
# swout+=' _'
output.append(swout)
else:
letters=list(subword)
for index,l in enumerate(letters):
if (l > u'\u007F'):
hesciiStr = re.sub(r'\S+', repl_encode, l)
letters[index]=hesciiStr.encode('ascii')
if (len(output)>0):
output.append('_')
output.append(' '.join(letters))
print(' '.join(output)+'\t'+subprons[0].strip())
| 37.972222 | 146 | 0.564131 |
df8b55cbe78e9e8fc85756e9bed11da9668eaddd | 8,783 | py | Python | docs/conf.py | zetaron/asyncio-redis | 7d174e0ebe2dc52340001097eeee48a868454480 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | docs/conf.py | zetaron/asyncio-redis | 7d174e0ebe2dc52340001097eeee48a868454480 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | docs/conf.py | zetaron/asyncio-redis | 7d174e0ebe2dc52340001097eeee48a868454480 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-07-16T06:21:38.000Z | 2021-07-16T06:21:38.000Z | # -*- coding: utf-8 -*-
#
# asyncio_redis documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 31 08:50:13 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Take signatures from docstrings.
autodoc_docstring_signature = True
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"asyncio_redis"
copyright = u"2013, Jonathan Slenders"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd:
html_theme = "default"
else:
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = "pyramid"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "asyncio_redisdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"asyncio_redis.tex",
u"asyncio\\_redis Documentation",
u"Jonathan Slenders",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"asyncio_redis",
u"asyncio_redis Documentation",
[u"Jonathan Slenders"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"asyncio_redis",
u"asyncio_redis Documentation",
u"Jonathan Slenders",
"asyncio_redis",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
| 30.817544 | 80 | 0.700444 |
a10e00ecdd3e6cdb7999663795696c0b7c3e61c7 | 3,452 | py | Python | mediagoblin/tests/test_metadata.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 60 | 2015-01-17T01:19:47.000Z | 2021-09-17T01:25:47.000Z | mediagoblin/tests/test_metadata.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 12 | 2015-02-03T09:14:42.000Z | 2020-12-04T12:18:03.000Z | mediagoblin/tests/test_metadata.py | saksham1115/mediagoblin | 41302ad2b622b340caeb13339338ab3a5d0f7e6b | [
"CC0-1.0"
] | 23 | 2015-08-18T01:32:50.000Z | 2021-09-05T23:22:55.000Z | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from mediagoblin.tools.metadata import compact_and_validate
from jsonschema import ValidationError
class TestMetadataFunctionality:
@pytest.fixture(autouse=True)
def _setup(self, test_app):
self.test_app = test_app
def testCompactAndValidate(self):
# First, test out a well formatted piece of metadata
######################################################
test_metadata = {
'dc:title':'My Pet Bunny',
'dc:description':'A picture displaying how cute my pet bunny is.',
'location':'/home/goblin/Pictures/bunny.png',
'license':'http://www.gnu.org/licenses/gpl.txt'
}
jsonld_metadata =compact_and_validate(test_metadata)
assert jsonld_metadata
assert jsonld_metadata.get('dc:title') == 'My Pet Bunny'
# Free floating nodes should be removed
assert jsonld_metadata.get('location') is None
assert jsonld_metadata.get('@context') == \
u"http://www.w3.org/2013/json-ld-context/rdfa11"
# Next, make sure that various badly formatted metadata
# will be rejected.
#######################################################
#,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.
# Metadata with a non-URI license should fail :
#`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'
metadata_fail_1 = {
'dc:title':'My Pet Bunny',
'dc:description':'A picture displaying how cute my pet bunny is.',
'location':'/home/goblin/Pictures/bunny.png',
'license':'All Rights Reserved.'
}
jsonld_fail_1 = None
try:
jsonld_fail_1 = compact_and_validate(metadata_fail_1)
except ValidationError as e:
assert e.message == "'All Rights Reserved.' is not a 'uri'"
assert jsonld_fail_1 == None
#,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,.,
# Metadata with an invalid date-time dc:created should fail :
#`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`'`''
metadata_fail_2 = {
'dc:title':'My Pet Bunny',
'dc:description':'A picture displaying how cute my pet bunny is.',
'location':'/home/goblin/Pictures/bunny.png',
'license':'http://www.gnu.org/licenses/gpl.txt',
'dc:created':'The other day'
}
jsonld_fail_2 = None
try:
jsonld_fail_2 = compact_and_validate(metadata_fail_2)
except ValidationError as e:
assert e.message == "'The other day' is not a 'date-time'"
assert jsonld_fail_2 == None
| 43.696203 | 78 | 0.585747 |
749af869e295020dc230f31267a3a40694cef607 | 56,495 | py | Python | pwnlib/rop/rop.py | rhythmize/pwntools | 0c72e3a923c8f6367762c79355232bf0d20601b8 | [
"MIT"
] | 1 | 2021-07-14T15:11:46.000Z | 2021-07-14T15:11:46.000Z | pwnlib/rop/rop.py | rhythmize/pwntools | 0c72e3a923c8f6367762c79355232bf0d20601b8 | [
"MIT"
] | null | null | null | pwnlib/rop/rop.py | rhythmize/pwntools | 0c72e3a923c8f6367762c79355232bf0d20601b8 | [
"MIT"
] | null | null | null | r"""
Return Oriented Programming
Manual ROP
-------------------
The ROP tool can be used to build stacks pretty trivially.
Let's create a fake binary which has some symbols which might
have been useful.
>>> context.clear(arch='i386')
>>> binary = ELF.from_assembly('add esp, 0x10; ret; pop eax; ret; pop ecx; pop ebx; ret')
>>> binary.symbols = {'read': 0xdeadbeef, 'write': 0xdecafbad, 'execve': 0xcafebabe, 'exit': 0xfeedface}
Creating a ROP object which looks up symbols in the binary is pretty straightforward.
>>> rop = ROP(binary)
Once to ROP object has been loaded, you can trivially find gadgets, by using magic properties on the ``ROP`` object.
Each :class:`Gadget` has an ``address`` property which has the real address as well.
>>> rop.eax
Gadget(0x10000004, ['pop eax', 'ret'], ['eax'], 0x8)
>>> hex(rop.eax.address)
'0x10000004'
Other, more complicated gadgets also happen magically.
>>> rop.ecx
Gadget(0x10000006, ['pop ecx', 'pop ebx', 'ret'], ['ecx', 'ebx'], 0xc)
The easiest way to set up individual registers is to invoke the ``ROP`` object as a callable, with the registers as arguments.
>>> rop(eax=0x11111111, ecx=0x22222222)
Setting register values this way accounts for padding and extra registers which are popped off the stack.
Values which are filled with garbage (i.e. are not used) are filled with the :func:`cyclic` pattern
which corresponds to their offset, which is useful when debuggging your exploit.
>>> print(rop.dump())
0x0000: 0x10000006 pop ecx; pop ebx; ret
0x0004: 0x22222222
0x0008: b'caaa' <pad ebx>
0x000c: 0x10000004 pop eax; ret
0x0010: 0x11111111
Let's re-create our ROP object now to show for some other examples.:
>>> rop = ROP(binary)
With the ROP object, you can manually add stack frames.
>>> rop.raw(0)
>>> rop.raw(unpack(b'abcd'))
>>> rop.raw(2)
Inspecting the ROP stack is easy, and laid out in an easy-to-read
manner.
>>> print(rop.dump())
0x0000: 0x0
0x0004: 0x64636261
0x0008: 0x2
The ROP module is also aware of how to make function calls with
standard Linux ABIs.
>>> rop.call('read', [4,5,6])
>>> print(rop.dump())
0x0000: 0x0
0x0004: 0x64636261
0x0008: 0x2
0x000c: 0xdeadbeef read(4, 5, 6)
0x0010: b'eaaa' <return address>
0x0014: 0x4 arg0
0x0018: 0x5 arg1
0x001c: 0x6 arg2
You can also use a shorthand to invoke calls.
The stack is automatically adjusted for the next frame
>>> rop.write(7,8,9)
>>> rop.exit()
>>> print(rop.dump())
0x0000: 0x0
0x0004: 0x64636261
0x0008: 0x2
0x000c: 0xdeadbeef read(4, 5, 6)
0x0010: 0x10000000 <adjust @0x24> add esp, 0x10; ret
0x0014: 0x4 arg0
0x0018: 0x5 arg1
0x001c: 0x6 arg2
0x0020: b'iaaa' <pad>
0x0024: 0xdecafbad write(7, 8, 9)
0x0028: 0x10000000 <adjust @0x3c> add esp, 0x10; ret
0x002c: 0x7 arg0
0x0030: 0x8 arg1
0x0034: 0x9 arg2
0x0038: b'oaaa' <pad>
0x003c: 0xfeedface exit()
You can also append complex arguments onto stack when the stack pointer is known.
>>> rop = ROP(binary, base=0x7fffe000)
>>> rop.call('execve', [b'/bin/sh', [[b'/bin/sh'], [b'-p'], [b'-c'], [b'ls']], 0])
>>> print(rop.dump())
0x7fffe000: 0xcafebabe execve([b'/bin/sh'], [[b'/bin/sh'], [b'-p'], [b'-c'], [b'ls']], 0)
0x7fffe004: b'baaa' <return address>
0x7fffe008: 0x7fffe014 arg0 (+0xc)
0x7fffe00c: 0x7fffe01c arg1 (+0x10)
0x7fffe010: 0x0 arg2
0x7fffe014: b'/bin/sh\x00'
0x7fffe01c: 0x7fffe02c (+0x10)
0x7fffe020: 0x7fffe034 (+0x14)
0x7fffe024: 0x7fffe038 (+0x14)
0x7fffe028: 0x7fffe03c (+0x14)
0x7fffe02c: b'/bin/sh\x00'
0x7fffe034: b'-p\x00$'
0x7fffe038: b'-c\x00$'
0x7fffe03c: b'ls\x00$'
ROP also detects 'jmp $sp' gadget to help exploit binaries with NX disabled.
You can get this gadget on 'i386':
>>> context.clear(arch='i386')
>>> elf = ELF.from_assembly('nop; jmp esp; ret')
>>> rop = ROP(elf)
>>> jmp_gadget = rop.jmp_esp
>>> elf.read(jmp_gadget.address, 2) == asm('jmp esp')
True
You can also get this gadget on 'amd64':
>>> context.clear(arch='amd64')
>>> elf = ELF.from_assembly('nop; jmp rsp; ret')
>>> rop = ROP(elf)
>>> jmp_gadget = rop.jmp_rsp
>>> elf.read(jmp_gadget.address, 2) == asm('jmp rsp')
True
Gadgets whose address has badchar are filtered out:
>>> context.clear(arch='i386')
>>> elf = ELF.from_assembly('nop; pop eax; jmp esp; int 0x80; jmp esp; ret')
>>> rop = ROP(elf, badchars=b'\x02')
>>> jmp_gadget = rop.jmp_esp # It returns the second gadget
>>> elf.read(jmp_gadget.address, 2) == asm('jmp esp')
True
>>> rop = ROP(elf, badchars=b'\x02\x06')
>>> rop.jmp_esp == None # The address of both gadgets has badchar
True
ROP Example
-------------------
Let's assume we have a trivial binary that just reads some data
onto the stack, and returns.
>>> context.clear(arch='i386')
>>> c = constants
>>> assembly = 'read:' + shellcraft.read(c.STDIN_FILENO, 'esp', 1024)
>>> assembly += 'ret\n'
Let's provide some simple gadgets:
>>> assembly += 'add_esp: add esp, 0x10; ret\n'
And perhaps a nice "write" function.
>>> assembly += 'write: enter 0,0\n'
>>> assembly += ' mov ebx, [ebp+4+4]\n'
>>> assembly += ' mov ecx, [ebp+4+8]\n'
>>> assembly += ' mov edx, [ebp+4+12]\n'
>>> assembly += shellcraft.write('ebx', 'ecx', 'edx')
>>> assembly += ' leave\n'
>>> assembly += ' ret\n'
>>> assembly += 'flag: .asciz "The flag"\n'
And a way to exit cleanly.
>>> assembly += 'exit: ' + shellcraft.exit(0)
>>> binary = ELF.from_assembly(assembly)
Finally, let's build our ROP stack
>>> rop = ROP(binary)
>>> rop.write(c.STDOUT_FILENO, binary.symbols['flag'], 8)
>>> rop.exit()
>>> print(rop.dump())
0x0000: 0x10000012 write(STDOUT_FILENO, 0x10000026, 8)
0x0004: 0x1000000e <adjust @0x18> add esp, 0x10; ret
0x0008: 0x1 STDOUT_FILENO
0x000c: 0x10000026 flag
0x0010: 0x8 arg2
0x0014: b'faaa' <pad>
0x0018: 0x1000002f exit()
The raw data from the ROP stack is available via `str`.
>>> raw_rop = rop.chain()
>>> print(enhex(raw_rop))
120000100e000010010000002600001008000000666161612f000010
Let's try it out!
>>> p = process(binary.path)
>>> p.send(raw_rop)
>>> print(repr(p.recvall(timeout=5)))
b'The flag'
ROP Example (amd64)
-------------------
For amd64 binaries, the registers are loaded off the stack. Pwntools can do
basic reasoning about simple "pop; pop; add; ret"-style gadgets, and satisfy
requirements so that everything "just works".
>>> context.clear(arch='amd64')
>>> assembly = 'pop rdx; pop rdi; pop rsi; add rsp, 0x20; ret; target: ret'
>>> binary = ELF.from_assembly(assembly)
>>> rop = ROP(binary)
>>> rop.target(1,2,3)
>>> print(rop.dump())
0x0000: 0x10000000 pop rdx; pop rdi; pop rsi; add rsp, 0x20; ret
0x0008: 0x3 [arg2] rdx = 3
0x0010: 0x1 [arg0] rdi = 1
0x0018: 0x2 [arg1] rsi = 2
0x0020: b'iaaajaaa' <pad 0x20>
0x0028: b'kaaalaaa' <pad 0x18>
0x0030: b'maaanaaa' <pad 0x10>
0x0038: b'oaaapaaa' <pad 0x8>
0x0040: 0x10000008 target
>>> rop.target(1)
>>> print(rop.dump())
0x0000: 0x10000000 pop rdx; pop rdi; pop rsi; add rsp, 0x20; ret
0x0008: 0x3 [arg2] rdx = 3
0x0010: 0x1 [arg0] rdi = 1
0x0018: 0x2 [arg1] rsi = 2
0x0020: b'iaaajaaa' <pad 0x20>
0x0028: b'kaaalaaa' <pad 0x18>
0x0030: b'maaanaaa' <pad 0x10>
0x0038: b'oaaapaaa' <pad 0x8>
0x0040: 0x10000008 target
0x0048: 0x10000001 pop rdi; pop rsi; add rsp, 0x20; ret
0x0050: 0x1 [arg0] rdi = 1
0x0058: b'waaaxaaa' <pad rsi>
0x0060: b'yaaazaab' <pad 0x20>
0x0068: b'baabcaab' <pad 0x18>
0x0070: b'daabeaab' <pad 0x10>
0x0078: b'faabgaab' <pad 0x8>
0x0080: 0x10000008 target
Pwntools will also filter out some bad instructions while setting the registers
( e.g. syscall, int 0x80... )
>>> assembly = 'syscall; pop rdx; pop rsi; ret ; pop rdi ; int 0x80; pop rsi; pop rdx; ret ; pop rdi ; ret'
>>> binary = ELF.from_assembly(assembly)
>>> rop = ROP(binary)
>>> rop.call(0xdeadbeef, [1, 2, 3])
>>> print(rop.dump())
0x0000: 0x1000000b pop rdi; ret
0x0008: 0x1 [arg0] rdi = 1
0x0010: 0x10000002 pop rdx; pop rsi; ret
0x0018: 0x3 [arg2] rdx = 3
0x0020: 0x2 [arg1] rsi = 2
0x0028: 0xdeadbeef
ROP + Sigreturn
-----------------------
In some cases, control of the desired register is not available.
However, if you have control of the stack, EAX, and can find a
`int 0x80` gadget, you can use sigreturn.
Even better, this happens automagically.
Our example binary will read some data onto the stack, and
not do anything else interesting.
>>> context.clear(arch='i386')
>>> c = constants
>>> assembly = 'read:' + shellcraft.read(c.STDIN_FILENO, 'esp', 1024)
>>> assembly += 'ret\n'
>>> assembly += 'pop eax; ret\n'
>>> assembly += 'int 0x80\n'
>>> assembly += 'binsh: .asciz "/bin/sh"'
>>> binary = ELF.from_assembly(assembly)
Let's create a ROP object and invoke the call.
>>> context.kernel = 'amd64'
>>> rop = ROP(binary)
>>> binsh = binary.symbols['binsh']
>>> rop.execve(binsh, 0, 0)
That's all there is to it.
>>> print(rop.dump())
0x0000: 0x1000000e pop eax; ret
0x0004: 0x77 [arg0] eax = SYS_sigreturn
0x0008: 0x1000000b int 0x80; ret
0x000c: 0x0 gs
0x0010: 0x0 fs
0x0014: 0x0 es
0x0018: 0x0 ds
0x001c: 0x0 edi
0x0020: 0x0 esi
0x0024: 0x0 ebp
0x0028: 0x0 esp
0x002c: 0x10000012 ebx = binsh
0x0030: 0x0 edx
0x0034: 0x0 ecx
0x0038: 0xb eax = SYS_execve
0x003c: 0x0 trapno
0x0040: 0x0 err
0x0044: 0x1000000b int 0x80; ret
0x0048: 0x23 cs
0x004c: 0x0 eflags
0x0050: 0x0 esp_at_signal
0x0054: 0x2b ss
0x0058: 0x0 fpstate
Let's try it out!
>>> p = process(binary.path)
>>> p.send(rop.chain())
>>> time.sleep(1)
>>> p.sendline(b'echo hello; exit')
>>> p.recvline()
b'hello\n'
"""
from __future__ import absolute_import
from __future__ import division
import collections
import copy
import hashlib
import itertools
import os
import re
import shutil
import six
import string
import struct
import sys
import tempfile
from pwnlib import abi
from pwnlib import constants
from pwnlib.context import LocalContext
from pwnlib.context import context
from pwnlib.elf import ELF
from pwnlib.log import getLogger
from pwnlib.rop import srop
from . import ret2dlresolve
from pwnlib.rop.call import AppendedArgument
from pwnlib.rop.call import Call
from pwnlib.rop.call import CurrentStackPointer
from pwnlib.rop.call import NextGadgetAddress
from pwnlib.rop.call import StackAdjustment
from pwnlib.rop.call import Unresolved
from pwnlib.rop.gadgets import Gadget
from pwnlib.util import lists
from pwnlib.util import packing
from pwnlib.util.cyclic import cyclic
from pwnlib.util.packing import pack
from pwnlib.util.misc import python_2_bytes_compatible
log = getLogger(__name__)
__all__ = ['ROP']
enums = Call, constants.Constant
try:
from enum import Enum
except ImportError:
pass
else:
enums += Enum,
class Padding(object):
"""
Placeholder for exactly one pointer-width of padding.
"""
def __init__(self, name='<pad>'):
self.name = name
def _slot_len(x):
if isinstance(x, six.integer_types+(Unresolved, Padding, Gadget)):
return context.bytes
else:
return len(packing.flat(x))
class DescriptiveStack(list):
"""
List of resolved ROP gadgets that correspond to the ROP calls that
the user has specified.
"""
#: Base address
address = 0
#: Dictionary of `{address: [list of descriptions]}`
descriptions = {}
def __init__(self, address):
self.descriptions = collections.defaultdict(list)
self.address = address or 0
self._next_next = 0
self._next_last = 0
@property
def next(self):
for x in self[self._next_last:]:
self._next_next += _slot_len(x)
self._next_last = len(self)
return self.address + self._next_next
def describe(self, text, address = None):
if address is None:
address = self.next
self.descriptions[address] = text
def dump(self):
rv = []
addr = self.address
for i, data in enumerate(self):
off = None
line = '0x%04x:' % addr
if isinstance(data, (str, bytes)):
line += ' %16r' % data
elif isinstance(data, six.integer_types):
line += ' %#16x' % data
if self.address != 0 and self.address < data < self.next:
off = data - addr
else:
log.error("Don't know how to dump %r" % data)
desc = self.descriptions.get(addr, '')
if desc:
line += ' %s' % desc
if off is not None:
line += ' (+%#x)' % off
rv.append(line)
addr += _slot_len(data)
return '\n'.join(rv)
@python_2_bytes_compatible
class ROP(object):
r"""Class which simplifies the generation of ROP-chains.
Example:
.. code-block:: python
elf = ELF('ropasaurusrex')
rop = ROP(elf)
rop.read(0, elf.bss(0x80))
rop.dump()
# ['0x0000: 0x80482fc (read)',
# '0x0004: 0xdeadbeef',
# '0x0008: 0x0',
# '0x000c: 0x80496a8']
bytes(rop)
# '\xfc\x82\x04\x08\xef\xbe\xad\xde\x00\x00\x00\x00\xa8\x96\x04\x08'
>>> context.clear(arch = "i386", kernel = 'amd64')
>>> assembly = 'int 0x80; ret; add esp, 0x10; ret; pop eax; ret'
>>> e = ELF.from_assembly(assembly)
>>> e.symbols['funcname'] = e.entry + 0x1234
>>> r = ROP(e)
>>> r.funcname(1, 2)
>>> r.funcname(3)
>>> r.execve(4, 5, 6)
>>> print(r.dump())
0x0000: 0x10001234 funcname(1, 2)
0x0004: 0x10000003 <adjust @0x18> add esp, 0x10; ret
0x0008: 0x1 arg0
0x000c: 0x2 arg1
0x0010: b'eaaa' <pad>
0x0014: b'faaa' <pad>
0x0018: 0x10001234 funcname(3)
0x001c: 0x10000007 <adjust @0x24> pop eax; ret
0x0020: 0x3 arg0
0x0024: 0x10000007 pop eax; ret
0x0028: 0x77 [arg0] eax = SYS_sigreturn
0x002c: 0x10000000 int 0x80; ret
0x0030: 0x0 gs
0x0034: 0x0 fs
0x0038: 0x0 es
0x003c: 0x0 ds
0x0040: 0x0 edi
0x0044: 0x0 esi
0x0048: 0x0 ebp
0x004c: 0x0 esp
0x0050: 0x4 ebx
0x0054: 0x6 edx
0x0058: 0x5 ecx
0x005c: 0xb eax = SYS_execve
0x0060: 0x0 trapno
0x0064: 0x0 err
0x0068: 0x10000000 int 0x80; ret
0x006c: 0x23 cs
0x0070: 0x0 eflags
0x0074: 0x0 esp_at_signal
0x0078: 0x2b ss
0x007c: 0x0 fpstate
>>> r = ROP(e, 0x8048000)
>>> r.funcname(1, 2)
>>> r.funcname(3)
>>> r.execve(4, 5, 6)
>>> print(r.dump())
0x8048000: 0x10001234 funcname(1, 2)
0x8048004: 0x10000003 <adjust @0x8048018> add esp, 0x10; ret
0x8048008: 0x1 arg0
0x804800c: 0x2 arg1
0x8048010: b'eaaa' <pad>
0x8048014: b'faaa' <pad>
0x8048018: 0x10001234 funcname(3)
0x804801c: 0x10000007 <adjust @0x8048024> pop eax; ret
0x8048020: 0x3 arg0
0x8048024: 0x10000007 pop eax; ret
0x8048028: 0x77 [arg0] eax = SYS_sigreturn
0x804802c: 0x10000000 int 0x80; ret
0x8048030: 0x0 gs
0x8048034: 0x0 fs
0x8048038: 0x0 es
0x804803c: 0x0 ds
0x8048040: 0x0 edi
0x8048044: 0x0 esi
0x8048048: 0x0 ebp
0x804804c: 0x8048080 esp
0x8048050: 0x4 ebx
0x8048054: 0x6 edx
0x8048058: 0x5 ecx
0x804805c: 0xb eax = SYS_execve
0x8048060: 0x0 trapno
0x8048064: 0x0 err
0x8048068: 0x10000000 int 0x80; ret
0x804806c: 0x23 cs
0x8048070: 0x0 eflags
0x8048074: 0x0 esp_at_signal
0x8048078: 0x2b ss
0x804807c: 0x0 fpstate
>>> elf = ELF.from_assembly('ret')
>>> r = ROP(elf)
>>> r.ret.address == 0x10000000
True
>>> r = ROP(elf, badchars=b'\x00')
>>> r.gadgets == {}
True
>>> r.ret is None
True
"""
BAD_ATTRS = [
'trait_names', # ipython tab-complete
'download', # frequent typo
'upload', # frequent typo
]
X86_SUFFIXES = ['ax', 'bx', 'cx', 'dx', 'bp', 'sp', 'di', 'si',
'r8', 'r9', '10', '11', '12', '13', '14', '15']
def __init__(self, elfs, base = None, badchars = b'', **kwargs):
"""
Arguments:
elfs(list): List of :class:`.ELF` objects for mining
base(int): Stack address where the first byte of the ROP chain lies, if known.
badchars(str): Characters which should not appear in ROP gadget addresses.
"""
import ropgadget
# Permit singular ROP(elf) vs ROP([elf])
if isinstance(elfs, ELF):
elfs = [elfs]
elif isinstance(elfs, (bytes, six.text_type)):
elfs = [ELF(elfs)]
#: List of individual ROP gadgets, ROP calls, SROP frames, etc.
#: This is intended to be the highest-level abstraction that we can muster.
self._chain = []
#: List of ELF files which are available for mining gadgets
self.elfs = elfs
#: Stack address where the first byte of the ROP chain lies, if known.
self.base = base
#: Whether or not the ROP chain directly sets the stack pointer to a value
#: which is not contiguous
self.migrated = False
#: Characters which should not appear in ROP gadget addresses.
self._badchars = set(badchars)
self.__load()
@staticmethod
@LocalContext
def from_blob(blob, *a, **kw):
return ROP(ELF.from_bytes(blob, *a, **kw))
def setRegisters(self, registers):
"""
Returns an list of addresses/values which will set the specified register context.
Arguments:
registers(dict): Dictionary of ``{register name: value}``
Returns:
A list of tuples, ordering the stack.
Each tuple is in the form of ``(value, name)`` where ``value`` is either a
gadget address or literal value to go on the stack, and ``name`` is either
a string name or other item which can be "unresolved".
Note:
This is basically an implementation of the Set Cover Problem, which is
NP-hard. This means that we will take polynomial time N**2, where N is
the number of gadgets. We can reduce runtime by discarding useless and
inferior gadgets ahead of time.
"""
if not registers:
return []
regset = set(registers)
bad_instructions = set(('syscall', 'sysenter', 'int 0x80'))
# Collect all gadgets which use these registers
# Also collect the "best" gadget for each combination of registers
gadgets = []
best_gadgets = {}
for gadget in self.gadgets.values():
# Do not use gadgets which doesn't end with 'ret'
if gadget.insns[-1] != 'ret':
continue
# Do not use gadgets which contain 'syscall' or 'int'
if set(gadget.insns) & bad_instructions:
continue
touched = tuple(regset & set(gadget.regs))
if not touched:
continue
old = best_gadgets.get(touched, gadget)
# if we have a new gadget for the touched registers, choose it
# if the new gadget requires less stack space, choose it
# if both gadgets require same stack space, choose the one with less instructions
if (old is gadget) \
or (old.move > gadget.move) \
or (old.move == gadget.move and len(old.insns) > len(gadget.insns)):
best_gadgets[touched] = gadget
winner = None
budget = 999999999
for num_gadgets in range(len(registers)):
for combo in itertools.combinations(sorted(best_gadgets.values(), key=repr, reverse=True), 1+num_gadgets):
# Is this better than what we can already do?
cost = sum((g.move for g in combo))
if cost > budget:
continue
# Does it hit all of the registers we want?
coverage = set(sum((g.regs for g in combo), [])) & regset
if coverage != regset:
continue
# It is better than what we had, and hits all of the registers.
winner = combo
budget = cost
if not winner:
log.error("Could not satisfy setRegisters(%r)", registers)
# We have our set of "winner" gadgets, let's build a stack!
stack = []
for gadget in winner:
moved = context.bytes # Account for the gadget itself
goodregs = set(gadget.regs) & regset
name = ",".join(goodregs)
stack.append((gadget.address, gadget))
for r in gadget.regs:
moved += context.bytes
if r in registers:
stack.append((registers[r], r))
else:
stack.append((Padding('<pad %s>' % r), r))
for slot in range(moved, gadget.move, context.bytes):
left = gadget.move - slot
stack.append((Padding('<pad %#x>' % left), 'stack padding'))
return stack
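# A minimal sketch of calling setRegisters directly (the __call__ helper
# below wraps it). The single 'pop rdi; ret' gadget and its address are
# assumptions for illustration:
#
# >>> r = ROP(ELF.from_assembly('pop rdi; ret'))   # doctest: +SKIP
# >>> r.setRegisters({'rdi': 0x1337})              # doctest: +SKIP
# [(0x10000000, Gadget(0x10000000, ['pop rdi', 'ret'], ['rdi'], 0x10)), (0x1337, 'rdi')]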
def __call__(self, *args, **kwargs):
"""Set the given register(s)' by constructing a rop chain.
This is a thin wrapper around :meth:`setRegisters` which
actually executes the rop chain.
You can call this :class:`ROP` instance and provide keyword arguments,
or a dictionary.
Arguments:
regs(dict): Mapping of registers to values.
Can instead provide ``kwargs``.
>>> context.clear(arch='amd64')
>>> assembly = 'pop rax; pop rdi; pop rsi; ret; pop rax; ret;'
>>> e = ELF.from_assembly(assembly)
>>> r = ROP(e)
>>> r(rax=0xdead, rdi=0xbeef, rsi=0xcafe)
>>> print(r.dump())
0x0000: 0x10000000
0x0008: 0xdead
0x0010: 0xbeef
0x0018: 0xcafe
>>> r = ROP(e)
>>> r({'rax': 0xdead, 'rdi': 0xbeef, 'rsi': 0xcafe})
>>> print(r.dump())
0x0000: 0x10000000
0x0008: 0xdead
0x0010: 0xbeef
0x0018: 0xcafe
"""
if len(args) == 1 and isinstance(args[0], dict):
for value, _ in self.setRegisters(args[0]):
self.raw(value)
else:
self(kwargs)
def resolve(self, resolvable):
"""Resolves a symbol to an address
Arguments:
resolvable(str,int): Thing to convert into an address
Returns:
int containing address of 'resolvable', or None
"""
if isinstance(resolvable, str):
for elf in self.elfs:
if resolvable in elf.symbols:
return elf.symbols[resolvable]
if isinstance(resolvable, six.integer_types):
return resolvable
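# Illustrative behavior, assuming one loaded ELF that exports 'read'
# (addresses are placeholders):
#
# >>> rop.resolve('read')                      # doctest: +SKIP
# 0x400510
# >>> rop.resolve(0xdeadbeef)                  # doctest: +SKIP
# 0xdeadbeef
# >>> rop.resolve('no_such_symbol') is None    # doctest: +SKIP
# True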
def unresolve(self, value):
"""Inverts 'resolve'. Given an address, it attempts to find a symbol
for it in the loaded ELF files. If none is found, it searches all
known gadgets, and returns the disassembly.
Arguments:
value(int): Address to look up
Returns:
String containing the symbol name for the address, disassembly for a gadget
(if there's one at that address), or an empty string.
"""
for elf in self.elfs:
for name, addr in elf.symbols.items():
if addr == value:
return name
if value in self.gadgets:
return '; '.join(self.gadgets[value].insns)
return ''
def generatePadding(self, offset, count):
"""
Generates padding to be inserted into the ROP stack.
>>> context.clear(arch='i386')
>>> rop = ROP([])
>>> val = rop.generatePadding(5,15)
>>> cyclic_find(val[:4])
5
>>> len(val)
15
>>> rop.generatePadding(0,0)
b''
"""
# Ensure we don't generate a cyclic pattern which contains badchars
alphabet = b''.join(packing.p8(c) for c in bytearray(string.ascii_lowercase.encode()) if c not in self._badchars)
if count:
return cyclic(offset + count, alphabet=alphabet)[-count:]
return b''
def describe(self, object):
"""
Return a description for an object in the ROP stack
"""
if isinstance(object, enums):
return str(object)
if isinstance(object, six.integer_types):
return self.unresolve(object)
if isinstance(object, (bytes, six.text_type)):
return repr(object)
if isinstance(object, Gadget):
return '; '.join(object.insns)
def build(self, base = None, description = None):
"""
Construct the ROP chain into a list of elements which can be passed
to :func:`.flat`.
Arguments:
base(int):
The base address to build the rop-chain from. Defaults to
:attr:`base`.
description(dict):
Optional output argument, which will receive a mapping of
``address: description`` for each address on the stack,
starting at ``base``.
"""
if base is None:
base = self.base or 0
stack = DescriptiveStack(base)
chain = self._chain
#
# First pass
#
# Get everything onto the stack and save as much descriptive information
# as possible.
#
# The only replacements performed are to add stack adjustment gadgets
# (to move SP to the next gadget after a Call) and NextGadgetAddress,
# which can only be calculated in this pass.
#
iterable = enumerate(chain)
for idx, slot in iterable:
remaining = len(chain) - 1 - idx
address = stack.next
# Integers can just be added.
# Do our best to find out what the address is.
if isinstance(slot, six.integer_types):
stack.describe(self.describe(slot))
stack.append(slot)
# Byte blobs can also be added, however they must be
# broken down into pointer-width blobs.
elif isinstance(slot, (bytes, six.text_type)):
stack.describe(self.describe(slot))
if not isinstance(slot, bytes):
slot = slot.encode()
for chunk in lists.group(context.bytes, slot):
stack.append(chunk)
elif isinstance(slot, srop.SigreturnFrame):
stack.describe("Sigreturn Frame")
if slot.sp in (0, None) and self.base:
slot.sp = stack.next + len(slot)
registers = [slot.registers[i] for i in sorted(slot.registers.keys())]
for register in registers:
value = slot[register]
description = self.describe(value)
if description:
stack.describe('%s = %s' % (register, description))
else:
stack.describe('%s' % (register))
stack.append(value)
elif isinstance(slot, Call):
stack.describe(self.describe(slot))
registers = slot.register_arguments
for value, name in self.setRegisters(registers):
if name in registers:
index = slot.abi.register_arguments.index(name)
description = self.describe(value) or repr(value)
stack.describe('[arg%d] %s = %s' % (index, name, description))
elif isinstance(name, Gadget):
stack.describe('; '.join(name.insns))
elif isinstance(name, str):
stack.describe(name)
stack.append(value)
if address != stack.next:
stack.describe(slot.name)
stack.append(slot.target)
# For any remaining arguments, put them on the stack
stackArguments = slot.stack_arguments
for argument in slot.stack_arguments_before:
stack.describe("[dlresolve index]")
stack.append(argument)
nextGadgetAddr = stack.next + (context.bytes * len(stackArguments))
# Generally, stack-based arguments assume there's a return
# address on the stack.
#
# We need to at least put padding there so that things line up
# properly, but likely also need to adjust the stack past the
# arguments.
if slot.abi.returns:
# Save off the address of the next gadget
if remaining or stackArguments:
nextGadgetAddr = stack.next
# If there were arguments on the stack, we need to stick something
# in the slot where the return address goes.
if len(stackArguments) > 0:
if remaining:
fix_size = (1 + len(stackArguments))
fix_bytes = fix_size * context.bytes
adjust = self.search(move = fix_bytes)
if not adjust:
log.error("Could not find gadget to adjust stack by %#x bytes" % fix_bytes)
nextGadgetAddr += adjust.move
stack.describe('<adjust @%#x> %s' % (nextGadgetAddr, self.describe(adjust)))
stack.append(adjust.address)
for pad in range(fix_bytes, adjust.move, context.bytes):
stackArguments.append(Padding())
# We could not find a proper "adjust" gadget, but also didn't need one.
else:
stack.append(Padding("<return address>"))
for i, argument in enumerate(stackArguments):
if isinstance(argument, NextGadgetAddress):
stack.describe("<next gadget>")
stack.append(nextGadgetAddr)
else:
description = self.describe(argument) or 'arg%i' % (i + len(registers))
stack.describe(description)
stack.append(argument)
else:
stack.append(slot)
#
# Second pass
#
# All of the register-loading, stack arguments, and call addresses
# are on the stack. We can now start loading in absolute addresses.
#
start = base
end = stack.next
size = (stack.next - base)
slot_address = base
for i, slot in enumerate(stack):
if isinstance(slot, six.integer_types):
pass
elif isinstance(slot, (bytes, six.text_type)):
pass
elif isinstance(slot, AppendedArgument):
stack[i] = stack.next
stack.extend(slot.resolve(stack.next))
elif isinstance(slot, CurrentStackPointer):
stack[i] = slot_address
elif isinstance(slot, Padding):
stack[i] = self.generatePadding(i * context.bytes, context.bytes)
stack.describe(slot.name, slot_address)
elif isinstance(slot, Gadget):
stack[i] = slot.address
stack.describe(self.describe(slot), slot_address)
# Everything else we can just leave in place.
# Maybe the user put in something on purpose?
# Also, it may work in pwnlib.util.packing.flat()
else:
pass
slot_address += _slot_len(slot)
return stack
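# Sketch of the two-pass build in use (the base address is an assumption;
# the returned DescriptiveStack carries the per-slot descriptions):
#
# >>> rop.raw(0xdeadbeef)                   # doctest: +SKIP
# >>> stack = rop.build(base=0xffffd000)    # doctest: +SKIP
# >>> print(stack.dump())                   # doctest: +SKIP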
def find_stack_adjustment(self, slots):
return self.search(move=slots * context.bytes)
def chain(self, base=None):
"""Build the ROP chain
Arguments:
base(int):
The base address to build the rop-chain from. Defaults to
:attr:`base`.
Returns:
str containing raw ROP bytes
"""
return packing.flat(self.build(base=base))
def dump(self, base=None):
"""Dump the ROP chain in an easy-to-read manner
Arguments:
base(int):
The base address to build the rop-chain from. Defaults to
:attr:`base`.
"""
return self.build(base=base).dump()
def regs(self, registers=None, **kw):
"""Set the given registers; accepts a dict and/or keyword arguments."""
if registers is None:
registers = {}
registers.update(kw)
# Delegate to __call__, which builds the register-setting chain.
self(registers)
def call(self, resolvable, arguments = (), abi = None, **kwargs):
"""Add a call to the ROP chain
Arguments:
resolvable(str,int): Value which can be looked up via 'resolve',
or is already an integer.
arguments(list): List of arguments which can be passed to pack().
Alternately, if a base address is set, arbitrarily nested
structures of strings or integers can be provided.
"""
if self.migrated:
log.error('Cannot append to a migrated chain')
# If we can find a function with that name, just call it
if isinstance(resolvable, str):
addr = self.resolve(resolvable)
elif hasattr(resolvable, 'name') and hasattr(resolvable, 'address'):
addr = resolvable.address
resolvable = str(resolvable.name)
else:
addr = resolvable
resolvable = ''
if addr:
self.raw(Call(resolvable, addr, arguments, abi))
# Otherwise, if it is a syscall we might be able to call it
elif not self._srop_call(resolvable, arguments):
log.error('Could not resolve %r.' % resolvable)
def _srop_call(self, resolvable, arguments):
# Check that the call is a valid syscall
resolvable = 'SYS_' + resolvable.lower()
syscall_number = getattr(constants, resolvable, None)
if syscall_number is None:
return False
log.info_once("Using sigreturn for %r" % resolvable)
# Find an int 0x80 or similar instruction we can use
syscall_gadget = None
syscall_instructions = srop.syscall_instructions[context.arch]
for instruction in syscall_instructions:
syscall_gadget = self.find_gadget([instruction])
if syscall_gadget:
break
else:
log.error("Could not find any instructions in %r" % syscall_instructions)
# Generate the SROP frame which would invoke the syscall
with context.local(arch=self.elfs[0].arch):
frame = srop.SigreturnFrame()
frame.pc = syscall_gadget
frame.syscall = syscall_number
try:
SYS_sigreturn = constants.SYS_sigreturn
except AttributeError:
SYS_sigreturn = constants.SYS_rt_sigreturn
for register, value in zip(frame.arguments, arguments):
if not isinstance(value, six.integer_types + (Unresolved,)):
frame[register] = AppendedArgument(value)
else:
frame[register] = value
# Set up a call frame which will set EAX and invoke the syscall
call = Call('SYS_sigreturn',
syscall_gadget,
[SYS_sigreturn],
abi.ABI.sigreturn())
self.raw(call)
self.raw(frame)
# We do not expect to ever recover after the syscall, as it would
# require something like 'int 0x80; ret' which does not ever occur
# in the wild.
self.migrated = True
return True
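# Illustrative fallback path (assumes no 'execve' symbol resolves, a
# syscall-style gadget exists, and constants.SYS_execve is defined for the
# target arch; binsh is a hypothetical pointer to '/bin/sh'):
#
# >>> rop.call('execve', [binsh, 0, 0])   # doctest: +SKIP
# >>> rop.migrated                        # doctest: +SKIP
# True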
def find_gadget(self, instructions):
"""
Returns a gadget with the exact sequence of instructions specified
in the ``instructions`` argument.
"""
n = len(instructions)
for gadget in self.gadgets.values():
if tuple(gadget.insns)[:n] == tuple(instructions):
return gadget
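# Example of the exact-prefix match (returns None if no gadget starts with
# precisely this sequence; the gadget shown is an assumption):
#
# >>> rop.find_gadget(['pop rdi', 'ret'])   # doctest: +SKIP
# Gadget(0x400683, ['pop rdi', 'ret'], ['rdi'], 0x10)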
def raw(self, value):
"""Adds a raw integer or string to the ROP chain.
If your architecture requires aligned values, then make
sure that any given string is aligned!
Arguments:
data(int/bytes): The raw value to put onto the rop chain.
>>> context.clear(arch='i386')
>>> rop = ROP([])
>>> rop.raw('AAAAAAAA')
>>> rop.raw('BBBBBBBB')
>>> rop.raw('CCCCCCCC')
>>> print(rop.dump())
0x0000: b'AAAA' 'AAAAAAAA'
0x0004: b'AAAA'
0x0008: b'BBBB' 'BBBBBBBB'
0x000c: b'BBBB'
0x0010: b'CCCC' 'CCCCCCCC'
0x0014: b'CCCC'
"""
if self.migrated:
log.error('Cannot append to a migrated chain')
self._chain.append(value)
def migrate(self, next_base):
"""Explicitly set $sp, by using a ``leave; ret`` gadget"""
if isinstance(next_base, ROP):
next_base = next_base.base
pop_sp = self.rsp or self.esp
pop_bp = self.rbp or self.ebp
leave = self.leave
if pop_sp and len(pop_sp.regs) == 1:
self.raw(pop_sp)
self.raw(next_base)
elif pop_bp and leave and len(pop_bp.regs) == 1:
self.raw(pop_bp)
self.raw(next_base - context.bytes)
self.raw(leave)
else:
log.error('Cannot find the gadgets to migrate')
self.migrated = True
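# Typical pivot sketch (the second-stage chain address is an assumption;
# both chains must agree on badchars and word size):
#
# >>> stage2 = ROP(elf, base=0x601800)   # doctest: +SKIP
# >>> rop.migrate(stage2)                # doctest: +SKIP
# >>> rop.migrated                       # doctest: +SKIP
# True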
def __bytes__(self):
"""Returns: Raw bytes of the ROP chain"""
return self.chain()
def __flat__(self):
return self.chain()
def __flat_at__(self, address):
return self.chain(address)
def __get_cachefile_name(self, files):
"""Given an ELF or list of ELF objects, return a cache file for the set of files"""
cachedir = os.path.join(context.cache_dir, 'rop-cache')
if not os.path.exists(cachedir):
os.mkdir(cachedir)
if isinstance(files, ELF):
files = [files]
sha256 = hashlib.sha256()
for elf_data in sorted(elf.get_data() for elf in files):
sha256.update(elf_data)
return os.path.join(cachedir, sha256.hexdigest())
@staticmethod
def clear_cache():
"""Clears the ROP gadget cache"""
cachedir = os.path.join(context.cache_dir, 'rop-cache')
shutil.rmtree(cachedir)
def __cache_load(self, elf):
filename = self.__get_cachefile_name(elf)
if not os.path.exists(filename):
return None
gadgets = eval(open(filename).read())
gadgets = {k - elf.load_addr + elf.address:v for k, v in gadgets.items()}
log.info_once('Loaded %s cached gadgets for %r', len(gadgets), elf.file.name)
return gadgets
def __cache_save(self, elf, data):
data = {k + elf.load_addr - elf.address:v for k, v in data.items()}
open(self.__get_cachefile_name(elf), 'w+').write(repr(data))
def __load(self):
"""Load all ROP gadgets for the selected ELF files"""
#
# We accept only instructions that look like these.
#
# - leave
# - pop reg
# - add $sp, <hexadecimal value>
# - ret
#
# Currently, ROPgadget does not detect multi-byte "C2" ret.
# https://github.com/JonathanSalwan/ROPgadget/issues/53
#
pop = re.compile(r'^pop (.{2,3})')
add = re.compile(r'^add [er]sp, ((?:0[xX])?[0-9a-fA-F]+)$')
ret = re.compile(r'^ret$')
leave = re.compile(r'^leave$')
int80 = re.compile(r'int +0x80')
syscall = re.compile(r'^syscall$')
sysenter = re.compile(r'^sysenter$')
#
# Validation routine
#
# >>> valid('pop eax')
# True
# >>> valid('add rax, 0x24')
# False
# >>> valid('add esp, 0x24')
# True
# >>> valid('add esp, esi')
# False
#
valid = lambda insn: any(map(lambda pattern: pattern.match(insn), [pop,add,ret,leave,int80,syscall,sysenter]))
#
# Currently, ropgadget.args.Args() doesn't take any arguments, and pulls
# only from sys.argv. Preserve it through this call. We also
# monkey-patch sys.stdout to suppress output from ropgadget.
#
argv = sys.argv
stdout = sys.stdout
class Wrapper:
def __init__(self, fd):
self._fd = fd
def write(self, s):
pass
def __getattr__(self, k):
return self._fd.__getattribute__(k)
gadgets = {}
for elf in self.elfs:
cache = self.__cache_load(elf)
if cache:
gadgets.update(cache)
continue
log.info_once('Loading gadgets for %r' % elf.path)
try:
sys.stdout = Wrapper(sys.stdout)
import ropgadget
sys.argv = ['ropgadget', '--binary', elf.path, '--only', 'sysenter|syscall|int|add|pop|leave|ret', '--nojop', '--multibr']
args = ropgadget.args.Args().getArgs()
core = ropgadget.core.Core(args)
core.do_binary(elf.path)
core.do_load(0)
finally:
sys.argv = argv
sys.stdout = stdout
elf_gadgets = {}
for gadget in core._Core__gadgets:
address = gadget['vaddr'] - elf.load_addr + elf.address
insns = [ g.strip() for g in gadget['gadget'].split(';') ]
if all(map(valid, insns)):
elf_gadgets[address] = insns
self.__cache_save(elf, elf_gadgets)
gadgets.update(elf_gadgets)
#
# For each gadget we decided to keep, find out how much it moves the stack,
# and log which registers it modifies.
#
self.gadgets = {}
self.pivots = {}
frame_regs = {
4: ['ebp', 'esp'],
8: ['rbp', 'rsp']
}[context.bytes]
for addr, insns in gadgets.items():
# Filter out gadgets by address against badchars
if set(pack(addr)) & self._badchars:
continue
sp_move = 0
regs = []
for insn in insns:
if pop.match(insn):
regs.append(pop.match(insn).group(1))
sp_move += context.bytes
elif add.match(insn):
sp_move += int(add.match(insn).group(1), 16)
elif ret.match(insn):
sp_move += context.bytes
elif leave.match(insn):
#
# HACK: Since this modifies ESP directly, this should
# never be returned as a 'normal' ROP gadget that
# simply 'increments' the stack.
#
# As such, the 'move' is set to a very large value,
# to prevent .search() from returning it unless $sp
# is specified as a register.
#
sp_move += 9999999999
regs += frame_regs
# Permit duplicates, because blacklisting bytes in the gadget
# addresses may result in us needing the dupes.
self.gadgets[addr] = Gadget(addr, insns, regs, sp_move)
# Don't use 'pop esp' for pivots
if not set(['rsp', 'esp']) & set(regs):
self.pivots[sp_move] = addr
leave = self.search(regs=frame_regs, order='regs')
if leave and leave.regs != frame_regs:
leave = None
self.leave = leave
def __repr__(self):
return 'ROP(%r)' % self.elfs
def search_iter(self, move=None, regs=None):
"""
Iterate through all gadgets which move the stack pointer by
*at least* ``move`` bytes, and which allow you to set all
registers in ``regs``.
"""
move = move or 0
regs = set(regs or ())
for addr, gadget in self.gadgets.items():
addr_bytes = set(pack(gadget.address))
if addr_bytes & self._badchars: continue
if gadget.insns[-1] != 'ret': continue
if gadget.move < move: continue
if not (regs <= set(gadget.regs)): continue
yield gadget
def search(self, move = 0, regs = None, order = 'size'):
"""Search for a gadget which matches the specified criteria.
Arguments:
move(int): Minimum number of bytes by which the stack
pointer is adjusted.
regs(list): Minimum list of registers which are popped off the
stack.
order(str): Either the string 'size' or 'regs'. Decides how to
order multiple gadgets that fulfill the requirements.
The search will try to minimize the number of bytes popped beyond those
requested, the number of registers touched beyond those requested, and
the address.
If ``order == 'size'``, then gadgets are compared lexicographically
by ``(total_moves, total_regs, addr)``, otherwise by ``(total_regs, total_moves, addr)``.
Returns:
A :class:`.Gadget` object
"""
matches = self.search_iter(move, regs)
if matches is None:
return None
# Search for an exact match, save the closest match
key = {
'size': lambda g: (g.move, len(g.regs), g.address),
'regs': lambda g: (len(g.regs), g.move, g.address)
}[order]
try:
result = min(matches, key=key)
except ValueError:
return None
# Check for magic 9999999... value used by 'leave; ret'
if move and result.move == 9999999999:
return None
return result
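# Search usage sketch (whether a match exists depends on the binary's
# gadget set):
#
# >>> rop.search(move=0x20)                           # doctest: +SKIP
# >>> rop.search(regs=['rdi', 'rsi'], order='regs')   # doctest: +SKIP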
def ret2csu(self, edi=Padding('edi'), rsi=Padding('rsi'),
rdx=Padding('rdx'), rbx=Padding('rbx'), rbp=Padding('rbp'),
r12=Padding('r12'), r13=Padding('r13'), r14=Padding('r14'),
r15=Padding('r15'), call=None):
"""Build a ret2csu ROPchain
Arguments:
edi, rsi, rdx: Three primary registers to populate
rbx, rbp, r12, r13, r14, r15: Optional registers to populate
call: Pointer to the address of a function to call during
second gadget. If None then use the address of _fini in the
.dynamic section. .got.plt entries are a good target. Required
for PIE binaries.
Test:
>>> context.clear(binary=pwnlib.data.elf.ret2dlresolve.get("amd64"))
>>> r = ROP(context.binary)
>>> r.ret2csu(1, 2, 3, 4, 5, 6, 7, 8, 9)
>>> r.call(0xdeadbeef)
>>> print(r.dump())
0x0000: 0x40058a
0x0008: 0x0
0x0010: 0x1
0x0018: 0x600e48
0x0020: 0x1
0x0028: 0x2
0x0030: 0x3
0x0038: 0x400570
0x0040: b'qaaaraaa' <add rsp, 8>
0x0048: 0x4
0x0050: 0x5
0x0058: 0x6
0x0060: 0x7
0x0068: 0x8
0x0070: 0x9
0x0078: 0xdeadbeef 0xdeadbeef()
>>> open('core','w').close(); os.unlink('core') # remove any old core file for the tests
>>> p = process()
>>> p.send(fit({64+context.bytes: r}))
>>> p.wait(0.5)
>>> core = p.corefile
>>> hex(core.pc)
'0xdeadbeef'
>>> core.rdi, core.rsi, core.rdx, core.rbx, core.rbp, core.r12, core.r13, core.r14, core.r15
(1, 2, 3, 4, 5, 6, 7, 8, 9)
"""
if self.migrated:
log.error('Cannot append to a migrated chain')
# Ensure 'edi' argument is packable
try:
packing.p32(edi)
except struct.error:
log.error('edi must be a 32bit value')
# Find an appropriate, non-library ELF.
# Prioritise non-PIE binaries so we can use _fini
exes = [elf for elf in self.elfs if not elf.library and elf.bits == 64]
if not exes:
log.error('No non-library binaries in [elfs]')
nonpie = csu = None
for elf in exes:
if not elf.pie:
if '__libc_csu_init' in elf.symbols:
break
nonpie = elf
elif '__libc_csu_init' in elf.symbols:
csu = elf
if elf.pie:
if nonpie:
elf = nonpie
elif csu:
elf = csu
from .ret2csu import ret2csu
ret2csu(self, elf, edi, rsi, rdx, rbx, rbp, r12, r13, r14, r15, call)
def ret2dlresolve(self, dlresolve):
elf = next(elf for elf in self.elfs if elf.get_section_by_name(".plt"))
elf_base = elf.address if elf.pie else 0
plt_init = elf.get_section_by_name(".plt").header.sh_addr + elf_base
log.debug("PLT_INIT: %#x", plt_init)
reloc_index = dlresolve.reloc_index
real_args = dlresolve.real_args
call = Call("[plt_init] " + dlresolve.symbol.decode(),
plt_init,
dlresolve.real_args,
before=[reloc_index])
self.raw(call)
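# Sketch pairing this with pwnlib's Ret2dlresolvePayload; treat the exact
# constructor arguments as an assumption if your pwntools version differs:
#
# >>> dlresolve = Ret2dlresolvePayload(elf, symbol='system', args=['/bin/sh'])  # doctest: +SKIP
# >>> rop.ret2dlresolve(dlresolve)                                              # doctest: +SKIP
# >>> payload = rop.chain() + dlresolve.payload                                 # doctest: +SKIP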
def __getattr__(self, attr):
"""Helper to make finding ROP gadgets easier.
Also provides a shorthand for ``.call()``:
``rop.function(args)`` is equivalent to ``rop.call(function, args)``
>>> context.clear(arch='i386')
>>> elf=ELF(which('bash'))
>>> rop=ROP([elf])
>>> rop.rdi == rop.search(regs=['rdi'], order = 'regs')
True
>>> rop.r13_r14_r15_rbp == rop.search(regs=['r13','r14','r15','rbp'], order = 'regs')
True
>>> rop.ret_8 == rop.search(move=8)
True
>>> rop.ret is not None
True
>>> with context.local(arch='amd64', bits='64'):
... r = ROP(ELF.from_assembly('syscall; ret'))
>>> r.syscall is not None
True
"""
gadget = collections.namedtuple('gadget', ['address', 'details'])
if attr in self.__dict__ \
or attr in self.BAD_ATTRS \
or attr.startswith('_'):
raise AttributeError('ROP instance has no attribute %r' % attr)
#
# Check for 'ret' or 'ret_X'
#
if attr.startswith('ret'):
count = context.bytes
if '_' in attr:
count = int(attr.split('_')[1])
return self.search(move=count)
#
# Check for 'jmp_esp'('i386') or 'jmp_rsp'('amd64')
#
if attr == 'jmp_esp' and context.arch == 'i386' \
or attr == 'jmp_rsp' and context.arch == 'amd64':
jmp_sp = {'i386': 'jmp esp',
'amd64': 'jmp rsp'
}[context.arch]
insn_asm = b'\xff\xe4'
for elf in self.elfs:
for addr in elf.search(insn_asm, executable = True):
if set(pack(addr)) & self._badchars:
continue
return Gadget(addr, [jmp_sp], [], context.bytes)
return None
mapping = {'int80': 'int 0x80',
'syscall': 'syscall',
'sysenter': 'sysenter'}
if attr in mapping:
for each in self.gadgets:
if self.gadgets[each].insns[0] == mapping[attr]:
return gadget(each, self.gadgets[each])
return None
#
# Check for a '_'-delimited list of registers
#
if all(map(lambda x: x[-2:] in self.X86_SUFFIXES, attr.split('_'))):
return self.search(regs=attr.split('_'), order='regs')
#
# Otherwise, assume it's a rop.call() shorthand
#
def call(*args):
return self.call(attr, args)
return call
def __setattr__(self, attr, value):
"""Helper for setting registers.
This convenience feature allows one to set the values of registers
with simple python assignment syntax.
Warning:
Only one register is set at a time (one per rop chain).
This may lead to previously set registers being overwritten!
Note:
If you would like to set multiple registers in as few rop chains
as possible, see :meth:`__call__`.
>>> context.clear(arch='amd64')
>>> assembly = 'pop rax; pop rdi; pop rsi; ret; pop rax; ret;'
>>> e = ELF.from_assembly(assembly)
>>> r = ROP(e)
>>> r.rax = 0xdead
>>> r.rdi = 0xbeef
>>> r.rsi = 0xcafe
>>> print(r.dump())
0x0000: 0x10000004 pop rax; ret
0x0008: 0xdead
0x0010: 0x10000001 pop rdi; pop rsi; ret
0x0018: 0xbeef
0x0020: b'iaaajaaa' <pad rsi>
0x0028: 0x10000002 pop rsi; ret
0x0030: 0xcafe
"""
if attr in self.BAD_ATTRS:
raise AttributeError('ROP instance has no attribute %r' % attr)
if attr[-2:] in self.X86_SUFFIXES: # handle setting registers
self({attr: value})
return
# Otherwise, perform usual attribute assignment
self.__dict__[attr] = value
| 34.680786 | 138 | 0.540667 |
06b63c495c956521552e6540dae0cff7ed3f5723 | 2239 | py | Python | src/tests/unittests/data_set/test_memory_data_set_io_reader.py | QuTech-Delft/qilib | a87892f8a9977ed338c36e8fb1e262b47449cf44 | ["MIT"] | 1 | 2019-02-20T16:56:30.000Z | 2019-02-20T16:56:30.000Z | src/tests/unittests/data_set/test_memory_data_set_io_reader.py | QuTech-Delft/qilib | a87892f8a9977ed338c36e8fb1e262b47449cf44 | ["MIT"] | 22 | 2019-02-16T06:10:55.000Z | 2022-02-15T18:52:34.000Z | src/tests/unittests/data_set/test_memory_data_set_io_reader.py | QuTech-Delft/qilib | a87892f8a9977ed338c36e8fb1e262b47449cf44 | ["MIT"] | 2 | 2020-02-04T08:46:21.000Z | 2020-10-18T16:31:58.000Z | import unittest
from unittest.mock import MagicMock
from qilib.data_set import DataSet, DataArray
from qilib.data_set.memory_data_set_io_reader import MemoryDataSetIOReader
from qilib.utils.memory_storage_queue import MemoryStorageQueue
class TestMemoryDataSetIOReader(unittest.TestCase):
def test_sync_from_storage(self):
self._test_sync_from_storage(-1)
def test_load_is_not_implemented(self):
error_args = (NotImplementedError, 'The load function cannot be used with the MemoryDataSetIOReader!')
self.assertRaisesRegex(*error_args, MemoryDataSetIOReader.load)
def test_sync_from_storage_with_timeout(self):
self._test_sync_from_storage(0.01)
def test_read_from_empty_queue_timeout(self):
timeout = 0.001
queue = MemoryStorageQueue()
data_set_io_reader = MemoryDataSetIOReader(queue)
data_set = MagicMock(spec=DataSet)
data_set_io_reader.bind_data_set(data_set)
error_args = (TimeoutError, '')
self.assertRaisesRegex(*error_args, data_set_io_reader.sync_from_storage, timeout)
def test_sync_from_storage_none_blocking(self):
self._test_sync_from_storage(0)
def test_read_from_empty_queue(self):
timeout = 0
queue = MemoryStorageQueue()
data_set_io_reader = MemoryDataSetIOReader(queue)
data_set = MagicMock(spec=DataSet)
data_set_io_reader.bind_data_set(data_set)
data_set_io_reader.sync_from_storage(timeout)
data_set.add_array.assert_not_called()
data_set.add_data.assert_not_called()
def _test_sync_from_storage(self, timeout):
queue = MemoryStorageQueue()
data_set_io_reader = MemoryDataSetIOReader(queue)
data_set = MagicMock(spec=DataSet)
data_set_io_reader.bind_data_set(data_set)
data_array = DataArray(name='bla', label='blu', shape=(2, 2))
queue.add_array(data_array)
queue.add_data(4, {'z': [42]})
queue.add_meta_data('name', 'bob')
data_set_io_reader.sync_from_storage(timeout)
self.assertEqual('bob', data_set.name)
data_set.add_data.assert_called_once_with(4, {'z': [42]})
data_set.add_array.assert_called_once_with(data_array)
| 39.280702 | 110 | 0.73247 |
c8bac33614c72b287299707efa487ca0050298be | 20816 | py | Python | python_modules/libraries/dagster-airflow/dagster_airflow/factory.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | ["Apache-2.0"] | null | null | null | python_modules/libraries/dagster-airflow/dagster_airflow/factory.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | ["Apache-2.0"] | null | null | null | python_modules/libraries/dagster-airflow/dagster_airflow/factory.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | ["Apache-2.0"] | null | null | null | import datetime
import os
import re
from collections import namedtuple
from airflow import DAG
from airflow.models.baseoperator import BaseOperator
from dagster_airflow.operators.util import check_storage_specified
from dagster import check, seven
from dagster.core.definitions.reconstruct import ReconstructableRepository
from dagster.core.execution.api import create_execution_plan
from dagster.core.instance import DagsterInstance, is_dagster_home_set
from dagster.core.instance.ref import InstanceRef
from dagster.core.snap import ExecutionPlanSnapshot, PipelineSnapshot, snapshot_from_execution_plan
from dagster.utils.backcompat import canonicalize_backcompat_args
from .compile import coalesce_execution_steps
from .operators.docker_operator import DagsterDockerOperator
from .operators.python_operator import DagsterPythonOperator
DEFAULT_ARGS = {
"depends_on_past": False,
"email": ["airflow@example.com"],
"email_on_failure": False,
"email_on_retry": False,
"owner": "airflow",
"retries": 1,
"retry_delay": datetime.timedelta(0, 300),
"start_date": datetime.datetime(1900, 1, 1, 0, 0),
}
# Airflow DAG names are not allowed to be longer than 250 chars
AIRFLOW_MAX_DAG_NAME_LEN = 250
def _make_dag_description(pipeline_name):
return """Editable scaffolding autogenerated by dagster-airflow from pipeline {pipeline_name}
""".format(
pipeline_name=pipeline_name
)
def _rename_for_airflow(name):
"""Modify pipeline name for Airflow to meet constraints on DAG names:
https://github.com/apache/airflow/blob/1.10.3/airflow/utils/helpers.py#L52-L63
Here, we just substitute underscores for illegal characters to avoid imposing Airflow's
constraints on our naming schemes.
"""
return re.sub(r"[^\w\-\.]", "_", name)[:AIRFLOW_MAX_DAG_NAME_LEN]
class DagsterOperatorInvocationArgs(
namedtuple(
"DagsterOperatorInvocationArgs",
"recon_repo pipeline_name run_config mode step_keys instance_ref pipeline_snapshot "
"execution_plan_snapshot parent_pipeline_snapshot",
)
):
def __new__(
cls,
recon_repo,
pipeline_name,
run_config,
mode,
step_keys,
instance_ref,
pipeline_snapshot,
execution_plan_snapshot,
parent_pipeline_snapshot,
):
return super(DagsterOperatorInvocationArgs, cls).__new__(
cls,
recon_repo=recon_repo,
pipeline_name=pipeline_name,
run_config=run_config,
mode=mode,
step_keys=step_keys,
instance_ref=instance_ref,
pipeline_snapshot=pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=parent_pipeline_snapshot,
)
class DagsterOperatorParameters(
namedtuple(
"_DagsterOperatorParameters",
(
"recon_repo pipeline_name run_config "
"mode task_id step_keys dag instance_ref op_kwargs pipeline_snapshot "
"execution_plan_snapshot parent_pipeline_snapshot"
),
)
):
def __new__(
cls,
pipeline_name,
task_id,
recon_repo=None,
run_config=None,
mode=None,
step_keys=None,
dag=None,
instance_ref=None,
op_kwargs=None,
pipeline_snapshot=None,
execution_plan_snapshot=None,
parent_pipeline_snapshot=None,
):
pipeline_def = recon_repo.get_definition().get_pipeline(pipeline_name)
if mode is None:
mode = pipeline_def.get_default_mode_name()
mode_def = pipeline_def.get_mode_definition(mode)
check_storage_specified(pipeline_def, mode_def)
return super(DagsterOperatorParameters, cls).__new__(
cls,
recon_repo=check.opt_inst_param(recon_repo, "recon_repo", ReconstructableRepository),
pipeline_name=check.str_param(pipeline_name, "pipeline_name"),
run_config=check.opt_dict_param(run_config, "run_config", key_type=str),
mode=check.opt_str_param(mode, "mode"),
task_id=check.str_param(task_id, "task_id"),
step_keys=check.opt_list_param(step_keys, "step_keys", of_type=str),
dag=check.opt_inst_param(dag, "dag", DAG),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
op_kwargs=check.opt_dict_param(op_kwargs, "op_kwargs", key_type=str).copy(),
pipeline_snapshot=check.inst_param(
pipeline_snapshot, "pipeline_snapshot", PipelineSnapshot
),
execution_plan_snapshot=check.inst_param(
execution_plan_snapshot, "execution_plan_snapshot", ExecutionPlanSnapshot
),
parent_pipeline_snapshot=check.opt_inst_param(
parent_pipeline_snapshot, "parent_pipeline_snapshot", PipelineSnapshot
),
)
@property
def invocation_args(self):
return DagsterOperatorInvocationArgs(
recon_repo=self.recon_repo,
pipeline_name=self.pipeline_name,
run_config=self.run_config,
mode=self.mode,
step_keys=self.step_keys,
instance_ref=self.instance_ref,
pipeline_snapshot=self.pipeline_snapshot,
execution_plan_snapshot=self.execution_plan_snapshot,
parent_pipeline_snapshot=self.parent_pipeline_snapshot,
)
def _make_airflow_dag(
recon_repo,
job_name,
run_config=None,
mode=None,
instance=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
operator=DagsterPythonOperator,
):
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
check.str_param(job_name, "job_name")
run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
mode = check.opt_str_param(mode, "mode")
# Default to use the (persistent) system temp directory rather than a TemporaryDirectory,
# which would not be consistent between Airflow task invocations.
if instance is None:
if is_dagster_home_set():
instance = DagsterInstance.get()
else:
instance = DagsterInstance.local_temp(tempdir=seven.get_system_temp_directory())
check.inst_param(instance, "instance", DagsterInstance)
# Only used for Airflow; internally we continue to use pipeline.name
dag_id = check.opt_str_param(dag_id, "dag_id", _rename_for_airflow(job_name))
dag_description = check.opt_str_param(
dag_description, "dag_description", _make_dag_description(job_name)
)
check.class_param(operator, "operator", superclass=BaseOperator)
dag_kwargs = dict(
{"default_args": DEFAULT_ARGS},
**check.opt_dict_param(dag_kwargs, "dag_kwargs", key_type=str),
)
op_kwargs = check.opt_dict_param(op_kwargs, "op_kwargs", key_type=str)
dag = DAG(dag_id=dag_id, description=dag_description, **dag_kwargs)
pipeline = recon_repo.get_definition().get_pipeline(job_name)
if mode is None:
mode = pipeline.get_default_mode_name()
execution_plan = create_execution_plan(pipeline, run_config, mode=mode)
tasks = {}
coalesced_plan = coalesce_execution_steps(execution_plan)
for solid_handle, solid_steps in coalesced_plan.items():
step_keys = [step.key for step in solid_steps]
operator_parameters = DagsterOperatorParameters(
recon_repo=recon_repo,
pipeline_name=job_name,
run_config=run_config,
mode=mode,
task_id=solid_handle,
step_keys=step_keys,
dag=dag,
instance_ref=instance.get_ref(),
op_kwargs=op_kwargs,
pipeline_snapshot=pipeline.get_pipeline_snapshot(),
execution_plan_snapshot=snapshot_from_execution_plan(
execution_plan, pipeline_snapshot_id=pipeline.get_pipeline_snapshot_id()
),
)
task = operator(operator_parameters)
tasks[solid_handle] = task
for solid_step in solid_steps:
for step_input in solid_step.step_inputs:
for key in step_input.dependency_keys:
prev_solid_handle = execution_plan.get_step_by_key(key).solid_handle.to_string()
if solid_handle != prev_solid_handle:
tasks[prev_solid_handle].set_downstream(task)
return (dag, [tasks[solid_handle] for solid_handle in coalesced_plan.keys()])
def make_airflow_dag(
module_name,
job_name,
run_config=None,
mode=None,
instance=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
pipeline_name=None,
):
"""Construct an Airflow DAG corresponding to a given Dagster job/pipeline.
Tasks in the resulting DAG will execute the Dagster logic they encapsulate as a Python
callable, run by an underlying :py:class:`PythonOperator <airflow:PythonOperator>`. As a
consequence, dagster, any Python dependencies required by your solid logic, and the module
containing your pipeline definition must all be available in the Python environment within which
your Airflow tasks execute. If you cannot install requirements into this environment, or you
are looking for a containerized solution to provide better isolation, see instead
:py:func:`make_airflow_dag_containerized`.
This function should be invoked in an Airflow DAG definition file, such as that created by an
invocation of the dagster-airflow scaffold CLI tool.
Args:
module_name (str): The name of the importable module in which the pipeline/job definition can be
found.
job_name (str): The name of the job definition.
run_config (Optional[dict]): The config, if any, with which to compile
the pipeline/job to an execution plan, as a Python dict.
mode (Optional[str]): The mode in which to execute the pipeline.
instance (Optional[DagsterInstance]): The Dagster instance to use to execute the pipeline/job.
dag_id (Optional[str]): The id to use for the compiled Airflow DAG (passed through to
:py:class:`DAG <airflow:airflow.models.DAG>`).
dag_description (Optional[str]): The description to use for the compiled Airflow DAG
(passed through to :py:class:`DAG <airflow:airflow.models.DAG>`)
dag_kwargs (Optional[dict]): Any additional kwargs to pass to the Airflow
:py:class:`DAG <airflow:airflow.models.DAG>` constructor, including ``default_args``.
op_kwargs (Optional[dict]): Any additional kwargs to pass to the underlying Airflow
operator (a subclass of
:py:class:`PythonOperator <airflow:airflow.operators.python_operator.PythonOperator>`).
pipeline_name (str): (legacy) The name of the pipeline definition.
Returns:
(airflow.models.DAG, List[airflow.models.BaseOperator]): The generated Airflow DAG, and a
list of its constituent tasks.
"""
check.str_param(module_name, "module_name")
job_name = canonicalize_backcompat_args(
new_val=job_name,
new_arg="job_name",
old_val=pipeline_name,
old_arg="pipeline_name",
breaking_version="future versions",
coerce_old_to_new=lambda val: val,
)
recon_repo = ReconstructableRepository.for_module(module_name, job_name, os.getcwd())
return _make_airflow_dag(
recon_repo=recon_repo,
job_name=job_name,
run_config=run_config,
mode=mode,
instance=instance,
dag_id=dag_id,
dag_description=dag_description,
dag_kwargs=dag_kwargs,
op_kwargs=op_kwargs,
)
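# Minimal usage sketch for an Airflow DAG definition file (module and job
# names are hypothetical):
#
# dag, tasks = make_airflow_dag(
#     module_name="my_project.jobs",
#     job_name="daily_etl",
#     run_config={"solids": {}},
# )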
def make_airflow_dag_for_operator(
recon_repo,
job_name,
operator,
run_config=None,
mode=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
pipeline_name=None,
):
"""Construct an Airflow DAG corresponding to a given Dagster job/pipeline and custom operator.
`Custom operator template <https://github.com/dagster-io/dagster/blob/master/python_modules/dagster-test/dagster_test/dagster_airflow/custom_operator.py>`_
Tasks in the resulting DAG will execute the Dagster logic they encapsulate run by the given
Operator :py:class:`BaseOperator <airflow.models.BaseOperator>`. If you
are looking for a containerized solution to provide better isolation, see instead
:py:func:`make_airflow_dag_containerized`.
This function should be invoked in an Airflow DAG definition file, such as that created by an
invocation of the dagster-airflow scaffold CLI tool.
Args:
recon_repo (:class:`dagster.ReconstructableRepository`): reference to a Dagster RepositoryDefinition
that can be reconstructed in another process
job_name (str): The name of the job definition.
operator (type): The operator to use. Must be a class that inherits from
:py:class:`BaseOperator <airflow.models.BaseOperator>`
run_config (Optional[dict]): The config, if any, with which to compile
the pipeline to an execution plan, as a Python dict.
mode (Optional[str]): The mode in which to execute the pipeline.
instance (Optional[DagsterInstance]): The Dagster instance to use to execute the pipeline.
dag_id (Optional[str]): The id to use for the compiled Airflow DAG (passed through to
:py:class:`DAG <airflow:airflow.models.DAG>`).
dag_description (Optional[str]): The description to use for the compiled Airflow DAG
(passed through to :py:class:`DAG <airflow:airflow.models.DAG>`)
dag_kwargs (Optional[dict]): Any additional kwargs to pass to the Airflow
:py:class:`DAG <airflow:airflow.models.DAG>` constructor, including ``default_args``.
op_kwargs (Optional[dict]): Any additional kwargs to pass to the underlying Airflow
operator.
pipeline_name (str): (legacy) The name of the pipeline definition.
Returns:
(airflow.models.DAG, List[airflow.models.BaseOperator]): The generated Airflow DAG, and a
list of its constituent tasks.
"""
check.class_param(operator, "operator", superclass=BaseOperator)
job_name = canonicalize_backcompat_args(
new_val=job_name,
new_arg="job_name",
old_val=pipeline_name,
old_arg="pipeline_name",
breaking_version="future versions",
coerce_old_to_new=lambda val: val,
)
return _make_airflow_dag(
recon_repo=recon_repo,
job_name=job_name,
run_config=run_config,
mode=mode,
dag_id=dag_id,
dag_description=dag_description,
dag_kwargs=dag_kwargs,
op_kwargs=op_kwargs,
operator=operator,
)
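# Sketch with a custom operator (MyOperator is a hypothetical BaseOperator
# subclass that accepts DagsterOperatorParameters):
#
# recon_repo = ReconstructableRepository.for_module(
#     "my_project.repo", "daily_etl", os.getcwd()
# )
# dag, tasks = make_airflow_dag_for_operator(recon_repo, "daily_etl", MyOperator)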
def make_airflow_dag_for_recon_repo(
recon_repo,
job_name,
run_config=None,
mode=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
pipeline_name=None,
):
job_name = canonicalize_backcompat_args(
new_val=job_name,
new_arg="job_name",
old_val=pipeline_name,
old_arg="pipeline_name",
breaking_version="future versions",
coerce_old_to_new=lambda val: val,
)
return _make_airflow_dag(
recon_repo=recon_repo,
job_name=job_name,
run_config=run_config,
mode=mode,
dag_id=dag_id,
dag_description=dag_description,
dag_kwargs=dag_kwargs,
op_kwargs=op_kwargs,
)
def make_airflow_dag_containerized(
module_name,
job_name,
image,
run_config=None,
mode=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
pipeline_name=None,
):
"""Construct a containerized Airflow DAG corresponding to a given Dagster job/pipeline.
Tasks in the resulting DAG will execute the Dagster logic they encapsulate using a subclass of
:py:class:`DockerOperator <airflow:airflow.operators.docker_operator.DockerOperator>`. As a
consequence, dagster, any Python dependencies required by your solid logic, and the module
containing your pipeline definition must all be available in the container spun up by this operator.
Typically you'll want to install these requirements onto the image you're using.
This function should be invoked in an Airflow DAG definition file, such as that created by an
invocation of the dagster-airflow scaffold CLI tool.
Args:
module_name (str): The name of the importable module in which the pipeline/job definition can be
found.
job_name (str): The name of the job definition.
image (str): The name of the Docker image to use for execution (passed through to
:py:class:`DockerOperator <airflow:airflow.operators.docker_operator.DockerOperator>`).
run_config (Optional[dict]): The config, if any, with which to compile
the pipeline/job to an execution plan, as a Python dict.
mode (Optional[str]): The mode in which to execute the pipeline.
dag_id (Optional[str]): The id to use for the compiled Airflow DAG (passed through to
:py:class:`DAG <airflow:airflow.models.DAG>`).
dag_description (Optional[str]): The description to use for the compiled Airflow DAG
(passed through to :py:class:`DAG <airflow:airflow.models.DAG>`)
dag_kwargs (Optional[dict]): Any additional kwargs to pass to the Airflow
:py:class:`DAG <airflow:airflow.models.DAG>` constructor, including ``default_args``.
op_kwargs (Optional[dict]): Any additional kwargs to pass to the underlying Airflow
operator (a subclass of
:py:class:`DockerOperator <airflow:airflow.operators.docker_operator.DockerOperator>`).
pipeline_name (str): (legacy) The name of the pipeline definition.
Returns:
(airflow.models.DAG, List[airflow.models.BaseOperator]): The generated Airflow DAG, and a
list of its constituent tasks.
"""
check.str_param(module_name, "module_name")
check.str_param(job_name, "job_name")
check.str_param(image, "image")
check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
check.opt_str_param(dag_id, "dag_id")
check.opt_str_param(dag_description, "dag_description")
check.opt_dict_param(dag_kwargs, "dag_kwargs")
check.opt_dict_param(op_kwargs, "op_kwargs")
job_name = canonicalize_backcompat_args(
new_val=job_name,
new_arg="job_name",
old_val=pipeline_name,
old_arg="pipeline_name",
breaking_version="future versions",
coerce_old_to_new=lambda val: val,
)
recon_repo = ReconstructableRepository.for_module(module_name, job_name, os.getcwd())
op_kwargs = check.opt_dict_param(op_kwargs, "op_kwargs", key_type=str)
op_kwargs["image"] = image
return _make_airflow_dag(
recon_repo=recon_repo,
job_name=job_name,
run_config=run_config,
mode=mode,
dag_id=dag_id,
dag_description=dag_description,
dag_kwargs=dag_kwargs,
op_kwargs=op_kwargs,
operator=DagsterDockerOperator,
)
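# Containerized usage sketch (the image tag is hypothetical; dagster and
# your module must be installed in that image):
#
# dag, tasks = make_airflow_dag_containerized(
#     module_name="my_project.jobs",
#     job_name="daily_etl",
#     image="myrepo/dagster-airflow:latest",
# )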
def make_airflow_dag_containerized_for_recon_repo(
recon_repo,
job_name,
image,
run_config=None,
mode=None,
dag_id=None,
dag_description=None,
dag_kwargs=None,
op_kwargs=None,
instance=None,
pipeline_name=None,
):
check.inst_param(recon_repo, "recon_repo", ReconstructableRepository)
check.str_param(job_name, "job_name")
check.str_param(image, "image")
check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
check.opt_str_param(dag_id, "dag_id")
check.opt_str_param(dag_description, "dag_description")
check.opt_dict_param(dag_kwargs, "dag_kwargs")
op_kwargs = check.opt_dict_param(op_kwargs, "op_kwargs", key_type=str)
check.opt_str_param(pipeline_name, "pipeline_name")
op_kwargs["image"] = image
job_name = canonicalize_backcompat_args(
new_val=job_name,
new_arg="job_name",
old_val=pipeline_name,
old_arg="pipeline_name",
breaking_version="future versions",
coerce_old_to_new=lambda val: val,
)
return _make_airflow_dag(
recon_repo=recon_repo,
job_name=job_name,
run_config=run_config,
mode=mode,
dag_id=dag_id,
dag_description=dag_description,
dag_kwargs=dag_kwargs,
op_kwargs=op_kwargs,
operator=DagsterDockerOperator,
instance=instance,
)
| 37.916211 | 159 | 0.693313 |
b231a81da9cfc7c80b4c5a6fe4e0bed9a730174c | 12259 | py | Python | test/gtest-1.10.0/googlemock/scripts/generator/cpp/gmock_class_test.py | EliSchleifer/yaml-cpp | c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde | ["MIT"] | null | null | null | test/gtest-1.10.0/googlemock/scripts/generator/cpp/gmock_class_test.py | EliSchleifer/yaml-cpp | c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde | ["MIT"] | null | null | null | test/gtest-1.10.0/googlemock/scripts/generator/cpp/gmock_class_test.py | EliSchleifer/yaml-cpp | c83fa7ce38fb94a7cb5b85f38d9aef05cbe9ebde | ["MIT"] | null | null | null | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = "nnorwitz@google.com (Neal Norwitz)"
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from cpp import ast, gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return "\n".join([s.lstrip() for s in lines.split("\n")])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, "<test>")
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return "\n".join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testSimpleConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testVirtualDestructor(self):
source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
# The destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testExplicitlyDefaultedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testExplicitlyDeletedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testSimpleOverrideMethod(self):
source = """
class Foo {
public:
int Bar() override;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint());", self.GenerateMethodSource(source)
)
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));",
self.GenerateMethodSource(source),
)
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0(Bar,\nint(void));", self.GenerateMethodSource(source)
)
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD1(Bar,\nvoid(int a));", self.GenerateMethodSource(source)
)
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD2(Bar,\nvoid(int, char));", self.GenerateMethodSource(source)
)
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD2(Bar,\nvoid(int, char));", self.GenerateMethodSource(source)
)
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD2(Bar,\nvoid(int, char));", self.GenerateMethodSource(source)
)
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));",
self.GenerateMethodSource(source),
)
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));",
self.GenerateMethodSource(source),
)
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD2(Bar,\n"
"int(const vector<int>& v, map<int, string>* output));",
self.GenerateMethodSource(source),
)
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD1(Bar,\nvector<int>*(int n));",
self.GenerateMethodSource(source),
)
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
"// The following line won't really compile, as the return\n"
"// type has multiple template arguments. To fix it, use a\n"
"// typedef for the return type.\n"
"MOCK_METHOD0(Bar,\nmap<int, string>());",
self.GenerateMethodSource(source),
)
def testSimpleMethodInTemplatedClass(self):
source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD0_T(Bar,\nint());", self.GenerateMethodSource(source)
)
def testPointerArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C*);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD1(Bar,\nint(C*));", self.GenerateMethodSource(source)
)
def testReferenceArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C&);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD1(Bar,\nint(C&));", self.GenerateMethodSource(source)
)
def testArrayArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C[]);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
"MOCK_METHOD1(Bar,\nint(C[]));", self.GenerateMethodSource(source)
)
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename, it is not read or written.
filename = "<test>"
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return "\n".join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testTemplatedForwardDeclaration(self):
source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testTemplatedClass(self):
source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedef(self):
source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedefWithComma(self):
source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
def testEnumClass(self):
source = """
class Test {
public:
enum class Baz { BAZINGA };
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(expected, self.GenerateMocks(source))
if __name__ == "__main__":
unittest.main()
| 26.420259 | 85 | 0.645159 |
2b50ea2e3b33c54acb2691bbf64f87cae54f6357 | 332 | py | Python | 01Apple/01Apple.py | WeirdCoder/LilyPadOS | 8007257746939de3221ebbeb6b03bfd4352a7ff2 | ["MIT"] | null | null | null | 01Apple/01Apple.py | WeirdCoder/LilyPadOS | 8007257746939de3221ebbeb6b03bfd4352a7ff2 | ["MIT"] | null | null | null | 01Apple/01Apple.py | WeirdCoder/LilyPadOS | 8007257746939de3221ebbeb6b03bfd4352a7ff2 | ["MIT"] | null | null | null | import lcm
import time
from lilylcm import L01Apple
lc = lcm.LCM()
msg = L01Apple()
msg.count = True
def my_handler(channel, data):
lc.publish("01Apple",msg.encode())
subscription = lc.subscribe("05Ebola",my_handler)
try:
while True:
lc.handle()
except Keyboardinterrupt:
pass
lc.unsubscribe(subscription)
| 15.090909 | 49 | 0.710843 |
deb53a6dcf3399907f7bb8ec3ed95b23edc9c99b | 5,499 | py | Python | applications/cli/commands/predict/launch.py | awesome-archive/nauta | 6ba6103421a10dfcd051aef3f7c5a714f6ac9429 | [
"Apache-2.0"
] | null | null | null | applications/cli/commands/predict/launch.py | awesome-archive/nauta | 6ba6103421a10dfcd051aef3f7c5a714f6ac9429 | [
"Apache-2.0"
] | 14 | 2020-09-26T01:27:23.000Z | 2022-02-10T02:14:54.000Z | applications/cli/commands/predict/launch.py | awesome-archive/nauta | 6ba6103421a10dfcd051aef3f7c5a714f6ac9429 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import os
from sys import exit
from typing import Tuple, List
import click
from tabulate import tabulate
from commands.predict.common import start_inference_instance, get_inference_instance_url, INFERENCE_INSTANCE_PREFIX
from commands.experiment.common import validate_experiment_name, validate_pack_params_names
from platform_resources.experiment_utils import generate_name
from cli_state import common_options, pass_state, State
from platform_resources.run import RunStatus
from util.aliascmd import AliasCmd
from util.logger import initialize_logger
from util.system import handle_error
from cli_text_consts import PredictLaunchCmdTexts as Texts
from util.k8s.k8s_info import get_secret, get_kubectl_current_context_namespace, get_service_account
INFERENCE_TEMPLATE = 'tf-inference-stream'
logger = initialize_logger(__name__)
def validate_local_model_location(local_model_location: str):
if not os.path.isdir(local_model_location):
handle_error(
user_msg=Texts.MODEL_DIR_NOT_FOUND_ERROR_MSG.format(local_model_location=local_model_location)
)
exit(2)
@click.command(help=Texts.HELP, short_help=Texts.HELP, cls=AliasCmd, alias='l', options_metavar='[options]')
@click.option('-n', '--name', default=None, help=Texts.HELP_N, callback=validate_experiment_name)
@click.option('-m', '--model-location', help=Texts.HELP_M)
@click.option("-l", "--local-model-location", type=click.Path(), help=Texts.HELP_LOCAL_MODEL_LOCATION)
@click.option('-mn', '--model-name', help=Texts.HELP_MODEL_NAME)
@click.option("-p", "--pack-param", type=(str, str), multiple=True, help=Texts.HELP_P,
callback=validate_pack_params_names)
@click.option("-r", "--requirements", type=click.Path(exists=True, dir_okay=False), required=False, help=Texts.HELP_R)
@common_options()
@pass_state
def launch(state: State, name: str, model_location: str, local_model_location: str, model_name: str,
pack_param: List[Tuple[str, str]], requirements: str):
"""
Starts a new prediction instance that can be used for performing prediction, classification and
regression tasks on trained model.
"""
if not model_location and not local_model_location:
handle_error(
user_msg=Texts.MISSING_MODEL_LOCATION_ERROR_MSG.format(local_model_location=local_model_location)
)
exit(1)
if local_model_location:
validate_local_model_location(local_model_location)
click.echo('Submitting prediction instance.')
try:
model_path = model_location.rstrip('/') if model_location else local_model_location.rstrip('/')
model_name = model_name if model_name else os.path.basename(model_path)
name = name if name else generate_name(name=model_name, prefix=INFERENCE_INSTANCE_PREFIX)
inference_instance = start_inference_instance(name=name, model_location=model_location, model_name=model_name,
local_model_location=local_model_location,
requirements=requirements, pack_params=pack_param)
if inference_instance.state == RunStatus.FAILED:
raise RuntimeError('Inference instance submission failed.')
except Exception:
handle_error(logger, Texts.INSTANCE_START_ERROR_MSG, Texts.INSTANCE_START_ERROR_MSG,
add_verbosity_msg=state.verbosity == 0)
exit(1)
click.echo(tabulate([[inference_instance.cli_representation.name, model_location,
inference_instance.cli_representation.status]],
headers=Texts.TABLE_HEADERS,
tablefmt="orgtbl"))
try:
namespace = get_kubectl_current_context_namespace()
authorization_header = get_authorization_header(service_account_name=name, namespace=namespace)
inference_instance_url = get_inference_instance_url(inference_instance=inference_instance,
model_name=model_name)
click.echo(Texts.INSTANCE_INFO_MSG.format(inference_instance_url=inference_instance_url,
authorization_header=authorization_header))
except Exception:
handle_error(logger, Texts.INSTANCE_URL_ERROR_MSG, Texts.INSTANCE_URL_ERROR_MSG,
add_verbosity_msg=state.verbosity == 0)
exit(1)
def get_authorization_header(service_account_name: str, namespace: str):
service_account = get_service_account(service_account_name=service_account_name, namespace=namespace)
secret_name = service_account.secrets[0].name
authorization_token = get_secret(secret_name=secret_name, namespace=namespace).data['token']
authorization_token = base64.b64decode(authorization_token).decode('utf-8')
return f'Authorization: Bearer {authorization_token}'
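# Hedged usage sketch (service account name, namespace, url and payload are
# assumptions, and `requests` is assumed to be available): the header line
# returned above can be split and passed to any HTTP client when calling
# the inference endpoint.
def _auth_header_sketch(url, payload):
    import requests
    header = get_authorization_header(service_account_name='my-inference',
                                      namespace='my-namespace')
    name, value = header.split(': ', 1)
    return requests.post(url, headers={name: value}, json=payload)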
| 48.236842 | 118 | 0.732679 |
e7374e4188419b942339a85b2f3022c9c2013459 | 1,095 | py | Python | accounts/utils.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 1 | 2018-08-15T22:34:33.000Z | 2018-08-15T22:34:33.000Z | accounts/utils.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 75 | 2018-05-07T21:13:27.000Z | 2021-09-22T17:45:20.000Z | accounts/utils.py | bbengfort/ledger | f3b961be568d3a373228ee2ccf52afc7fb7ca9a8 | [
"MIT"
] | 2 | 2018-08-15T22:34:34.000Z | 2020-07-04T17:27:41.000Z | # utils
# Helpers and utilities for the accounts app
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu May 03 20:17:30 2018 -0400
#
# ID: utils.py [d510add] benjamin@bengfort.com $
"""
Helpers and utilities for the accounts app
"""
##########################################################################
## Imports
##########################################################################
from enum import Enum
##########################################################################
## Currency
##########################################################################
class Currency(Enum):
USD = "USD"
GBP = "GBP"
EUR = "EUR"
CNY = "CNY"
JPY = "JPY"
MXN = "MXN"
@classmethod
def choices(cls):
return tuple((i.name, i.value) for i in cls)
@property
def symbol(self):
"""
Returns the currency symbol
"""
return {
"USD": "$",
"GBP": "£",
"EUR": "€",
"CNY": "角",
"JPY": "¥",
"MXN": "$",
}[self.value]
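# A short usage sketch of the enum above (illustrative only, not used by the
# app): choices() yields Django-style (name, value) pairs and .symbol maps
# each member to its display symbol.
def _currency_demo():
    assert Currency.USD.symbol == "$"
    return Currency.choices()  # (('USD', 'USD'), ('GBP', 'GBP'), ...)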
| 21.9 | 74 | 0.367123 |
0337487ee2374e26ee95d4be372007855a0bd0c8 | 4,849 | py | Python | classification/svm_improved.py | BobbyZhouZijian/AI-Algo-Implmentations | 5592d3c358cc1611a1bde61797b93c0d6eee10c6 | [
"MIT"
] | null | null | null | classification/svm_improved.py | BobbyZhouZijian/AI-Algo-Implmentations | 5592d3c358cc1611a1bde61797b93c0d6eee10c6 | [
"MIT"
] | null | null | null | classification/svm_improved.py | BobbyZhouZijian/AI-Algo-Implmentations | 5592d3c358cc1611a1bde61797b93c0d6eee10c6 | [
"MIT"
] | null | null | null | """
The basic version of SVM uses gradient descent
to find a local minimum of the loss function:
L = 1/2 ||w||^2 + C / N * sum (max(0, 1 - yi(w * xi + b)))
While it works, it takes much time to finetune the value of
the learning rate in order for the loss function to converge.
SMO (Sequential Minimal Optimization) can solve the loss function without
the need of introducing extra hyperparameters. We shall use it for the improved
version of SVM.
SMO optimizes the following dual objective, which is equivalent to the one above:
L = sum alpha - 0.5 * sum sum alphai * alphaj * yi * yj * xiT * xj
"""
import random
import numpy as np
import pandas as pd
import argparse
from util import get_input_label_split, get_accuracy, get_precision
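# A minimal numeric sketch of the SMO box-clipping step described in the
# module docstring above (the values below are assumed for illustration
# only; this helper is not used by the SVM class that follows).
def _smo_clip_sketch(C=1.0, ai=0.2, aj=0.5, raw_update=1.3):
    # yi != yj case: alpha_j must stay inside [L, H] so that 0 <= alpha <= C
    # holds and sum(alpha_i * y_i) is preserved by the paired update.
    L = max(0.0, aj - ai)
    H = min(C, C - ai + aj)
    return min(max(raw_update, L), H)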
class SVM:
def __init__(self, C=1., gamma=0.01, kernel='rbf'):
self.C = C
self.kernel = kernel
        self.gamma = gamma
self.train_x = None
self.train_y = None
self.weights = None
self.bias = None
def kernel_func(self, x, y):
'''
dot x and y based on the specified kernel
K(x, y) = dot(phi(x), phi(y))
'''
if self.kernel == 'linear':
return x.dot(y.T)
elif self.kernel == 'poly':
# for now, default it to degree 3, scale=1, bias=1
scale = 1.
bias = 1.
deg = 3
return (scale * x.dot(y.T) + bias)**deg
elif self.kernel == 'rbf':
            # gamma controls the kernel width of the RBF kernel
            return np.exp(-self.gamma * np.linalg.norm(x - y.T)**2)
else:
raise Exception('Kernel not defined')
def train_SMO(self, num_epochs):
X = self.train_x
y = self.train_y
m, n = X.shape
alpha = np.zeros(m)
self.bias = 0
for _ in range(num_epochs):
for j in range(0, m):
i = self.select_rand(0, m-1, j)
xi, xj, yi, yj = X[i,:], X[j,:], y[i], y[j]
kij = self.kernel_func(xi,xi) + self.kernel_func(xj,xj) - 2*self.kernel_func(xi,xj)
if kij == 0:
continue
ai, aj = alpha[i], alpha[j]
L, H = self.compute_L_H(ai, aj, yi, yj)
# compute w and b
self.weights = self.calc_w(alpha, y, X)
self.bias = self.calc_b(y, X)
# compute Ei, Ej
Ei = self.E(xi, yi)
Ej = self.E(xj, yj)
# update alpha
alpha[j] = aj + float(yj * (Ei - Ej)) / kij
alpha[j] = max(alpha[j], L)
alpha[j] = min(alpha[j], H)
alpha[i] = ai + yi * yj * (aj - alpha[j])
def build_ker_mat(self, data1, data2):
m1 = data1.shape[0]
m2 = data2.shape[0]
ker_mat = np.mat(np.zeros((m1, m2)))
for i in range(m1):
for j in range(m2):
ker_mat[i,j] = self.kernel_func(data1[i],data2[j])
        # keep the numpy matrix; torch is not imported in this module
        return ker_mat
def E(self, x, y):
return self.infer(x) - y
def calc_w(self, alpha, y, x):
return np.dot(x.T, np.multiply(alpha, y))
def calc_b(self, y, x):
b_sum = y - np.dot(self.weights.T, x.T)
return np.mean(b_sum)
def select_rand(self, a, b, i):
j = i
while j == i:
j = random.randint(a, b)
return j
def compute_L_H(self, ai, aj, yi, yj):
if yi != yj:
return max(0, aj-ai), min(self.C, self.C-ai+aj)
else:
return max(0, ai+aj-self.C), min(self.C, ai+aj)
def train(self, data, label_name, num_epochs=30):
self.train_x, self.train_y = get_input_label_split(data, label_name)
self.train_SMO(num_epochs)
def infer(self, data):
return np.sign(np.dot(self.weights.T, data.T) + self.bias).astype(int)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', required=True, help='training data file path')
parser.add_argument('--label_name', type=str, default='label', help='label column name for the input file')
parser.add_argument('--eval_mode', action='store_true', help='run this in evaluation mode')
args = parser.parse_args()
df = pd.read_csv(args.file_path)
if args.eval_mode:
train_sz = int(len(df) * 0.8)
df_train = df[:train_sz]
df_test = df[train_sz:]
svm = SVM()
svm.train(df_train, args.label_name)
test_x, test_y = get_input_label_split(df_test, args.label_name)
pred = svm.infer(test_x)
print(f"accuracy score: {get_accuracy(pred, test_y)}")
print(f"precision score: {get_precision(pred, test_y)}")
else:
pass
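# Hedged sketch of what the non-eval branch above could do (assumed
# behavior, not part of the original CLI): fit on the full dataset and
# return the trained model.
def _train_full_sketch(df, label_name):
    svm = SVM()
    svm.train(df, label_name)
    return svm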
| 28.028902 | 111 | 0.546298 |
c12892c4bdb52186dd5f7bcf2a6511337cc66a71 | 2,898 | py | Python | game.py | angelinawawrzyniak/bomberman | bd058ccea049c1e1af851d0c5e1a0be53685484c | [
"MIT"
] | null | null | null | game.py | angelinawawrzyniak/bomberman | bd058ccea049c1e1af851d0c5e1a0be53685484c | [
"MIT"
] | null | null | null | game.py | angelinawawrzyniak/bomberman | bd058ccea049c1e1af851d0c5e1a0be53685484c | [
"MIT"
] | null | null | null | from artifacts.life import Life
from artifacts.points import Point
from artifacts.super_bomb_artifact import SuperBombArtifact
from bomb import Bomb
from brick import Brick
from context import Context
from game_over_error import GameOverError
from monster import Monster
def draw_scene(context, graphic_buffer):
context.board.draw(graphic_buffer, context)
for brick in context.bricks:
brick.draw(graphic_buffer, context)
for monster in context.monsters:
monster.draw(graphic_buffer, context)
context.user.draw(graphic_buffer, context)
for bomb in context.bombs:
bomb.draw(graphic_buffer, context)
if context.portal is not None:
context.portal.draw(graphic_buffer, context)
for artifact in context.artifacts:
artifact.draw(graphic_buffer, context)
if context.game_over:
letters = list('GAME OVER')
offset = int((len(graphic_buffer[0]) - len(letters)) / 2)
for x in range(0, len(letters)):
graphic_buffer[int(len(graphic_buffer) / 2)][x + offset] = letters[x]
for row in graphic_buffer:
print(' '.join(row))
print('Level: {}, Lives: {}, Points: {}'.format(context.game_level, context.user.life,
context.user.points))
for bomb in context.bombs:
print('Bomb time: {}'.format(bomb.time))
def remove_elements(context):
for element in context.dead_list:
if isinstance(element, Bomb):
context.bombs.remove(element)
if isinstance(element, Brick):
context.bricks.remove(element)
if isinstance(element, Life):
context.artifacts.remove(element)
if isinstance(element, SuperBombArtifact):
context.artifacts.remove(element)
if isinstance(element, Point):
context.artifacts.remove(element)
if isinstance(element, Monster):
context.monsters.remove(element)
context.dead_list = []
context = Context()
graphic_buffer = [
[' ' for index_x in range(len(context.board.fields[index_y]))] for index_y in range(len(context.board.fields))
]
while True:
draw_scene(context, graphic_buffer)
context.user.make_step(context)
for artifact in context.artifacts:
artifact.make_step(context)
if context.portal is not None:
if (context.user.y, context.user.x) == (context.portal.y, context.portal.x):
context.level_up()
continue
try:
for bomb in context.bombs:
bomb.make_step(context)
    except GameOverError:
context.game_over = True
draw_scene(context, graphic_buffer)
break
remove_elements(context)
# TODO:
# super bomb - bigger range of bomb explosion
# monster - moving, decrease user life when monster is on user
# user - decrease user life when user is on monster
# unit tests
| 34.5 | 114 | 0.669427 |
eabc4894f136147cead5211ce8971e04c3fe3125 | 216 | py | Python | reamber/quaver/lists/__init__.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | reamber/quaver/lists/__init__.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | reamber/quaver/lists/__init__.py | Bestfast/reamberPy | 91b76ca6adf11fbe8b7cee7c186481776a4d7aaa | [
"MIT"
] | null | null | null | from reamber.quaver.lists.QuaBpmList import QuaBpmList
from reamber.quaver.lists.QuaNotePkg import QuaNotePkg
from reamber.quaver.lists.QuaSvList import QuaSvList
__all__ = ['QuaBpmList', 'QuaNotePkg', 'QuaSvList']
| 36 | 54 | 0.824074 |
0e523ff5758a32b47680965fa1b8d961b78dabf6 | 5,196 | py | Python | BTCRobot/bot/WS.py | Paulorpc/Slack-Chatbot-Bitcoin-Exchange-Integrator_Python | 8f9f2b4dee59095200e545fb029b02847ac3d3c1 | [
"MIT"
] | null | null | null | BTCRobot/bot/WS.py | Paulorpc/Slack-Chatbot-Bitcoin-Exchange-Integrator_Python | 8f9f2b4dee59095200e545fb029b02847ac3d3c1 | [
"MIT"
] | null | null | null | BTCRobot/bot/WS.py | Paulorpc/Slack-Chatbot-Bitcoin-Exchange-Integrator_Python | 8f9f2b4dee59095200e545fb029b02847ac3d3c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import urllib
import hmac
import hashlib
import httplib
import json
from Config import Config
from collections import OrderedDict
from enum import Enum
from slackclient import SlackClient
class MetodoColetaDadosMB(Enum):
TICKER = 'ticker'
TRADES = 'trades'
ORDERBOOK = 'orderbook'
class MercadoBitcoin(Config):
    # Private configuration methods (__XXX) inherited from 'Config'
def __init__(self, TAPI_METHOD=None, COIN_PAIR=None):
self.response = None
self.responseJson = None
if TAPI_METHOD is None:
TAPI_METHOD = Config.TAPI_METHOD
if COIN_PAIR is None:
COIN_PAIR = Config.COIN_PAIR
        # URL parameters
self.__PARAMS = {
'tapi_method': TAPI_METHOD,
'tapi_nonce': Config.TAPI_NONCE,
            'coin_pair': COIN_PAIR  # coin pair
}
self.__PARAMS = urllib.urlencode(self.__PARAMS)
def getConn(self, metodo, desdeTid=None):
return self.__conexao(metodo, desdeTid)
def getDados(self, metodo, desdeTid=None):
conn = self.__conexao(metodo, desdeTid)
response = conn.getresponse()
response = response.read()
self.response = response
if conn:
conn.close()
return response
def getDadosJSON(self, metodo, desdeTid=None):
response = self.getDados(metodo, desdeTid)
responseJson = json.loads(response, object_pairs_hook=OrderedDict)
self.responseJson = responseJson
return responseJson
def __conexao(self, metodo, desdeTid=None):
"""
        Connection method for fetching data from the Mercado Bitcoin (MB) webservice.
        :param metodo: enum item of the collection method to call
        :param desdeTid: last collected trade ID of executed orders, to resume collection from it (optional)
        :return: open HTTPS connection for the webservice request
"""
if not isinstance(metodo, MetodoColetaDadosMB):
raise "PASSE ITEM DE ENUM DA CLASSE 'metodoColetaDadosMB'"
        # Constants
request_path = '/api/' + str(metodo.name).lower() + '/'
        if desdeTid:
requestParam = '?tid=' + desdeTid
request_path += requestParam
        # Generate the MAC signature
params_string = request_path + '?' + self.__PARAMS
H = hmac.new(Config.MB_TAPI_SECRET, digestmod=hashlib.sha512)
H.update(params_string)
tapi_mac = H.hexdigest()
        # Build the request headers
headers = {
'Content-type': 'application/x-www-form-urlencoded',
'TAPI-ID': Config.MB_TAPI_ID,
'TAPI-MAC': tapi_mac
}
conn = None
try:
            # Public connection uses the GET method
conn = httplib.HTTPSConnection(Config.REQUEST_HOST)
conn.request("GET", request_path, self.__PARAMS, headers)
except Exception as e:
print "\tException em 'Conexão com webservice MB': " + e.message
pass
return conn
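# Hedged usage sketch (assumes valid credentials and coin pair in Config):
# fetch the ticker for the configured coin pair as parsed JSON.
def _mb_usage_sketch():
    ws = MercadoBitcoin()
    return ws.getDadosJSON(MetodoColetaDadosMB.TICKER)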
class Slack(Config):
@staticmethod
def getConn():
token = Config.MAIN_USER.slack_tokenID
return SlackClient(token)
@staticmethod
def postMessage(canal, msg, asUser=False, botname=Config.BOT_USER.nome, botIcon=Config.BOTICON):
'''
        Message sending method for Slack.
        :param canal: ID of the destination channel, group or user
        :param msg: message to be sent
        :param asUser: (True) send as the connection's default user or (False) as the bot
        :param botname: bot name displayed when the message is sent
        :param botIcon: bot icon
'''
sc = Slack.getConn()
sc.api_call(
"chat.postMessage",
channel=canal,
username=botname,
as_user=asUser,
attachments=msg,
icon_url=botIcon
)
def getListaUsuarios(self):
return
def getListaCanais(self):
return
def getIdUsuario(self, nome):
return
def getIdCanal(self, nome):
return
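# Hedged usage sketch (channel name and attachment payload are assumed):
# post a simple attachment to a channel using the static helper above.
def _slack_usage_sketch():
    attachments = [{'text': 'BTC price update', 'color': '#36a64f'}]
    Slack.postMessage('#btcbot', attachments)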
# v1.0
# working
# while True:
#
# slack_token = "xoxb-232327601397-69F1gxfCtkdFfAhdS23x32qy"
# sc = SlackClient(slack_token)
#
# if sc.rtm_connect():
# while True:
#
# for slack_message in sc.rtm_read():
# tipo = slack_message.get("type")
# channel = slack_message.get("channel")
# message = slack_message.get("text")
# user = slack_message.get("user")
#
# print tipo, channel, message, user
#
# if message == "cotacao":
# sc.rtm_send_message("#btcbot", "<@{}>".format(user))
# Estrategia.painelCotacaoMoeda(bitcoin, metodoColeta, ws_MB)
#
# if message == "funcionando":
# sc.rtm_send_message("#btcbot", "<@{}> sim, estou. Obrigado pela preocupação!".format(user))
#
# time.sleep(1)
# else:
# print "Connection Failed"
| 29.027933 | 117 | 0.587182 |
bed6f956db6997a94ed754d9012d66d04cc9eb3e | 2,123 | py | Python | quoine/exceptions.py | nannan7/python-quoine | 93af663395402591f9e9096f640dc160176e8d5d | [
"MIT"
] | 56 | 2017-11-07T11:38:15.000Z | 2022-03-28T00:38:59.000Z | quoine/exceptions.py | nannan7/python-quoine | 93af663395402591f9e9096f640dc160176e8d5d | [
"MIT"
] | 6 | 2017-12-29T15:02:26.000Z | 2021-04-02T11:44:00.000Z | quoine/exceptions.py | nannan7/python-quoine | 93af663395402591f9e9096f640dc160176e8d5d | [
"MIT"
] | 18 | 2017-12-19T10:43:47.000Z | 2022-02-01T19:36:33.000Z | #!/usr/bin/env python
# coding=utf-8
class QuoineAPIException(Exception):
"""Exception class to handle general API Exceptions
`code` values
HTTP 400: Bad Request
There was an error with the request. The body of the response will have more info
HTTP 401: Unauthorized
Token is invalid. If your API key is wrong a 401 will also be served,
so check the response body, it might be that the API_KEY is invalid.
HTTP 422: Unprocessable Entity
There was an error with the request. The body of the response will have more info. Some possible reasons:
- Missing params
- The format of data is wrong
HTTP 429: Too Many Requests
This status indicates that the user has sent too many requests in a given amount of time
HTTP 503: Service Unavailable
Many reasons, body will include details
- An internal error on Authy.
        - Your application is accessing an API call you don't have access to.
- API usage limit. If you reach API usage limits a 503 will be returned,
please wait until you can do the call again.
`message` format
.. code-block:: python
{
"user": ["not_enough_fund"]
}
"""
def __init__(self, response):
try:
json_res = response.json()
except ValueError:
self.messages = response.content
else:
if 'message' in json_res:
self.messages = json_res
elif 'errors' in json_res:
self.messages = json_res['errors']
self.status_code = response.status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return 'QuoineAPIException: {}'.format(self.messages)
class QuoineRequestException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'QuoineRequestException: {}'.format(self.message)
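# Hedged handling sketch (the `response` argument is assumed to be a
# requests-style response object, which is what the exception class above
# expects): raise on any non-200 status, otherwise return the parsed body.
def _handle_response_sketch(response):
    if response.status_code != 200:
        raise QuoineAPIException(response)
    return response.json()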
| 32.166667 | 117 | 0.612812 |
5145b657acaa172333d097e020ebb6b7ff561877 | 696 | py | Python | torrenttv/utils/async_utils/futurize.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | null | null | null | torrenttv/utils/async_utils/futurize.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | 19 | 2020-05-03T17:06:24.000Z | 2021-03-11T05:26:57.000Z | torrenttv/utils/async_utils/futurize.py | AlexCovizzi/torrenttv | 25ae5490568110a7ea1f2e5eb74505ef8eea002d | [
"MIT"
] | 1 | 2020-05-04T19:00:00.000Z | 2020-05-04T19:00:00.000Z | import asyncio
def futurize(func, args=None, kwargs=None, loop=None, executor=None):
loop = loop or asyncio.get_event_loop()
args = args or ()
kwargs = kwargs or {}
awaitable = loop.run_in_executor(executor, func, *args, **kwargs)
return asyncio.ensure_future(awaitable)
def futurize_callback(callback, loop=None):
loop = loop or asyncio.get_event_loop()
def func_wrapper(*args, **kwargs):
def set_result(fut, result):
if not fut.done():
fut.set_result(result)
result = callback(*args, **kwargs)
loop.call_soon_threadsafe(set_result, fut, result)
fut = loop.create_future()
return fut, func_wrapper
| 25.777778 | 69 | 0.659483 |
1d0f6be53645c2dc598782fd30f3f990379e3367 | 1,521 | py | Python | net_trainer.py | Githubowy-Juliusz/SDT | 1d696e38345e745b874d619dfe4bc336d4239616 | [
"MIT"
] | null | null | null | net_trainer.py | Githubowy-Juliusz/SDT | 1d696e38345e745b874d619dfe4bc336d4239616 | [
"MIT"
] | null | null | null | net_trainer.py | Githubowy-Juliusz/SDT | 1d696e38345e745b874d619dfe4bc336d4239616 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class NetTrainer:
def __init__(self, model, device, learning_rate=0.001, weight_decay=0.05):
self.model = model
self.device = device
self.loss_fn = nn.CrossEntropyLoss()
self.optimizer = torch.optim.AdamW(self.model.parameters(),
lr=learning_rate, weight_decay=weight_decay)
def train(self, data_loader):
self.model.train()
accuracies = []
losses = []
for data, labels in data_loader:
data = data.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
predictions = self.model(data)
loss = self.loss_fn(predictions, labels)
losses.append(loss.detach().cpu().numpy())
loss.backward()
self.optimizer.step()
predictions = F.softmax(predictions, dim=1)
accuracy = (torch.argmax(predictions, dim=1) == labels).type(torch.FloatTensor).mean().item()
accuracies.append(accuracy)
return np.mean(accuracies), np.mean(losses)
    @torch.no_grad()  # validation does not need gradient tracking
    def validate(self, data_loader):
self.model.eval()
losses = []
accuracies = []
for data, labels in data_loader:
data = data.to(self.device)
labels = labels.to(self.device)
predictions = self.model(data)
loss = self.loss_fn(predictions, labels)
losses.append(loss.detach().cpu().numpy())
predictions = F.softmax(predictions, dim=1)
accuracy = (torch.argmax(predictions, dim=1) == labels).type(torch.FloatTensor).mean().item()
accuracies.append(accuracy)
return np.mean(accuracies), np.mean(losses) | 28.166667 | 96 | 0.708744 |
cb7e32a3d1de030496b6598908578a0de3db0228 | 12,297 | py | Python | esrt/models/lse.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | esrt/models/lse.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | esrt/models/lse.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | import tensorflow as tf
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from esrt.engine.base_model import BaseModel
from esrt.losses import single_nce_loss, pair_search_loss
from esrt.query_embedding import get_query_embedding
from tensorflow.python.framework import dtypes
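# Hedged usage sketch (the session and batch keys are assumptions; the LSE
# class defined below is resolved at call time): run one training step by
# feeding the model's placeholders by name through its step() helper.
def _lse_step_sketch(session, model, batch, learning_rate=0.5):
    input_feed = {
        model.learning_rate.name: learning_rate,
        model.user_idxs.name: batch['user_idxs'],
        model.product_idxs.name: batch['product_idxs'],
        model.word_idxs.name: batch['word_idxs'],
        model.query_word_idxs.name: batch['query_word_idxs'],
    }
    for i, idxs in enumerate(batch['context_word_idxs']):
        input_feed[model.context_word_idxs[i].name] = idxs
    return model.step(session, input_feed, forward_only=False)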
class LSE(BaseModel):
def __init__(self, dataset, params, forward_only=False):
print("################LSE####################")
self._dataset = dataset
self.vocab_size = self._dataset.vocab_size
self.review_size = self._dataset.review_size
self.user_size = self._dataset.user_size
self.product_size = self._dataset.product_size
self.query_max_length = self._dataset.query_max_length
self.vocab_distribute = self._dataset.vocab_distribute
self.review_distribute = self._dataset.review_distribute
self.product_distribute = self._dataset.product_distribute
self._params = params
self.negative_sample = self._params['negative_sample']
self.embed_size = self._params['embed_size']
self.window_size = self._params['window_size']
self.max_gradient_norm = self._params['max_gradient_norm']
self.init_learning_rate = self._params['init_learning_rate']
self.L2_lambda = self._params['L2_lambda']
self.net_struct = self._params['net_struct']
self.similarity_func = self._params['similarity_func']
self.query_weight=self._params['query_weight']
self.global_step = tf.Variable(0, trainable=False)
self.forward_only = forward_only
self.print_ops = []
if self.query_weight >= 0:
self.Wu = tf.Variable(self.query_weight, name="user_weight", dtype=tf.float32, trainable=False)
else:
self.Wu = tf.sigmoid(tf.Variable(0, name="user_weight", dtype=tf.float32))
self.context_word_idxs = []
for i in range(2 * self.window_size):
self.context_word_idxs.append(tf.placeholder(tf.int64, shape=[None], name="context_idx{0}".format(i)))
def build(self):
self._build_placeholder()
self.loss = self._build_embedding_graph_and_loss()
if not self.forward_only:
self.updates = self._build_optimizer()
else:
self.product_scores = self.get_product_scores(self.user_idxs, self.query_word_idxs)
self.saver = tf.train.Saver(tf.global_variables())
def _build_placeholder(self):
# Feeds for inputs.
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.review_idxs = tf.placeholder(tf.int64, shape=[None], name="review_idxs")
self.user_idxs = tf.placeholder(tf.int64, shape=[None], name="user_idxs")
self.product_idxs = tf.placeholder(tf.int64, shape=[None], name="product_idxs")
self.word_idxs = tf.placeholder(tf.int64, shape=[None], name="word_idxs")
self.query_word_idxs = tf.placeholder(tf.int64, shape=[None, self.query_max_length], name="query_word_idxs")
self.PAD_embed = tf.get_variable("PAD_embed", [1,self.embed_size],dtype=tf.float32)
def LSE_nce_loss(self, user_idxs, product_idxs, word_idxs, context_word_idxs):
batch_size = array_ops.shape(word_idxs)[0] # get batch_size
loss = None
# get f(s)
word_idx_list = tf.stack([word_idxs] + context_word_idxs, 1)
f_s, [f_W, word_vecs] = get_query_embedding(self, word_idx_list, self.word_emb, None)
# Negative sampling
loss, true_w, sample_w = self.LSE_single_nce_loss(f_s, product_idxs, self.product_emb,
self.product_bias, self.product_size, self.product_distribute)
# L2 regularization
if self.L2_lambda > 0:
loss += self.L2_lambda * (tf.nn.l2_loss(true_w) + tf.nn.l2_loss(sample_w) +
tf.nn.l2_loss(f_W) + tf.nn.l2_loss(word_vecs))
return loss / math_ops.cast(batch_size, dtypes.float32)
def LSE_single_nce_loss(self, example_vec, label_idxs, label_emb,
label_bias, label_size, label_distribution):
batch_size = array_ops.shape(label_idxs)[0] # get batch_size
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(tf.cast(label_idxs, dtype=tf.int64), [batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=self.negative_sample,
unique=False,
range_max=label_size,
distortion=0.75,
unigrams=label_distribution))
# get label embeddings and bias [batch_size, embed_size], [batch_size, 1]
true_w = tf.nn.embedding_lookup(label_emb, label_idxs)
true_b = tf.nn.embedding_lookup(label_bias, label_idxs)
# get sampled embeddings and bias [num_sampled, embed_size], [num_sampled, 1]
sampled_w = tf.nn.embedding_lookup(label_emb, sampled_ids)
sampled_b = tf.nn.embedding_lookup(label_bias, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.multiply(example_vec, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
        # We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [self.negative_sample])
sampled_logits = tf.matmul(example_vec, sampled_w, transpose_b=True) + sampled_b_vec
return self.nce_loss(true_logits, sampled_logits), true_w, sampled_w
# return model.nce_loss(true_logits, true_logits)
def nce_loss(self, true_logits, sampled_logits):
"Build the graph for the NCE loss."
# cross-entropy(logits, labels)
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
logits=true_logits, labels=tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
logits=sampled_logits, labels=tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent))
return nce_loss_tensor
def _build_embedding_graph_and_loss(self, scope=None):
with variable_scope.variable_scope(scope or "LSE_graph"):
# Word embeddings.
init_width = 0.5 / self.embed_size
self.word_emb = tf.Variable(tf.random_uniform(
[self.vocab_size, self.embed_size], -init_width, init_width),
name="word_emb")
self.word_emb = tf.concat(axis=0, values=[self.word_emb, tf.zeros([1, self.embed_size])])
self.word_bias = tf.Variable(tf.zeros([self.vocab_size]), name="word_b")
self.word_bias = tf.concat(axis=0, values=[self.word_bias, tf.zeros([1])])
# user/product embeddings.
self.user_emb = tf.Variable(tf.zeros([self.user_size, self.embed_size]),
name="user_emb")
self.user_bias = tf.Variable(tf.zeros([self.user_size]), name="user_b")
self.product_emb = tf.Variable(tf.zeros([self.product_size, self.embed_size]),
name="product_emb")
self.product_bias = tf.Variable(tf.zeros([self.product_size]), name="product_b")
# self.context_emb = tf.Variable( tf.zeros([self.vocab_size, self.embed_size]),
# name="context_emb")
# self.context_bias = tf.Variable(tf.zeros([self.vocab_size]), name="context_b")
return self.LSE_nce_loss(self.user_idxs, self.product_idxs, self.word_idxs,
self.context_word_idxs)
def _build_optimizer(self):
params = tf.trainable_variables()
opt = tf.train.GradientDescentOptimizer(self.learning_rate)
self.gradients = tf.gradients(self.loss, params)
self.clipped_gradients, self.norm = tf.clip_by_global_norm(self.gradients,
self.max_gradient_norm)
return opt.apply_gradients(zip(self.clipped_gradients, params),
global_step=self.global_step)
def step(self, session, input_feed, forward_only, file_writer=None, test_mode='product_scores'):
if not forward_only:
output_feed = [self.updates, # Update Op that does SGD.
self.loss] # Loss for this batch.
else:
if test_mode == 'output_embedding':
output_feed = [self.user_emb, self.product_emb, self.Wu, self.word_emb, self.word_bias]
else:
output_feed = [self.product_scores, self.print_ops] #negative instance output
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[1] # loss, no outputs, Gradient norm.
else:
if test_mode == 'output_embedding':
return outputs[:4], outputs[4:]
else:
return outputs[0], None # product scores to input user
def get_product_scores(self, user_idxs, query_word_idx, product_idxs = None, scope = None):
"""
Args:
user_idxs: Tensor with shape of [batch_size] with type of int32.
query_word_idx: Tensor with shape for [batch_size, query_max_length] with type of int32.
product_idxs: Tensor with shape of [batch_size] with type of int32 or None.
scope:
Return:
product_scores: Tensor with shape of [batch_size, batch_size] or [batch_size, len(product_vocab)]
                with type of float32. Its (i, j) entry is the score of product j
                retrieved for example i (which is a linear combination of user and query).
"""
with variable_scope.variable_scope(scope or "LSE_graph"):
# get query vector
query_vec, word_vecs = get_query_embedding(self, query_word_idx, self.word_emb, True)
# match with product
product_vec = None
product_bias = None
            if product_idxs is not None:
product_vec = tf.nn.embedding_lookup(self.product_emb, product_idxs)
product_bias = tf.nn.embedding_lookup(self.product_bias, product_idxs)
else:
product_vec = self.product_emb
product_bias = self.product_bias
print('Similarity Function : ' + self.similarity_func)
if self.similarity_func == 'product':
return tf.matmul(query_vec, product_vec, transpose_b=True)
elif self.similarity_func == 'bias_product':
return tf.matmul(query_vec, product_vec, transpose_b=True) + product_bias
else:
query_norm = tf.sqrt(tf.reduce_sum(tf.square(query_vec), 1, keep_dims=True))
product_norm = tf.sqrt(tf.reduce_sum(tf.square(product_vec), 1, keep_dims=True))
return tf.matmul(query_vec / query_norm, product_vec / product_norm, transpose_b=True)
| 48.604743 | 116 | 0.633163 |
21085173945da4de0dab669a7995706de1e2be22 | 18,911 | py | Python | custom/icds/location_reassignment/parser.py | orangejenny-test/commcare-hq | 4939322bb2941bea0fc75ec1e88585e478c16abf | [
"BSD-3-Clause"
] | null | null | null | custom/icds/location_reassignment/parser.py | orangejenny-test/commcare-hq | 4939322bb2941bea0fc75ec1e88585e478c16abf | [
"BSD-3-Clause"
] | null | null | null | custom/icds/location_reassignment/parser.py | orangejenny-test/commcare-hq | 4939322bb2941bea0fc75ec1e88585e478c16abf | [
"BSD-3-Clause"
] | null | null | null | from collections import defaultdict
import attr
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.users.models import CommCareUser
from corehq.apps.users.util import normalize_username
from custom.icds.location_reassignment.const import (
AWC_CODE_COLUMN,
CURRENT_SITE_CODE_COLUMN,
EXTRACT_OPERATION,
HOUSEHOLD_ID_COLUMN,
MERGE_OPERATION,
NEW_LGD_CODE,
NEW_NAME,
NEW_PARENT_SITE_CODE,
NEW_SITE_CODE_COLUMN,
NEW_SUB_DISTRICT_NAME,
NEW_USERNAME_COLUMN,
OPERATION_COLUMN,
SPLIT_OPERATION,
USERNAME_COLUMN,
VALID_OPERATIONS,
)
from custom.icds.location_reassignment.models import Transition
class TransitionRow(object):
"""
An object representation of each row in excel
"""
def __init__(self, location_type, operation, old_site_code, new_site_code, expects_parent,
new_location_details=None, old_username=None, new_username=None):
self.location_type = location_type
self.operation = operation
self.old_site_code = old_site_code
self.new_site_code = new_site_code
self.expects_parent = expects_parent
self.new_location_details = new_location_details or {}
self.old_username = old_username
self.new_username = new_username
def validate(self):
"""
a. operation column has a valid value
b. If its a valid operation
i. there should be both old and new location codes
ii. there should be a change in old and new location codes
iii. there should be both old and new usernames or neither
iv. there should be a new parent site code where expected
v. there should be no new parent site code where not expected
"""
if self.operation not in VALID_OPERATIONS:
return [f"Invalid Operation {self.operation}"]
if not self.old_site_code or not self.new_site_code:
return [f"Missing location code for operation {self.operation}. "
f"Got old: '{self.old_site_code}' and new: '{self.new_site_code}'"]
errors = []
if self.old_site_code == self.new_site_code:
errors.append(f"No change in location code for operation {self.operation}. "
f"Got old: '{self.old_site_code}' and new: '{self.new_site_code}'")
if bool(self.new_username) != bool(self.old_username):
errors.append(f"Need both old and new username for {self.operation} operation "
f"on location '{self.old_site_code}'")
if not self.new_location_details.get('name', '').strip():
errors.append(f"Missing new location name for {self.new_site_code}")
if self.expects_parent and not self.new_location_details.get('parent_site_code'):
errors.append(f"Need parent for '{self.new_site_code}'")
if not self.expects_parent and self.new_location_details.get('parent_site_code'):
errors.append(f"Unexpected parent set for '{self.new_site_code}'")
return errors
class Parser(object):
def __init__(self, domain, workbook):
"""
Receives a worksheet generated by custom.icds.location_reassignment.download.Download
and generates an output like
{
location_type_code:
{
'location site code': Transition object
}
}
Find valid transitions and then additionally validates that
a. all old locations should be present in the system
b. if a location is deprecated, all its descendants should get deprecated too
c. new parent assigned should be of the expected location type
"""
self.domain = domain
self.workbook = workbook
# For consolidated validations
# maintain a list of all site codes undergoing a transition
self.transiting_site_codes = set()
# maintain a list of valid site codes to be deprecated i.e all old site codes
self.site_codes_to_be_deprecated = set()
# mapping of expected parent type for a location type
self.location_type_parent = {
lt.code: lt.parent_type.code
for lt in LocationType.objects.select_related('parent_type').filter(domain=self.domain)
if lt.parent_type
}
location_type_codes_in_hierarchy = [lt.code for lt in LocationType.objects.by_domain(self.domain)]
# Details of requested changes
# site codes of new locations getting created
self.new_site_codes_for_location_type = {
location_type_code: set()
for location_type_code in location_type_codes_in_hierarchy
}
# a mapping of all TransitionRows passed for each location type
self.transition_rows = {location_type_code: defaultdict(list)
for location_type_code in location_type_codes_in_hierarchy}
# a list of all normalized usernames passed
self.usernames = set()
# a mapping of all valid transitions found
self.valid_transitions = {location_type_code: []
for location_type_code in location_type_codes_in_hierarchy}
self.errors = []
def parse(self):
for worksheet in self.workbook.worksheets:
location_type_code = worksheet.title
expects_parent = bool(self.location_type_parent.get(location_type_code))
for row in worksheet:
operation = row.get(OPERATION_COLUMN)
if not operation:
continue
transition_row = TransitionRow(
location_type=location_type_code,
operation=operation,
old_site_code=row.get(CURRENT_SITE_CODE_COLUMN),
new_site_code=row.get(NEW_SITE_CODE_COLUMN),
expects_parent=expects_parent,
new_location_details={
'name': row.get(NEW_NAME),
'parent_site_code': row.get(NEW_PARENT_SITE_CODE),
'lgd_code': row.get(NEW_LGD_CODE),
'sub_district_name': row.get(NEW_SUB_DISTRICT_NAME)
},
old_username=row.get(USERNAME_COLUMN),
new_username=row.get(NEW_USERNAME_COLUMN)
)
self._note_transition(transition_row)
self._consolidate()
self.validate()
return self.errors
def _note_transition(self, row):
operation = row.operation
location_type_code = row.location_type
new_site_code = row.new_site_code
old_site_code = row.old_site_code
# only for merge operation final location is used as the reference key
if operation == MERGE_OPERATION:
self.transition_rows[location_type_code][new_site_code].append(row)
else:
self.transition_rows[location_type_code][old_site_code].append(row)
def _consolidate(self):
"""
Consolidate valid TransitionRow requests into valid Transition objects
In case of multiple TransitionRow requests, like in case of merge/split,
combine them into one Transition
"""
for location_type_code, rows_for_site_code in self.transition_rows.items():
for site_code, rows in rows_for_site_code.items():
errors = []
for row in rows:
errors.extend(row.validate())
if errors:
self.errors.extend(errors)
continue
operation = self._valid_unique_operation(site_code, rows)
if not operation:
continue
transition = self._consolidated_transition(location_type_code, operation, rows)
if not transition:
continue
if self._is_valid_transition(transition):
self.site_codes_to_be_deprecated.update(transition.old_site_codes)
self.valid_transitions[location_type_code].append(transition)
for old_username, new_username in transition.user_transitions.items():
if old_username:
self.usernames.add(normalize_username(old_username, self.domain))
if new_username:
self.usernames.add(normalize_username(new_username, self.domain))
# keep note of transition details for consolidated validations
self.transiting_site_codes.update(transition.old_site_codes)
self.transiting_site_codes.update(transition.new_site_codes)
self.new_site_codes_for_location_type[location_type_code].update(transition.new_site_codes)
def _valid_unique_operation(self, site_code, rows):
"""
return unique valid operation for rows
"""
unique_operations = {row.operation for row in rows}
if len(unique_operations) > 1:
self.errors.append(f"Different operations requested for {site_code}: {','.join(unique_operations)}")
return None
operation = rows[0].operation
if len(rows) > 1 and operation not in [MERGE_OPERATION, SPLIT_OPERATION]:
self.errors.append(f"Multiple {operation} rows for {site_code}")
return None
if not len(rows) > 1 and operation in [MERGE_OPERATION, SPLIT_OPERATION]:
self.errors.append(f"Expected multiple rows for {operation} for {site_code}")
return None
return operation
def _consolidated_transition(self, location_type_code, operation, rows):
transition = Transition(domain=self.domain, location_type_code=location_type_code, operation=operation)
for row in rows:
if row.new_site_code in transition.new_location_details:
# new location is passed with different details
if transition.new_location_details[row.new_site_code] != row.new_location_details:
self.errors.append(f"New location {row.new_site_code} passed with different information")
return None
transition.add(
old_site_code=row.old_site_code,
new_site_code=row.new_site_code,
new_location_details=row.new_location_details,
old_username=row.old_username,
new_username=row.new_username
)
return transition
def _is_valid_transition(self, transition):
valid = True
for old_site_code in transition.old_site_codes:
if old_site_code in self.transiting_site_codes:
self.errors.append(f"{old_site_code} participating in multiple transitions")
valid = False
for new_site_code in transition.new_site_codes:
if new_site_code in self.transiting_site_codes:
self.errors.append(f"{new_site_code} participating in multiple transitions")
valid = False
return valid
def validate(self):
if self.site_codes_to_be_deprecated:
self._validate_old_locations()
self._validate_descendants_deprecated()
self._validate_parents()
self._validate_usernames()
def _validate_old_locations(self):
deprecating_locations_site_codes = (
SQLLocation.active_objects
.filter(domain=self.domain, site_code__in=self.site_codes_to_be_deprecated)
.values_list('site_code', flat=True)
)
if len(deprecating_locations_site_codes) != len(self.site_codes_to_be_deprecated):
self.errors.append(f"Found {len(deprecating_locations_site_codes)} locations for "
f"{len(self.site_codes_to_be_deprecated)} deprecating site codes")
missing_site_codes = set(self.site_codes_to_be_deprecated) - set(deprecating_locations_site_codes)
if missing_site_codes:
self.errors.append(f"Could not find old locations with site codes {','.join(missing_site_codes)}")
def _validate_descendants_deprecated(self):
"""
ensure all locations getting deprecated, also have their descendants getting deprecated
except for extract operation ensure at least one descendant getting deprecated
"""
site_codes_to_be_deprecated = set(self.site_codes_to_be_deprecated)
locations_to_be_deprecated_by_site_code = {
loc.site_code: loc
for loc in SQLLocation.active_objects.filter(
domain=self.domain, site_code__in=self.site_codes_to_be_deprecated)
}
for transitions in self.valid_transitions.values():
for transition in transitions:
operation = transition.operation
for old_site_code in transition.old_site_codes:
location = locations_to_be_deprecated_by_site_code.get(old_site_code)
# using an archived location's site code is already caught,
# but still adding another error for edge cases
if not location:
self.errors.append(f"Could not find old location with site code {old_site_code}")
continue
descendants_sites_codes = location.child_locations().values_list('site_code', flat=True)
if operation == EXTRACT_OPERATION:
if not set(descendants_sites_codes) & site_codes_to_be_deprecated:
self.errors.append(
f"Location {location.site_code} is getting deprecated via {operation} "
f"but none of its descendants")
else:
missing_site_codes = set(descendants_sites_codes) - site_codes_to_be_deprecated
if missing_site_codes:
self.errors.append(
f"Location {location.site_code} is getting deprecated via {operation} "
f"but the following descendants are not {', '.join(missing_site_codes)}")
def _validate_parents(self):
"""
validate new parent set respects the hierarchy
if the parent location is already present in the system, validate it's location type and that its not
getting archived
else check for parent location in new locations to be created for the expected parent location type
else add error for missing parent
"""
for location_type_code in self.valid_transitions:
expected_parent_type = self.location_type_parent.get(location_type_code)
if not expected_parent_type:
continue
new_parent_site_codes = self._get_new_parent_site_codes(location_type_code)
if not new_parent_site_codes:
continue
existing_new_parents = {
loc.site_code: loc for loc in
SQLLocation.active_objects.select_related('location_type')
.filter(domain=self.domain, site_code__in=new_parent_site_codes)
}
for transition in self.valid_transitions[location_type_code]:
for new_site_code, new_location_details in transition.new_location_details.items():
parent_site_code = new_location_details['parent_site_code']
if parent_site_code in existing_new_parents:
if existing_new_parents[parent_site_code].location_type.code != expected_parent_type:
self.errors.append(f"Unexpected parent {parent_site_code} "
f"for type {location_type_code}")
elif parent_site_code not in self.new_site_codes_for_location_type[expected_parent_type]:
self.errors.append(f"Unexpected parent {parent_site_code} for type {location_type_code}")
if parent_site_code in self.site_codes_to_be_deprecated:
self.errors.append(f"Parent {parent_site_code} is marked for archival")
def _get_new_parent_site_codes(self, location_type_code):
parent_site_codes = set()
for transition in self.valid_transitions[location_type_code]:
for new_site_code, new_location_details in transition.new_location_details.items():
if new_location_details['parent_site_code']:
parent_site_codes.add(new_location_details['parent_site_code'])
return parent_site_codes
def _validate_usernames(self):
keys = [["active", self.domain, "CommCareUser", username] for username in self.usernames]
result = CommCareUser.get_db().view(
'users/by_domain',
keys=keys,
reduce=False,
include_docs=False
).all()
if len(result) != len(self.usernames):
usernames_found = set([r['key'][-1] for r in result])
usernames_missing = set(self.usernames) - usernames_found
self.errors.append(f"Could not find user(s): {', '.join(usernames_missing)}")
def valid_transitions_json(self, for_location_type=None):
# return valid transitions as json
json_response = {}
for location_type, transitions in self.valid_transitions.items():
if for_location_type and for_location_type != location_type:
continue
json_response[location_type] = [attr.asdict(transition) for transition in transitions]
return json_response
class HouseholdReassignmentParser(object):
def __init__(self, domain, workbook):
self.domain = domain
self.workbook = workbook
self.reassignments = {} # household id mapped to a dict with old_site_code and new_site_code
def parse(self):
errors = []
for worksheet in self.workbook.worksheets:
location_site_code = worksheet.title
for row in worksheet:
household_id = row.get(HOUSEHOLD_ID_COLUMN)
new_awc_code = row.get(AWC_CODE_COLUMN)
if not household_id:
errors.append("Missing Household ID for %s" % location_site_code)
continue
if not new_awc_code:
errors.append("Missing New AWC Code for household ID %s" % household_id)
continue
self.reassignments[household_id] = {
'old_site_code': location_site_code,
'new_site_code': new_awc_code
}
return errors
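# Hedged usage sketch (the workbook argument is assumed to be one produced
# by the reassignment Download described in the Parser docstring): run the
# transition parser and return either the errors or the valid transitions.
def _parser_usage_sketch(domain, workbook):
    parser = Parser(domain, workbook)
    errors = parser.parse()
    return errors if errors else parser.valid_transitions_json()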
| 48.489744 | 113 | 0.63397 |
e65742b69f2e9c4b2144e071dd7dadf16fc9c5b2 | 6,201 | py | Python | Lib/test/test_sunau.py | eendebakpt/cpython | 474fdbe9e4a2ff90ef39e8748da644c86a200981 | [
"0BSD"
] | 1 | 2020-09-28T16:41:16.000Z | 2020-09-28T16:41:16.000Z | Lib/test/test_sunau.py | eendebakpt/cpython | 474fdbe9e4a2ff90ef39e8748da644c86a200981 | [
"0BSD"
] | 4 | 2021-12-01T11:57:28.000Z | 2022-03-01T20:05:21.000Z | Lib/test/test_sunau.py | eendebakpt/cpython | 474fdbe9e4a2ff90ef39e8748da644c86a200981 | [
"0BSD"
] | null | null | null | import unittest
from test import audiotests
import io
import struct
import sys
import sunau
from test.support import warnings_helper
audioop = warnings_helper.import_deprecated("audioop")
class SunauTest(audiotests.AudioWriteTests,
audiotests.AudioTestsWithSourceFile):
module = sunau
class SunauPCM8Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm8.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 1
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
02FF 4B00 3104 8008 CB06 4803 BF01 03FE B8FA B4F3 29EB 1AE6 \
EDE4 C6E2 0EE0 EFE0 57E2 FBE8 13EF D8F7 97FB F5FC 08FB DFFB \
11FA 3EFB BCFC 66FF CF04 4309 C10E 5112 EE17 8216 7F14 8012 \
490E 520D EF0F CE0F E40C 630A 080A 2B0B 510E 8B11 B60E 440A \
""")
class SunauPCM16Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm16.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 2
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022EFFEA 4B5C00F9 311404EF 80DB0844 CBE006B0 48AB03F3 BFE601B5 0367FE80 \
B853FA42 B4AFF351 2997EBCD 1A5AE6DC EDF9E492 C627E277 0E06E0B7 EF29E029 \
5759E271 FB34E83F 1377EF85 D82CF727 978EFB79 F5F7FC12 0864FB9E DF30FB40 \
1183FA30 3EEAFB59 BC78FCB4 66D5FF60 CF130415 431A097D C1BA0EC7 512312A0 \
EEE11754 82071666 7FFE1448 80001298 49990EB7 52B40DC1 EFAD0F65 CE3A0FBE \
E4B70CE6 63490A57 08CC0A1D 2BBC0B09 51480E46 8BCB113C B6F60EE9 44150A5A \
""")
class SunauPCM24Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm24.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 3
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
""")
class SunauPCM32Test(SunauTest, unittest.TestCase):
sndfilename = 'pluck-pcm32.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 4
framerate = 11025
nframes = 48
comptype = 'NONE'
compname = 'not compressed'
frames = bytes.fromhex("""\
022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
""")
class SunauULAWTest(SunauTest, unittest.TestCase):
sndfilename = 'pluck-ulaw.au'
sndfilenframes = 3307
nchannels = 2
sampwidth = 2
framerate = 11025
nframes = 48
comptype = 'ULAW'
compname = 'CCITT G.711 u-law'
frames = bytes.fromhex("""\
022CFFE8 497C00F4 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C09BC C1840EBC 517C12FC \
EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
""")
if sys.byteorder != 'big':
frames = audioop.byteswap(frames, 2)
class SunauLowLevelTest(unittest.TestCase):
def test_read_bad_magic_number(self):
b = b'SPA'
with self.assertRaises(EOFError):
sunau.open(io.BytesIO(b))
b = b'SPAM'
with self.assertRaisesRegex(sunau.Error, 'bad magic number'):
sunau.open(io.BytesIO(b))
def test_read_too_small_header(self):
b = struct.pack('>LLLLL', sunau.AUDIO_FILE_MAGIC, 20, 0,
sunau.AUDIO_FILE_ENCODING_LINEAR_8, 11025)
with self.assertRaisesRegex(sunau.Error, 'header size too small'):
sunau.open(io.BytesIO(b))
def test_read_too_large_header(self):
b = struct.pack('>LLLLLL', sunau.AUDIO_FILE_MAGIC, 124, 0,
sunau.AUDIO_FILE_ENCODING_LINEAR_8, 11025, 1)
b += b'\0' * 100
with self.assertRaisesRegex(sunau.Error, 'header size ridiculously large'):
sunau.open(io.BytesIO(b))
def test_read_wrong_encoding(self):
b = struct.pack('>LLLLLL', sunau.AUDIO_FILE_MAGIC, 24, 0, 0, 11025, 1)
with self.assertRaisesRegex(sunau.Error, r'encoding not \(yet\) supported'):
sunau.open(io.BytesIO(b))
def test_read_wrong_number_of_channels(self):
b = struct.pack('>LLLLLL', sunau.AUDIO_FILE_MAGIC, 24, 0,
sunau.AUDIO_FILE_ENCODING_LINEAR_8, 11025, 0)
with self.assertRaisesRegex(sunau.Error, 'bad # of channels'):
sunau.open(io.BytesIO(b))
if __name__ == "__main__":
unittest.main()
| 38.515528 | 84 | 0.716014 |
1c5d47d4b45e224bdf69d36de42628cbda889678 | 1,193 | py | Python | src/models/amanatsu.py | HhotateA/quiche_pantie_patch | f50c4fd69bd43cccaeb38f026d486e3ccc3850d8 | [
"CC-BY-4.0"
] | 73 | 2019-01-26T02:57:24.000Z | 2022-02-15T08:45:11.000Z | src/models/amanatsu.py | HhotateA/quiche_pantie_patch | f50c4fd69bd43cccaeb38f026d486e3ccc3850d8 | [
"CC-BY-4.0"
] | 9 | 2019-04-09T10:53:41.000Z | 2020-09-11T13:18:26.000Z | src/models/amanatsu.py | HhotateA/quiche_pantie_patch | f50c4fd69bd43cccaeb38f026d486e3ccc3850d8 | [
"CC-BY-4.0"
] | 15 | 2019-04-07T11:28:57.000Z | 2022-03-29T04:35:48.000Z | import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image, ImageOps
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_amanatsu.png', **options):
super().__init__(name='あまなつ', body=body, pantie_position=[402, 835], **options)
self.mask = io.imread('./mask/mask_amanatsu.png')
def convert(self, image):
pantie = np.array(image)
patch = np.copy(pantie[-110:-5, 546:, :])
pantie[-110:, 546:, :] = 0
[pr, pc, d] = patch.shape
pantie[105:105 + pr, :pc, :] = patch[::-1, ::-1]
arrx = np.zeros(100) - 40
arrx += np.cos(np.linspace(0, np.pi, 100)) * -30
arry = np.linspace(0, 1, 100)**2 * 10
pantie = affine_transform_by_arr(pantie, arrx, arry)[:, 7:]
pantie = perspective_transform(pantie, np.matrix('1, 0.01, 0; 0, 1, 0; -0.0008,0,1'))
pantie = np.uint8(resize(pantie[:300, :415], [1.5, 1.5]) * 255)
pantie = np.bitwise_and(pantie, self.mask)
pantie = np.concatenate([pantie[:, ::-1], pantie], axis=1)
return Image.fromarray(pantie)
| 39.766667 | 93 | 0.605197 |
5703618fb44a58596cd1f1bebc96cf10fb705c6f | 2,179 | py | Python | isitopen/config/environment.py | oki-archive/isitopen | 828288ed4ede395aceeb96cd798eb028431dde06 | [
"OML"
] | 1 | 2018-08-01T07:10:24.000Z | 2018-08-01T07:10:24.000Z | isitopen/config/environment.py | oki-archive/isitopen | 828288ed4ede395aceeb96cd798eb028431dde06 | [
"OML"
] | null | null | null | isitopen/config/environment.py | oki-archive/isitopen | 828288ed4ede395aceeb96cd798eb028431dde06 | [
"OML"
] | null | null | null | """Pylons environment configuration"""
import os
import pylons
from pylons import config
import isitopen.lib.app_globals as app_globals
import isitopen.lib.helpers
from isitopen.config.routing import make_map
from pylons.i18n.translation import ugettext
from genshi.template import TemplateLoader
from genshi.filters.i18n import Translator
from sqlalchemy import engine_from_config
def load_environment(global_conf, app_conf):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
# Pylons paths
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'public'),
templates=[os.path.join(root, 'templates')])
# Initialize config with the basic options
config.init_app(global_conf, app_conf, package='isitopen',
template_engine='genshi', paths=paths)
config['routes.map'] = make_map()
config['pylons.g'] = app_globals.Globals()
config['pylons.h'] = isitopen.lib.helpers
# Customize templating options via this variable
tmpl_options = config['buffet.template_options']
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
engine = engine_from_config(config, 'sqlalchemy.')
config['pylons.g'].sa_engine = engine
# Translator (i18n)
translator = Translator(ugettext)
def template_loaded(template):
template.filters.insert(0, translator)
#translator.setup(template)
# redo template setup to use genshi.search_path
# This requires path notation in calls to render rather than dotted notation
# e.g. render('index.html') not render('index') etc
genshi = config['buffet.template_engines'].pop()
# set None for template_root as not using dotted (python package) notation
config.add_template_engine('genshi', None)
tmpl_options = config['buffet.template_options']
tmpl_options['genshi.search_path'] = paths['templates'][0]
tmpl_options["genshi.loader_callback"] = template_loaded
| 35.721311 | 80 | 0.709959 |
61d1a1b27250aa99b998f4c46c09fbbaf24c93c2 | 629 | py | Python | AtCoder/BeginnerContest134/myans/c.py | scnsh/CompetitiveProgramming | 3f08b4719ffe9511cba6d1ba7909f1899def702b | [
"MIT"
] | 1 | 2019-08-04T23:40:18.000Z | 2019-08-04T23:40:18.000Z | AtCoder/BeginnerContest134/myans/c.py | scnsh/CompetitiveProgramming | 3f08b4719ffe9511cba6d1ba7909f1899def702b | [
"MIT"
] | null | null | null | AtCoder/BeginnerContest134/myans/c.py | scnsh/CompetitiveProgramming | 3f08b4719ffe9511cba6d1ba7909f1899def702b | [
"MIT"
] | null | null | null | N = int(input())
A = [0] * N
max_1 = max_2 = 0  # two largest values seen (MAX/index were only used by the commented-out draft below)
for i in range(N):
A[i] = int(input())
if A[i] >= max_1:
max_2 = max_1
max_1 = A[i]
elif A[i] > max_2:
max_2 = A[i]
for i in range(N):
if A[i] == max_1:
print(max_2)
else:
print(max_1)
# if max_1 > A[i] or max_1 == A[i] and index != i:
# if A[i] > max_2:
# max_2 = A[i]
# MAX[i] = max_1
# else:
# index = i
# max_2 = max_1
# max_1 = A[i]
# for i in range(N)[::-1]:
# if max_1 > A[i]:
# MAX[i] = max_1
# elif max_1 == A[i] and index != i:
# MAX[i] = max_2
# print(MAX)
| 20.966667 | 52 | 0.459459 |
679f37615e16490bbdf73fe1a85bae9386b6cfab | 822 | py | Python | python/Black Hat Python/Chapter 2/listing-1-3.py | andrewguest/code-snippets | cb4bb8c2cf651cd86e3280348e4a4cfb88ad0127 | [
"MIT"
] | 1 | 2020-09-13T01:40:19.000Z | 2020-09-13T01:40:19.000Z | python/Black Hat Python/Chapter 2/listing-1-3.py | andrewguest/code-snippets | cb4bb8c2cf651cd86e3280348e4a4cfb88ad0127 | [
"MIT"
] | 6 | 2020-11-16T04:11:37.000Z | 2021-07-05T01:39:58.000Z | python/Black Hat Python/Chapter 2/listing-1-3.py | andrewguest/code-snippets | cb4bb8c2cf651cd86e3280348e4a4cfb88ad0127 | [
"MIT"
] | null | null | null | import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
server.listen(5)
print(f'[*] Listening on {bind_ip}:{bind_port}')
# this is our client handling thread
def handle_client(client_socket):
# just print out what the client sends
request = client_socket.recv(1024)
print(f"[*] Received: {request}")
# send back a packet
    client_socket.send(b"ACK!")  # sockets require bytes on Python 3
print(client_socket.getpeername())
client_socket.close()
while True:
client,addr = server.accept()
print(f"[*] Accepted connection from: {addr[0]}:{addr[1]}")
# spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client,args=(client,))
client_handler.start()
| 21.631579 | 74 | 0.701946 |
0a73414f02b77e853d550083554ca0ee1a859fb6 | 818 | py | Python | src/rmlab_http_client/__init__.py | antonrv/rmlab-py-http-client | 2d7041a13c659fa98ac1a2575b930c537c9468cf | [
"MIT"
] | null | null | null | src/rmlab_http_client/__init__.py | antonrv/rmlab-py-http-client | 2d7041a13c659fa98ac1a2575b930c537c9468cf | [
"MIT"
] | 1 | 2022-03-07T11:16:16.000Z | 2022-03-23T15:52:21.000Z | src/rmlab_http_client/__init__.py | antonrv/rmlab-py-http-client | 2d7041a13c659fa98ac1a2575b930c537c9468cf | [
"MIT"
] | null | null | null | from rmlab_http_client.types import (
MethodType,
FileExtensionType,
PayloadType,
AuthType,
ResponseType,
CommunicationType,
FileType,
PayloadArguments,
Endpoint,
AsyncEndpoint,
DataRequestContext,
DataRequestContextMultipart,
)
from rmlab_http_client.cache import Cache
from rmlab_http_client.client.sync_ctx import HTTPClientJWTExpirable, SyncClient
from rmlab_http_client.client.async_ctx import AsyncClient
__all__ = [
"MethodType",
"FileExtensionType",
"PayloadType",
"AuthType",
"ResponseType",
"CommunicationType",
"FileType",
"PayloadArguments",
"Endpoint",
"AsyncEndpoint",
"DataRequestContext",
"DataRequestContextMultipart",
"HTTPClientJWTExpirable",
"SyncClient",
"AsyncClient",
"Cache",
]
| 20.45 | 80 | 0.709046 |
396ba8f3492787b1bc31a17aa5ce55bdb748bcfa | 4,064 | py | Python | DeepBach/helpers.py | asdfang/CC-AugmentedDeepBach | 653ccceca1308e0d5737e9872ac700920f981624 | [
"MIT"
] | null | null | null | DeepBach/helpers.py | asdfang/CC-AugmentedDeepBach | 653ccceca1308e0d5737e9872ac700920f981624 | [
"MIT"
] | null | null | null | DeepBach/helpers.py | asdfang/CC-AugmentedDeepBach | 653ccceca1308e0d5737e9872ac700920f981624 | [
"MIT"
] | null | null | null | """
@author: Gaetan Hadjeres
"""
import torch
from torch.autograd import Variable
import re, os
import pickle
import numpy as np
def cuda_variable(tensor, volatile=False):
if torch.cuda.is_available():
return Variable(tensor.cuda(), volatile=volatile)
else:
return Variable(tensor, volatile=volatile)
def to_numpy(variable: Variable):
if torch.cuda.is_available():
return variable.data.cpu().numpy()
else:
return variable.data.numpy()
def init_hidden(num_layers, batch_size, lstm_hidden_size, volatile=False):
hidden = (
cuda_variable(
torch.randn(num_layers, batch_size, lstm_hidden_size), volatile=volatile),
cuda_variable(
torch.randn(num_layers, batch_size, lstm_hidden_size), volatile=volatile)
)
return hidden
def non_decreasing(lst):
    return all(x <= y for x, y in zip(lst, lst[1:]))
def read_train_log(model_id):
"""
used for debugging, read log file rather than re-training to record values
"""
with open(f'logs/{model_id}_train_log.txt', 'r') as fin:
rest = fin.read().replace('\n', '')
for i in range(4):
splits = rest.split(f'Training voice model {i+1}')
if len(splits) == 2:
curr_epoch, rest = splits
else:
curr_epoch = splits[0]
        train_loss_matches = re.findall(r"Training loss: (\d*\.\d*)", curr_epoch)
        train_loss = [float(l) for l in train_loss_matches]
        train_acc_matches = re.findall(r"Training accuracy: (\d*\.\d*)", curr_epoch)
        train_acc = [float(l) for l in train_acc_matches]
        val_loss_matches = re.findall(r"Validation loss: (\d*\.\d*)", curr_epoch)
        val_loss = [float(l) for l in val_loss_matches]
        val_acc_matches = re.findall(r"Validation accuracy: (\d*\.\d*)", curr_epoch)
        val_acc = [float(l) for l in val_acc_matches]
loss_over_epochs = {'training': train_loss, 'validation': val_loss}
acc_over_epochs = {'training': train_acc, 'validation': val_acc}
return loss_over_epochs, acc_over_epochs
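# The regexes above assume log lines shaped roughly like this (a hedged
# sketch of the format, reconstructed from the patterns themselves):
#   Training voice model 1
#   Training loss: 0.6931 Training accuracy: 0.5123
#   Validation loss: 0.7012 Validation accuracy: 0.4987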
def ensure_dir(directory):
"""
create directory if it does not already exist
"""
if not os.path.exists(directory):
os.makedirs(directory)
def load_or_pickle_distributions(dataset):
distributions_file = 'grader/pickles/bach_distributions.txt'
error_note_ratio_file = 'grader/pickles/error_note_ratio.txt'
parallel_error_note_ratio_file = 'grader/pickles/parallel_error_note_ratio.txt'
gaussian_file = 'grader/pickles/gaussian.txt'
if os.path.exists(distributions_file) and os.path.exists(error_note_ratio_file) and os.path.exists(
parallel_error_note_ratio_file) and os.path.exists(gaussian_file):
print('Loading Bach chorale distributions')
with open(distributions_file, 'rb') as fin:
dataset.distributions = pickle.load(fin)
with open(error_note_ratio_file, 'rb') as fin:
dataset.error_note_ratio = pickle.load(fin)
with open(parallel_error_note_ratio_file, 'rb') as fin:
dataset.parallel_error_note_ratio = pickle.load(fin)
with open(gaussian_file, 'rb') as fin:
dataset.gaussian = pickle.load(fin)
else:
dataset.calculate_distributions()
with open(distributions_file, 'wb') as fo:
pickle.dump(dataset.distributions, fo)
with open(error_note_ratio_file, 'wb') as fo:
pickle.dump(dataset.error_note_ratio, fo)
with open(parallel_error_note_ratio_file, 'wb') as fo:
pickle.dump(dataset.parallel_error_note_ratio, fo)
with open(gaussian_file, 'wb') as fo:
pickle.dump(dataset.gaussian, fo)
def get_threshold(data_file=None, col=-1):
thres = np.NINF # minimum score seen so far
with open(data_file, 'r') as fin:
next(fin)
for row in fin:
s = float(row.split(',')[col])
if s > thres:
thres = s
return thres | 36.285714 | 103 | 0.64936 |
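# Hedged sketch of the CSV layout get_threshold expects (the header row is
# skipped via next(fin); the score lives in column `col`, the last column by
# default; the column names here are illustrative only):
#   chorale,epoch,score
#   bwv101,5,0.8132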
88a15390560b5d82a326a6bd0caea06d3c11e44f | 19,264 | py | Python | yt/fields/field_info_container.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/fields/field_info_container.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 8 | 2020-04-02T16:51:49.000Z | 2022-01-11T14:12:44.000Z | yt/fields/field_info_container.py | tukss/yt | 8bf6fce609cad3d4b291ebd94667019ab2e18377 | [
"BSD-3-Clause-Clear"
] | 2 | 2020-08-12T15:46:11.000Z | 2021-02-09T13:09:17.000Z | from numbers import Number as numeric_type
import numpy as np
from yt.funcs import issue_deprecation_warning, mylog, only_on_root
from yt.geometry.geometry_handler import is_curvilinear
from yt.units.dimensions import dimensionless
from yt.units.unit_object import Unit
from yt.utilities.exceptions import YTFieldNotFound
from .derived_field import DerivedField, NullFunc, TranslationFunc
from .field_plugin_registry import field_plugins
from .particle_fields import (
add_union_field,
particle_deposition_functions,
particle_scalar_functions,
particle_vector_functions,
sph_whitelist_fields,
standard_particle_fields,
)
def tupleize(inp):
if isinstance(inp, tuple):
return inp
# prepending with a '?' ensures that the sort order is the same in py2 and
# py3, since names of field types shouldn't begin with punctuation
return (
"?",
inp,
)
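# Hedged examples of the normalization above:
#   tupleize(("gas", "density")) -> ("gas", "density")
#   tupleize("density")          -> ("?", "density")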
class FieldInfoContainer(dict):
"""
This is a generic field container. It contains a list of potential derived
fields, all of which know how to act on a data object and return a value.
This object handles converting units as well as validating the availability
of a given field.
"""
fallback = None
known_other_fields = ()
known_particle_fields = ()
extra_union_fields = ()
def __init__(self, ds, field_list, slice_info=None):
self._show_field_errors = []
self.ds = ds
# Now we start setting things up.
self.field_list = field_list
self.slice_info = slice_info
self.field_aliases = {}
self.species_names = []
if ds is not None and is_curvilinear(ds.geometry):
self.curvilinear = True
else:
self.curvilinear = False
self.setup_fluid_aliases()
def setup_fluid_fields(self):
pass
def setup_fluid_index_fields(self):
# Now we get all our index types and set up aliases to them
if self.ds is None:
return
index_fields = set([f for _, f in self if _ == "index"])
for ftype in self.ds.fluid_types:
if ftype in ("index", "deposit"):
continue
for f in index_fields:
if (ftype, f) in self:
continue
self.alias((ftype, f), ("index", f))
def setup_particle_fields(self, ptype, ftype="gas", num_neighbors=64):
skip_output_units = ("code_length",)
for f, (units, aliases, dn) in sorted(self.known_particle_fields):
units = self.ds.field_units.get((ptype, f), units)
output_units = units
if (
f in aliases or ptype not in self.ds.particle_types_raw
) and units not in skip_output_units:
u = Unit(units, registry=self.ds.unit_registry)
if u.dimensions is not dimensionless:
output_units = str(self.ds.unit_system[u.dimensions])
if (ptype, f) not in self.field_list:
continue
self.add_output_field(
(ptype, f),
sampling_type="particle",
units=units,
display_name=dn,
output_units=output_units,
)
for alias in aliases:
self.alias((ptype, alias), (ptype, f), units=output_units)
# We'll either have particle_position or particle_position_[xyz]
if (ptype, "particle_position") in self.field_list or (
ptype,
"particle_position",
) in self.field_aliases:
particle_scalar_functions(
ptype, "particle_position", "particle_velocity", self
)
else:
# We need to check to make sure that there's a "known field" that
# overlaps with one of the vector fields. For instance, if we are
# in the Stream frontend, and we have a set of scalar position
# fields, they will overlap with -- and be overridden by -- the
# "known" vector field that the frontend creates. So the easiest
# thing to do is to simply remove the on-disk field (which doesn't
# exist) and replace it with a derived field.
if (ptype, "particle_position") in self and self[
ptype, "particle_position"
]._function == NullFunc:
self.pop((ptype, "particle_position"))
particle_vector_functions(
ptype,
[f"particle_position_{ax}" for ax in "xyz"],
[f"particle_velocity_{ax}" for ax in "xyz"],
self,
)
particle_deposition_functions(ptype, "particle_position", "particle_mass", self)
standard_particle_fields(self, ptype)
# Now we check for any leftover particle fields
for field in sorted(self.field_list):
if field in self:
continue
if not isinstance(field, tuple):
raise RuntimeError
if field[0] not in self.ds.particle_types:
continue
self.add_output_field(
field,
sampling_type="particle",
units=self.ds.field_units.get(field, ""),
)
self.setup_smoothed_fields(ptype, num_neighbors=num_neighbors, ftype=ftype)
def setup_extra_union_fields(self, ptype="all"):
if ptype != "all":
raise RuntimeError(
"setup_extra_union_fields is currently"
+ 'only enabled for particle type "all".'
)
for units, field in self.extra_union_fields:
add_union_field(self, ptype, field, units)
def setup_smoothed_fields(self, ptype, num_neighbors=64, ftype="gas"):
# We can in principle compute this, but it is not yet implemented.
if (ptype, "density") not in self or not hasattr(self.ds, "_sph_ptypes"):
return
new_aliases = []
for ptype2, alias_name in list(self):
if ptype2 != ptype:
continue
if alias_name not in sph_whitelist_fields:
if alias_name.startswith("particle_"):
pass
else:
continue
uni_alias_name = alias_name
if "particle_position_" in alias_name:
uni_alias_name = alias_name.replace("particle_position_", "")
elif "particle_" in alias_name:
uni_alias_name = alias_name.replace("particle_", "")
new_aliases.append(((ftype, uni_alias_name), (ptype, alias_name),))
new_aliases.append(((ptype, uni_alias_name), (ptype, alias_name),))
for alias, source in new_aliases:
self.alias(alias, source)
# Collect the names for all aliases if geometry is curvilinear
def get_aliases_gallery(self):
aliases_gallery = []
known_other_fields = dict(self.known_other_fields)
if self.curvilinear:
for field in sorted(self.field_list):
if field[0] in self.ds.particle_types:
continue
args = known_other_fields.get(field[1], ("", [], None))
units, aliases, display_name = args
for alias in aliases:
aliases_gallery.append(alias)
return aliases_gallery
def setup_fluid_aliases(self, ftype="gas"):
known_other_fields = dict(self.known_other_fields)
# For non-Cartesian geometry, convert alias of vector fields to
# curvilinear coordinates
aliases_gallery = self.get_aliases_gallery()
for field in sorted(self.field_list):
if not isinstance(field, tuple):
raise RuntimeError
if field[0] in self.ds.particle_types:
continue
args = known_other_fields.get(field[1], ("", [], None))
units, aliases, display_name = args
# We allow field_units to override this. First we check if the
# field *name* is in there, then the field *tuple*.
units = self.ds.field_units.get(field[1], units)
units = self.ds.field_units.get(field, units)
if not isinstance(units, str) and args[0] != "":
units = f"(({args[0]})*{units})"
if (
isinstance(units, (numeric_type, np.number, np.ndarray))
and args[0] == ""
and units != 1.0
):
mylog.warning(
"Cannot interpret units: %s * %s, setting to dimensionless.",
units,
args[0],
)
units = ""
elif units == 1.0:
units = ""
self.add_output_field(
field, sampling_type="cell", units=units, display_name=display_name
)
axis_names = self.ds.coordinates.axis_order
for alias in aliases:
if (
self.curvilinear
): # For non-Cartesian geometry, convert vector aliases
if alias[-2:] not in ["_x", "_y", "_z"]:
to_convert = False
else:
for suffix in ["x", "y", "z"]:
if f"{alias[:-2]}_{suffix}" not in aliases_gallery:
to_convert = False
break
to_convert = True
if to_convert:
if alias[-2:] == "_x":
alias = f"{alias[:-2]}_{axis_names[0]}"
elif alias[-2:] == "_y":
alias = f"{alias[:-2]}_{axis_names[1]}"
elif alias[-2:] == "_z":
alias = f"{alias[:-2]}_{axis_names[2]}"
self.alias((ftype, alias), field)
@staticmethod
def _sanitize_sampling_type(sampling_type, particle_type=None):
"""Detect conflicts between deprecated and new parameters to specify the
sampling type in a new field.
This is a helper function to add_field methods.
Parameters
----------
sampling_type: str
One of "cell", "particle" or "local" (case insensitive)
particle_type: str
This is a deprecated argument of the add_field method,
which was replaced by sampling_type.
Raises
------
ValueError
For unsupported values in sampling_type
RuntimeError
If conflicting parameters are passed.
"""
try:
sampling_type = sampling_type.lower()
except AttributeError as e:
raise TypeError("sampling_type should be a string.") from e
acceptable_samplings = ("cell", "particle", "local")
if sampling_type not in acceptable_samplings:
            raise ValueError(
                "Invalid sampling type %s. Valid sampling types are %s"
                % (sampling_type, ", ".join(acceptable_samplings))
            )
if particle_type:
issue_deprecation_warning(
"'particle_type' keyword argument is deprecated in favour "
"of the positional argument 'sampling_type'."
)
if sampling_type != "particle":
raise RuntimeError(
"Conflicting values for parameters "
"'sampling_type' and 'particle_type'."
)
return sampling_type
def add_field(self, name, function, sampling_type, **kwargs):
"""
Add a new field, along with supplemental metadata, to the list of
available fields. This respects a number of arguments, all of which
are passed on to the constructor for
:class:`~yt.data_objects.api.DerivedField`.
Parameters
----------
name : str
is the name of the field.
function : callable
A function handle that defines the field. Should accept
arguments (field, data)
sampling_type: str
"cell" or "particle" or "local"
units : str
A plain text string encoding the unit. Powers must be in
python syntax (** instead of ^). If set to "auto" the units
will be inferred from the return value of the field function.
take_log : bool
Describes whether the field should be logged
validators : list
A list of :class:`FieldValidator` objects
vector_field : bool
Describes the dimensionality of the field. Currently unused.
display_name : str
A name used in the plots
"""
override = kwargs.pop("force_override", False)
# Handle the case where the field has already been added.
if not override and name in self:
# See below.
if function is None:
def create_function(f):
return f
return create_function
return
# add_field can be used in two different ways: it can be called
# directly, or used as a decorator (as yt.derived_field). If called directly,
# the function will be passed in as an argument, and we simply create
# the derived field and exit. If used as a decorator, function will
# be None. In that case, we return a function that will be applied
# to the function that the decorator is applied to.
kwargs.setdefault("ds", self.ds)
if function is None:
def create_function(f):
self[name] = DerivedField(name, sampling_type, f, **kwargs)
return f
return create_function
if isinstance(name, tuple):
self[name] = DerivedField(name, sampling_type, function, **kwargs)
return
sampling_type = self._sanitize_sampling_type(
sampling_type, particle_type=kwargs.get("particle_type")
)
if sampling_type == "particle":
ftype = "all"
else:
ftype = self.ds.default_fluid_type
if (ftype, name) not in self:
tuple_name = (ftype, name)
self[tuple_name] = DerivedField(
tuple_name, sampling_type, function, **kwargs
)
self.alias(name, tuple_name)
else:
self[name] = DerivedField(name, sampling_type, function, **kwargs)
def load_all_plugins(self, ftype="gas"):
loaded = []
for n in sorted(field_plugins):
loaded += self.load_plugin(n, ftype)
only_on_root(mylog.debug, "Loaded %s (%s new fields)", n, len(loaded))
self.find_dependencies(loaded)
def load_plugin(self, plugin_name, ftype="gas", skip_check=False):
if callable(plugin_name):
f = plugin_name
else:
f = field_plugins[plugin_name]
orig = set(self.items())
f(self, ftype, slice_info=self.slice_info)
loaded = [n for n, v in set(self.items()).difference(orig)]
return loaded
def find_dependencies(self, loaded):
deps, unavailable = self.check_derived_fields(loaded)
self.ds.field_dependencies.update(deps)
# Note we may have duplicated
dfl = set(self.ds.derived_field_list).union(deps.keys())
self.ds.derived_field_list = list(sorted(dfl, key=tupleize))
return loaded, unavailable
def add_output_field(self, name, sampling_type, **kwargs):
kwargs.setdefault("ds", self.ds)
self[name] = DerivedField(name, sampling_type, NullFunc, **kwargs)
def alias(self, alias_name, original_name, units=None):
if original_name not in self:
return
if units is None:
# We default to CGS here, but in principle, this can be pluggable
# as well.
u = Unit(self[original_name].units, registry=self.ds.unit_registry)
if u.dimensions is not dimensionless:
units = str(self.ds.unit_system[u.dimensions])
else:
units = self[original_name].units
self.field_aliases[alias_name] = original_name
self.add_field(
alias_name,
function=TranslationFunc(original_name),
sampling_type=self[original_name].sampling_type,
display_name=self[original_name].display_name,
units=units,
)
def has_key(self, key):
# This gets used a lot
if key in self:
return True
if self.fallback is None:
return False
return key in self.fallback
def __missing__(self, key):
if self.fallback is None:
raise KeyError(f"No field named {key}")
return self.fallback[key]
@classmethod
def create_with_fallback(cls, fallback, name=""):
obj = cls()
obj.fallback = fallback
obj.name = name
return obj
def __contains__(self, key):
if dict.__contains__(self, key):
return True
if self.fallback is None:
return False
return key in self.fallback
def __iter__(self):
for f in dict.__iter__(self):
yield f
if self.fallback is not None:
for f in self.fallback:
yield f
    def keys(self):
        keys = list(dict.keys(self))
        if self.fallback:
            keys += list(self.fallback.keys())
        return keys
def check_derived_fields(self, fields_to_check=None):
deps = {}
unavailable = []
fields_to_check = fields_to_check or list(self.keys())
for field in fields_to_check:
fi = self[field]
try:
fd = fi.get_dependencies(ds=self.ds)
except (NotImplementedError, Exception) as e: # noqa: B014
if field in self._show_field_errors:
raise
if not isinstance(e, YTFieldNotFound):
# if we're doing field tests, raise an error
# see yt.fields.tests.test_fields
if hasattr(self.ds, "_field_test_dataset"):
raise
mylog.debug(
"Raises %s during field %s detection.", str(type(e)), field
)
self.pop(field)
continue
# This next bit checks that we can't somehow generate everything.
# We also manually update the 'requested' attribute
missing = not all(f in self.field_list for f in fd.requested)
if missing:
self.pop(field)
unavailable.append(field)
continue
fd.requested = set(fd.requested)
deps[field] = fd
mylog.debug("Succeeded with %s (needs %s)", field, fd.requested)
dfl = set(self.ds.derived_field_list).union(deps.keys())
self.ds.derived_field_list = list(sorted(dfl, key=tupleize))
return deps, unavailable
| 38.60521 | 88 | 0.569093 |
d51f32ffa57c1fadcc3e29d3c4f09f0bcd889402 | 11,590 | py | Python | lib/networks/network.py | juhuyan/Faster_rcnn_TF | 9e26a56c7f0996301d02f072ce015fe2e6dbd301 | [
"MIT"
] | 2,794 | 2016-09-12T15:21:27.000Z | 2022-03-29T14:22:13.000Z | lib/networks/network.py | juhuyan/Faster_rcnn_TF | 9e26a56c7f0996301d02f072ce015fe2e6dbd301 | [
"MIT"
] | 323 | 2016-09-20T05:46:12.000Z | 2021-12-16T10:38:43.000Z | lib/networks/network.py | juhuyan/Faster_rcnn_TF | 9e26a56c7f0996301d02f072ce015fe2e6dbd301 | [
"MIT"
] | 1,343 | 2016-09-08T01:27:08.000Z | 2022-03-29T08:53:08.000Z | import numpy as np
import tensorflow as tf
import roi_pooling_layer.roi_pooling_op as roi_pool_op
import roi_pooling_layer.roi_pooling_op_grad
from rpn_msr.proposal_layer_tf import proposal_layer as proposal_layer_py
from rpn_msr.anchor_target_layer_tf import anchor_target_layer as anchor_target_layer_py
from rpn_msr.proposal_target_layer_tf import proposal_target_layer as proposal_target_layer_py
DEFAULT_PADDING = 'SAME'
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.inputs)==0:
raise RuntimeError('No input variables found for layer %s.'%name)
elif len(self.inputs)==1:
layer_input = self.inputs[0]
else:
layer_input = list(self.inputs)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
self.inputs = []
self.layers = dict(inputs)
self.trainable = trainable
self.setup()
def setup(self):
raise NotImplementedError('Must be subclassed.')
def load(self, data_path, session, saver, ignore_missing=False):
if data_path.endswith('.ckpt'):
saver.restore(session, data_path)
else:
data_dict = np.load(data_path).item()
for key in data_dict:
with tf.variable_scope(key, reuse=True):
for subkey in data_dict[key]:
try:
var = tf.get_variable(subkey)
session.run(var.assign(data_dict[key][subkey]))
print "assign pretrain model "+subkey+ " to "+key
except ValueError:
print "ignore "+key
if not ignore_missing:
raise
def feed(self, *args):
assert len(args)!=0
self.inputs = []
for layer in args:
if isinstance(layer, basestring):
try:
layer = self.layers[layer]
print layer
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
self.inputs.append(layer)
return self
def get_output(self, layer):
try:
layer = self.layers[layer]
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
return layer
def get_unique_name(self, prefix):
id = sum(t.startswith(prefix) for t,_ in self.layers.items())+1
return '%s_%d'%(prefix, id)
def make_var(self, name, shape, initializer=None, trainable=True):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
@layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
self.validate_padding(padding)
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
init_biases = tf.constant_initializer(0.0)
kernel = self.make_var('weights', [k_h, k_w, c_i/group, c_o], init_weights, trainable)
biases = self.make_var('biases', [c_o], init_biases, trainable)
if group==1:
conv = convolve(input, kernel)
else:
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
conv = tf.concat(3, output_groups)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def roi_pool(self, input, pooled_height, pooled_width, spatial_scale, name):
# only use the first input
if isinstance(input[0], tuple):
input[0] = input[0][0]
if isinstance(input[1], tuple):
input[1] = input[1][0]
print input
return roi_pool_op.roi_pool(input[0], input[1],
pooled_height,
pooled_width,
spatial_scale,
name=name)[0]
@layer
def proposal_layer(self, input, _feat_stride, anchor_scales, cfg_key, name):
if isinstance(input[0], tuple):
input[0] = input[0][0]
return tf.reshape(tf.py_func(proposal_layer_py,[input[0],input[1],input[2], cfg_key, _feat_stride, anchor_scales], [tf.float32]),[-1,5],name =name)
@layer
def anchor_target_layer(self, input, _feat_stride, anchor_scales, name):
if isinstance(input[0], tuple):
input[0] = input[0][0]
with tf.variable_scope(name) as scope:
rpn_labels,rpn_bbox_targets,rpn_bbox_inside_weights,rpn_bbox_outside_weights = tf.py_func(anchor_target_layer_py,[input[0],input[1],input[2],input[3], _feat_stride, anchor_scales],[tf.float32,tf.float32,tf.float32,tf.float32])
rpn_labels = tf.convert_to_tensor(tf.cast(rpn_labels,tf.int32), name = 'rpn_labels')
rpn_bbox_targets = tf.convert_to_tensor(rpn_bbox_targets, name = 'rpn_bbox_targets')
rpn_bbox_inside_weights = tf.convert_to_tensor(rpn_bbox_inside_weights , name = 'rpn_bbox_inside_weights')
rpn_bbox_outside_weights = tf.convert_to_tensor(rpn_bbox_outside_weights , name = 'rpn_bbox_outside_weights')
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights
@layer
def proposal_target_layer(self, input, classes, name):
if isinstance(input[0], tuple):
input[0] = input[0][0]
with tf.variable_scope(name) as scope:
rois,labels,bbox_targets,bbox_inside_weights,bbox_outside_weights = tf.py_func(proposal_target_layer_py,[input[0],input[1],classes],[tf.float32,tf.float32,tf.float32,tf.float32,tf.float32])
rois = tf.reshape(rois,[-1,5] , name = 'rois')
labels = tf.convert_to_tensor(tf.cast(labels,tf.int32), name = 'labels')
bbox_targets = tf.convert_to_tensor(bbox_targets, name = 'bbox_targets')
bbox_inside_weights = tf.convert_to_tensor(bbox_inside_weights, name = 'bbox_inside_weights')
bbox_outside_weights = tf.convert_to_tensor(bbox_outside_weights, name = 'bbox_outside_weights')
return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights
@layer
def reshape_layer(self, input, d,name):
input_shape = tf.shape(input)
if name == 'rpn_cls_prob_reshape':
return tf.transpose(tf.reshape(tf.transpose(input,[0,3,1,2]),[input_shape[0],
int(d),tf.cast(tf.cast(input_shape[1],tf.float32)/tf.cast(d,tf.float32)*tf.cast(input_shape[3],tf.float32),tf.int32),input_shape[2]]),[0,2,3,1],name=name)
else:
return tf.transpose(tf.reshape(tf.transpose(input,[0,3,1,2]),[input_shape[0],
int(d),tf.cast(tf.cast(input_shape[1],tf.float32)*(tf.cast(input_shape[3],tf.float32)/tf.cast(d,tf.float32)),tf.int32),input_shape[2]]),[0,2,3,1],name=name)
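    # NOTE: the layer below calls into a `feature_extrapolating_op` module
    # that is never imported above; the import presumably lives alongside
    # roi_pooling_op in the original repo.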
@layer
def feature_extrapolating(self, input, scales_base, num_scale_base, num_per_octave, name):
return feature_extrapolating_op.feature_extrapolating(input,
scales_base,
num_scale_base,
num_per_octave,
name=name)
@layer
def lrn(self, input, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(concat_dim=axis, values=inputs, name=name)
@layer
def fc(self, input, num_out, name, relu=True, trainable=True):
with tf.variable_scope(name) as scope:
# only use the first input
if isinstance(input, tuple):
input = input[0]
input_shape = input.get_shape()
if input_shape.ndims == 4:
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
else:
feed_in, dim = (input, int(input_shape[-1]))
if name == 'bbox_pred':
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
init_biases = tf.constant_initializer(0.0)
else:
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
init_biases = tf.constant_initializer(0.0)
weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
biases = self.make_var('biases', [num_out], init_biases, trainable)
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def softmax(self, input, name):
input_shape = tf.shape(input)
if name == 'rpn_cls_prob':
return tf.reshape(tf.nn.softmax(tf.reshape(input,[-1,input_shape[3]])),[-1,input_shape[1],input_shape[2],input_shape[3]],name=name)
else:
return tf.nn.softmax(input,name=name)
@layer
def dropout(self, input, keep_prob, name):
return tf.nn.dropout(input, keep_prob, name=name)
| 42.454212 | 238 | 0.582226 |
6fca951caa4a25ae351a830ccbad952bf8b0034a | 759 | py | Python | setup.py | alex-kling/amesgcm | 5ffe9b4696fc4b48767d3acc466a919b796e7b11 | [
"MIT"
] | 4 | 2020-05-29T19:52:09.000Z | 2021-12-11T09:31:12.000Z | setup.py | alex-kling/amesgcm | 5ffe9b4696fc4b48767d3acc466a919b796e7b11 | [
"MIT"
] | 2 | 2021-09-08T20:56:20.000Z | 2022-02-11T00:49:01.000Z | setup.py | alex-kling/amesgcm | 5ffe9b4696fc4b48767d3acc466a919b796e7b11 | [
"MIT"
] | 2 | 2020-07-21T16:10:34.000Z | 2021-09-08T20:58:39.000Z | from setuptools import setup, find_packages
setup(name='amesgcm',
version='0.1',
description='Analysis pipeline for the NASA Ames MGCM',
url='http://github.com/alex-kling/amesgcm',
author='Mars Climate Modeling Center',
author_email='alexandre.m.kling@nasa.gov',
license='TBD',
scripts=['bin/MarsPull.py','bin/MarsInterp.py','bin/MarsPlot.py','bin/MarsVars.py','bin/MarsFiles.py','bin/MarsViewer.py'],
install_requires=['requests','netCDF4','numpy==1.18','matplotlib','scipy'],
packages=['amesgcm'],
data_files = [('mars_data', ['mars_data/Legacy.fixed.nc']),('mars_templates', ['mars_templates/legacy.in','mars_templates/amesgcm_profile'])],
include_package_data=True,
zip_safe=False)
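# A hedged install sketch: running `pip install .` from the repo root should
# place the Mars* command-line scripts listed above on the PATH.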
| 47.4375 | 148 | 0.681159 |
a04f6e552b73b8170ebeb6adc5e8de5377c006c5 | 7,795 | py | Python | docs/conf.py | Shustea/lstm-speech | 666538ec420dee3e83e9db11cd80ebea3056b58c | [
"MIT"
] | null | null | null | docs/conf.py | Shustea/lstm-speech | 666538ec420dee3e83e9db11cd80ebea3056b58c | [
"MIT"
] | null | null | null | docs/conf.py | Shustea/lstm-speech | 666538ec420dee3e83e9db11cd80ebea3056b58c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# lstm-speech documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lstm-speech'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'lstm-speechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'lstm-speech.tex',
u'lstm-speech Documentation',
u"Adam & Yahav", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'lstm-speech', u'lstm-speech Documentation',
[u"Adam & Yahav"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'lstm-speech', u'lstm-speech Documentation',
u"Adam & Yahav", 'lstm-speech',
'A short description of the project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.816327 | 80 | 0.705452 |
e5b274ed73970e8a263eff25acc93f1e4ec098ba | 3,729 | py | Python | spacy_ann/cli/create_index.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | spacy_ann/cli/create_index.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | spacy_ann/cli/create_index.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from pathlib import Path
import spacy
import srsly
import typer
from spacy.kb import KnowledgeBase
from spacy_ann.candidate_generator import CandidateGenerator
from spacy_ann.types import kb_type_vs_index
from wasabi import Printer
from tqdm import tqdm
from itertools import tee
INPUT_DIM = 300 # dimension of pretrained input vectors
DESC_WIDTH = 300 # dimension of output entity vectors
def create_index(
model: str,
kb_dir: Path,
output_dir: Path,
new_model_name: str = "ann_linker",
cg_threshold: float = 0.8,
n_iter: int = 5,
verbose: bool = True,
):
"""Create an AnnLinker based on the Character N-Gram
TF-IDF vectors for aliases in a KnowledgeBase
model (str): spaCy language model directory or name to load
kb_dir (Path): path to the directory with kb entities.jsonl and aliases.jsonl files
output_dir (Path): path to output_dir for spaCy model with ann_linker pipe
kb File Formats
e.g. entities.jsonl
{"id": "a1", "description": "Machine learning (ML) is the scientific study of algorithms and statistical models..."}
{"id": "a2", "description": "ML (\"Meta Language\") is a general-purpose functional programming language. It has roots in Lisp, and has been characterized as \"Lisp with types\"."}
e.g. aliases.jsonl
{"alias": "ML", "entities": ["a1", "a2"], "probabilities": [0.5, 0.5]}
"""
msg = Printer(hide_animation=not verbose)
msg.divider("Load Model")
with msg.loading(f"Loading model {model}"):
nlp = spacy.load(model)
msg.good("Done.")
if output_dir is not None:
output_dir = Path(output_dir / new_model_name)
if not output_dir.exists():
output_dir.mkdir(parents=True)
entities, entities_copy = tee(srsly.read_jsonl(kb_dir / "entities.jsonl"))
total_entities = sum(1 for _ in entities_copy)
aliases, aliases_copy = tee(srsly.read_jsonl(kb_dir / "aliases.jsonl"))
total_aliases = sum(1 for _ in aliases_copy)
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=INPUT_DIM)
    empty_vector = nlp.make_doc('').vector
for entity in tqdm(entities, desc='Adding entities to KB', total=total_entities):
id = entity['id']
if not kb.contains_entity(id):
            embedding = nlp.make_doc(entity['description']).vector if 'description' in entity else empty_vector
label = entity['label'] if 'label' in entity else 0
if label: label = kb_type_vs_index[label]
kb.add_entity(entity=id,
freq=label, #TODO: Add a proper "label" field (repurposed freq field as the type label)
entity_vector=embedding)
    for alias in tqdm(aliases, desc="Setting kb entities and aliases", total=total_aliases):
        alias_entities = [e for e in alias["entities"] if kb.contains_entity(e)]
        num_entities = len(alias_entities)
        if num_entities > 0:
            prior_probabilities = (alias['probabilities']
                                   if len(alias['probabilities']) == num_entities
                                   else [1.0 / num_entities] * num_entities)
            kb.add_alias(alias=alias["alias"], entities=alias_entities,
                         probabilities=prior_probabilities)
msg.divider("Create ANN Index")
alias_strings = kb.get_alias_strings()
cg = CandidateGenerator().fit(alias_strings, verbose=True)
ann_linker = nlp.create_pipe("ann_linker")
ann_linker.set_kb(kb)
ann_linker.set_cg(cg)
nlp.add_pipe(ann_linker, last=True)
nlp.meta["name"] = new_model_name
nlp.to_disk(output_dir)
nlp.from_disk(output_dir)
if __name__ == "__main__":
typer.run(create_index)
| 35.855769 | 184 | 0.682489 |
5e8fd59ee0cd63c37e239c44c7c173fb5416f655 | 40,818 | py | Python | pudb/theme.py | mm40/pudb | 889016708fccdcb27b6cbe03b94d626f6d39be46 | [
"MIT"
] | null | null | null | pudb/theme.py | mm40/pudb | 889016708fccdcb27b6cbe03b94d626f6d39be46 | [
"MIT"
] | null | null | null | pudb/theme.py | mm40/pudb | 889016708fccdcb27b6cbe03b94d626f6d39be46 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
__copyright__ = """
Copyright (C) 2009-2017 Andreas Kloeckner
Copyright (C) 2014-2017 Aaron Meurer
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
THEMES = [
"classic",
"vim",
"dark vim",
"midnight",
"solarized",
"agr-256",
"monokai",
"monokai-256"
]
from pudb.py3compat import execfile, raw_input
import urwid
def get_palette(may_use_fancy_formats, theme="classic"):
if may_use_fancy_formats:
def add_setting(color, setting):
return color+","+setting
else:
def add_setting(color, setting):
return color
# ------------------------------------------------------------------------------
# Reference for some palette items:
#
# "namespace" : "import", "from", "using"
# "operator" : "+", "-", "=" etc.
# NOTE: Does not include ".", which is assigned the type "source"
# "argument" : Function arguments
# "builtin" : "range", "dict", "set", "list", etc.
# "pseudo" : "None", "True", "False"
# NOTE: Does not include "self", which is assigned the
# type "source"
# "dunder" : Class method names of the form __<name>__ within
# a class definition
# "exception" : Exception names
# "keyword" : All keywords except those specifically assigned to "keyword2"
# ("from", "and", "break", "is", "try", "pass", etc.)
# "keyword2" : "class", "def", "exec", "lambda", "print"
# ------------------------------------------------------------------------------
inheritance_map = (
# Style Inherits from
# ---------- ----------
("namespace", "keyword"),
("operator", "source"),
("argument", "source"),
("builtin", "source"),
("pseudo", "source"),
("dunder", "name"),
("exception", "source"),
("keyword2", "keyword"),
("current line marker", "source"),
)
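    # For example, per the mapping above, if a theme never sets "operator",
    # it should resolve to whatever style "source" carries.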
palette_dict = {
# The following styles are initialized to "None". Themes
# (including custom Themes) may set them as needed.
# If they are not set by a theme, then they will
# inherit from other styles in accordance with
# the inheritance_map.
"namespace": None,
"operator": None,
"argument": None,
"builtin": None,
"pseudo": None,
"dunder": None,
"exception": None,
"keyword2": None,
# {{{ ui
"header": ("black", "light gray", "standout"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": (add_setting("yellow", "bold"), "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"search box": ("black", "dark cyan"),
"search not found": ("white", "dark red"),
# }}}
# {{{ shell
"command line edit": (add_setting("yellow", "bold"), "dark blue"),
"command line prompt": (add_setting("white", "bold"), "dark blue"),
"command line output": ("light cyan", "dark blue"),
"command line input": (add_setting("light cyan", "bold"), "dark blue"),
"command line error": (add_setting("light red", "bold"), "dark blue"),
"focused command line output": ("black", "dark green"),
"focused command line input": (
add_setting("light cyan", "bold"),
"dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "dark blue"),
"command line focused button": ("light cyan", "black"),
# }}}
# {{{ source
"breakpoint": ("black", "dark cyan"),
"disabled breakpoint": ("dark gray", "dark cyan"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "dark cyan"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "dark cyan"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (
add_setting("dark gray", "bold"), "dark green", "bold"),
"source": (add_setting("yellow", "bold"), "dark blue"),
"focused source": ("black", "dark green"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "dark cyan"),
"current focused source": (add_setting("white", "bold"), "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
# {{{ highlighting
"current line marker": ("dark red", "dark blue"),
"breakpoint marker": ("dark red", "dark blue"),
"line number": ("light gray", "dark blue"),
"keyword": (add_setting("white", "bold"), "dark blue"),
"name": ("light cyan", "dark blue"),
"literal": ("light magenta, bold", "dark blue"),
"string": (add_setting("light magenta", "bold"), "dark blue"),
"doublestring": (add_setting("light magenta", "bold"), "dark blue"),
"singlestring": (add_setting("light magenta", "bold"), "dark blue"),
"docstring": (add_setting("light magenta", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("light gray", "dark blue"),
# }}}
# }}}
# {{{ breakpoints
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ variables view
"variables": ("black", "dark cyan"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("dark blue", "dark cyan"),
"var value": ("black", "dark cyan"),
"focused var label": ("dark blue", "dark green"),
"focused var value": ("black", "dark green"),
"highlighted var label": ("white", "dark cyan"),
"highlighted var value": ("black", "dark cyan"),
"focused highlighted var label": ("white", "dark green"),
"focused highlighted var value": ("black", "dark green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark green"),
# }}}
# {{{ stack
"stack": ("black", "dark cyan"),
"frame name": ("black", "dark cyan"),
"focused frame name": ("black", "dark green"),
"frame class": ("dark blue", "dark cyan"),
"focused frame class": ("dark blue", "dark green"),
"frame location": ("light cyan", "dark cyan"),
"focused frame location": ("light cyan", "dark green"),
"current frame name": (add_setting("white", "bold"),
"dark cyan"),
"focused current frame name": (add_setting("white", "bold"),
"dark green", "bold"),
"current frame class": ("dark blue", "dark cyan"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark cyan"),
"focused current frame location": ("light cyan", "dark green"),
# }}}
}
if theme == "classic":
pass
elif theme == "vim":
# {{{ vim theme
palette_dict.update({
"source": ("black", "default"),
"keyword": ("brown", "default"),
"kw_namespace": ("dark magenta", "default"),
"literal": ("black", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("dark red", "default"),
"docstring": ("dark red", "default"),
"punctuation": ("black", "default"),
"comment": ("dark blue", "default"),
"classname": ("dark cyan", "default"),
"name": ("dark cyan", "default"),
"line number": ("dark gray", "default"),
"current line marker": ("dark red", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit":
("black", "default"),
"command line prompt":
(add_setting("black", "bold"), "default"),
"command line output":
(add_setting("black", "bold"), "default"),
"command line input":
("black", "default"),
"command line error":
(add_setting("light red", "bold"), "default"),
"focused command line output":
("black", "dark green"),
"focused command line input":
(add_setting("light cyan", "bold"), "dark green"),
"focused command line error":
("black", "dark green"),
# }}}
})
# }}}
elif theme == "dark vim":
# {{{ dark vim
palette_dict.update({
"header": ("black", "light gray", "standout"),
# {{{ variables view
"variables": ("black", "dark gray"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("light gray", "dark gray"),
"var value": ("white", "dark gray"),
"focused var label": ("light gray", "light blue"),
"focused var value": ("white", "light blue"),
"highlighted var label": ("light gray", "dark green"),
"highlighted var value": ("white", "dark green"),
"focused highlighted var label": ("light gray", "light blue"),
"focused highlighted var value": ("white", "light blue"),
"return label": ("light gray", "dark gray"),
"return value": ("light cyan", "dark gray"),
"focused return label": ("yellow", "light blue"),
"focused return value": ("white", "light blue"),
# }}}
# {{{ stack view
"stack": ("black", "dark gray"),
"frame name": ("light gray", "dark gray"),
"focused frame name": ("light gray", "light blue"),
"frame class": ("dark blue", "dark gray"),
"focused frame class": ("dark blue", "light blue"),
"frame location": ("white", "dark gray"),
"focused frame location": ("white", "light blue"),
"current frame name": (add_setting("white", "bold"),
"dark gray"),
"focused current frame name": (add_setting("white", "bold"),
"light blue", "bold"),
"current frame class": ("dark blue", "dark gray"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark gray"),
"focused current frame location": ("light cyan", "light blue"),
# }}}
# {{{ breakpoint view
"breakpoint": ("light gray", "dark gray"),
"disabled breakpoint": ("black", "dark gray"),
"focused breakpoint": ("light gray", "light blue"),
"focused disabled breakpoint": ("black", "light blue"),
"current breakpoint": (add_setting("white", "bold"), "dark gray"),
"disabled current breakpoint": ("black", "dark gray"),
"focused current breakpoint":
(add_setting("white", "bold"), "light blue"),
"focused disabled current breakpoint":
("black", "light blue"),
# }}}
# {{{ ui widgets
"selectable": ("light gray", "dark gray"),
"focused selectable": ("white", "light blue"),
"button": ("light gray", "dark gray"),
"focused button": ("white", "light blue"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": ("light blue", "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": ("white", "dark gray"),
"fixed value": ("light gray", "dark gray"),
"search box": ("white", "dark gray"),
"search not found": ("white", "dark red"),
"dialog title": (add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ source view
"current line marker": ("dark red", "black"),
"breakpoint marker": ("dark red", "black"),
"breakpoint source": ("light gray", "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ highlighting
"source": ("white", "black"),
"focused source": ("white", "light blue"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "light gray"),
"current focused source": ("white", "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
"line number": ("dark gray", "black"),
"keyword": ("yellow", "black"),
"literal": ("dark magenta", "black"),
"string": ("dark magenta", "black"),
"doublestring": ("dark magenta", "black"),
"singlestring": ("dark magenta", "black"),
"docstring": ("dark magenta", "black"),
"name": ("light cyan", "black"),
"punctuation": ("yellow", "black"),
"comment": ("light blue", "black"),
# }}}
# {{{ shell
"command line edit":
("white", "black"),
"command line prompt":
(add_setting("yellow", "bold"), "black"),
"command line output":
(add_setting("yellow", "bold"), "black"),
"command line input":
("white", "black"),
"command line error":
(add_setting("light red", "bold"), "black"),
"focused command line output":
("black", "light blue"),
"focused command line input":
(add_setting("light cyan", "bold"), "light blue"),
"focused command line error":
("black", "light blue"),
# }}}
})
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
palette_dict.update({
"variables": ("white", "default"),
"var label": ("light blue", "default"),
"var value": ("white", "default"),
"stack": ("white", "default"),
"frame name": ("white", "default"),
"frame class": ("dark blue", "default"),
"frame location": ("light cyan", "default"),
"current frame name": (add_setting("white", "bold"), "default"),
"current frame class": ("dark blue", "default"),
"current frame location": ("light cyan", "default"),
"focused frame name": ("black", "dark green"),
"focused frame class": (add_setting("white", "bold"), "dark green"),
"focused frame location": ("dark blue", "dark green"),
"focused current frame name": ("black", "dark green"),
"focused current frame class": (
add_setting("white", "bold"), "dark green"),
"focused current frame location": ("dark blue", "dark green"),
"search box": ("default", "default"),
"breakpoint": ("white", "default"),
"disabled breakpoint": ("dark gray", "default"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "default"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "default"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (
add_setting("dark gray", "bold"), "dark green", "bold"),
"source": ("white", "default"),
"highlighted source": ("white", "light cyan"),
"current source": ("white", "light gray"),
"current focused source": ("white", "brown"),
"line number": ("light gray", "default"),
"keyword": ("dark magenta", "default"),
"name": ("white", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("light red", "default"),
"backtick": ("light green", "default"),
"punctuation": ("white", "default"),
"comment": ("dark green", "default"),
"classname": ("dark cyan", "default"),
"funcname": ("white", "default"),
"current line marker": ("dark red", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit": ("white", "default"),
"command line prompt": (add_setting("white", "bold"), "default"),
"command line output": (add_setting("white", "bold"), "default"),
"command line input": (add_setting("white", "bold"), "default"),
"command line error": (add_setting("light red", "bold"), "default"),
"focused command line output": ("black", "dark green"),
"focused command line input": (
add_setting("white", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "default"),
"command line focused button": ("black", "light gray"), # White
# doesn't work in curses mode
# }}}
})
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict.update({
# UI
"header": ("black", "light blue", "standout"),
"focused sidebar": ("yellow", "light blue", "standout"),
"group head": ("black", "light blue"),
"background": ("black", "light blue"),
"label": ("black", "light blue"),
"value": ("white", "dark blue"),
"fixed value": ("black", "light blue"),
"variables": ("light blue", "default"),
"var label": ("dark blue", "default"),
"var value": ("light blue", "default"),
"focused var label": ("white", "dark blue"),
"focused var value": ("black", "dark blue"),
"highlighted var label": ("white", "light green"),
"highlighted var value": ("white", "light green"),
"focused highlighted var label": ("white", "light green"),
"focused highlighted var value": ("white", "light green"),
"stack": ("light blue", "default"),
"frame name": ("dark blue", "default"),
"frame class": ("light blue", "default"),
"frame location": ("light green", "default"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("black", "dark blue"),
"focused frame location": ("dark gray", "dark blue"),
"focused current frame name": ("white", "light green"),
"focused current frame class": ("black", "light green"),
"focused current frame location": ("dark gray", "light green"),
"current frame name": ("white", "light green"),
"current frame class": ("black", "light green"),
"current frame location": ("dark gray", "light green"),
# breakpoints
"breakpoint": ("light blue", "default"),
"disabled breakpoint": ("light gray", "default"),
"focused breakpoint": ("white", "light green"),
"focused disabled breakpoint": ("light gray", "light green"),
"current breakpoint": ("white", "dark blue"),
"disabled current breakpoint": ("light gray", "dark blue"),
"focused current breakpoint": ("white", "light green"),
"focused disabled current breakpoint": ("light gray", "light green"),
# source
"breakpoint source": ("light blue", "black"),
"current breakpoint source": ("black", "light green"),
"breakpoint focused source": ("dark gray", "dark blue"),
"current breakpoint focused source": ("black", "light green"),
"current line marker": ("dark red", "default"),
"breakpoint marker": ("dark red", "default"),
"search box": ("default", "default"),
"source": ("light blue", "default"),
"current source": ("light gray", "light blue"),
"current focused source": ("light gray", "light blue"),
"focused source": ("dark gray", "dark blue"),
"current highlighted source": ("black", "dark cyan"),
"highlighted source": ("light blue", "black"),
"line number": ("light blue", "default"),
"keyword": ("dark green", "default"),
"name": ("light blue", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark cyan", "default"),
"doublestring": ("dark cyan", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("dark cyan", "default"),
"backtick": ("light green", "default"),
"punctuation": ("light blue", "default"),
"comment": ("light green", "default"),
"classname": ("dark blue", "default"),
"funcname": ("dark blue", "default"),
# shell
"command line edit": ("light blue", "default"),
"command line prompt": ("light blue", "default"),
"command line output": ("light blue", "default"),
"command line input": ("light blue", "default"),
"command line error": ("dark red", "default"),
"focused command line output": ("black", "light green"),
"focused command line input": ("black", "light green"),
"focused command line error": ("dark red", "light blue"),
"command line clear button": ("light blue", "default"),
"command line focused button": ("black", "light blue"),
})
# }}}
elif theme == "agr-256":
# {{{ agr-256
palette_dict.update({
"header": ("h235", "h252", "standout"),
# {{{ variables view
"variables": ("h235", "h233"),
"variable separator": ("h23", "h252"),
"var label": ("h111", "h233"),
"var value": ("h255", "h233"),
"focused var label": ("h192", "h24"),
"focused var value": ("h192", "h24"),
"highlighted var label": ("h252", "h22"),
"highlighted var value": ("h255", "h22"),
"focused highlighted var label": ("h252", "h64"),
"focused highlighted var value": ("h255", "h64"),
"return label": ("h113", "h233"),
"return value": ("h113", "h233"),
"focused return label": (add_setting("h192", "bold"), "h24"),
"focused return value": ("h192", "h24"),
# }}}
# {{{ stack view
"stack": ("h235", "h233"),
"frame name": ("h192", "h233"),
"focused frame name": ("h192", "h24"),
"frame class": ("h111", "h233"),
"focused frame class": ("h192", "h24"),
"frame location": ("h252", "h233"),
"focused frame location": ("h192", "h24"),
"current frame name": ("h255", "h22"),
"focused current frame name": ("h255", "h64"),
"current frame class": ("h111", "h22"),
"focused current frame class": ("h255", "h64"),
"current frame location": ("h252", "h22"),
"focused current frame location": ("h255", "h64"),
# }}}
# {{{ breakpoint view
"breakpoint": ("h80", "h233"),
"disabled breakpoint": ("h60", "h233"),
"focused breakpoint": ("h192", "h24"),
"focused disabled breakpoint": ("h182", "h24"),
"current breakpoint": (add_setting("h255", "bold"), "h22"),
"disabled current breakpoint": (add_setting("h016", "bold"), "h22"),
"focused current breakpoint": (add_setting("h255", "bold"), "h64"),
"focused disabled current breakpoint": (
add_setting("h016", "bold"), "h64"),
# }}}
# {{{ ui widgets
"selectable": ("h252", "h235"),
"focused selectable": ("h255", "h24"),
"button": ("h252", "h235"),
"focused button": ("h255", "h24"),
"background": ("h235", "h252"),
"hotkey": (add_setting("h235", "underline"), "h252", "underline"),
"focused sidebar": ("h23", "h252", "standout"),
"warning": (add_setting("h255", "bold"), "h124", "standout"),
"label": ("h235", "h252"),
"value": ("h255", "h17"),
"fixed value": ("h252", "h17"),
"group head": (add_setting("h25", "bold"), "h252"),
"search box": ("h255", "h235"),
"search not found": ("h255", "h124"),
"dialog title": (add_setting("h255", "bold"), "h235"),
# }}}
# {{{ source view
"current line marker": ("h160", "h235"),
"breakpoint marker": ("h160", "h235"),
"breakpoint source": ("h252", "h124"),
"breakpoint focused source": ("h192", "h124"),
"current breakpoint source": ("h192", "h124"),
"current breakpoint focused source": (
add_setting("h192", "bold"), "h124"),
# }}}
# {{{ highlighting
"source": ("h255", "h235"),
"focused source": ("h192", "h24"),
"highlighted source": ("h252", "h22"),
"current source": (add_setting("h252", "bold"), "h23"),
"current focused source": (add_setting("h192", "bold"), "h23"),
"current highlighted source": ("h255", "h22"),
"line number": ("h241", "h235"),
"keyword": ("h111", "h235"),
"literal": ("h173", "h235"),
"string": ("h113", "h235"),
"doublestring": ("h113", "h235"),
"singlestring": ("h113", "h235"),
"docstring": ("h113", "h235"),
"name": ("h192", "h235"),
"punctuation": ("h223", "h235"),
"comment": ("h246", "h235"),
# }}}
# {{{ shell
"command line edit": ("h255", "h233"),
"command line prompt": (add_setting("h192", "bold"), "h233"),
"command line output": ("h80", "h233"),
"command line input": ("h255", "h233"),
"command line error": ("h160", "h233"),
"focused command line output": (add_setting("h192", "bold"), "h24"),
"focused command line input": ("h255", "h24"),
"focused command line error": ("h235", "h24"),
"command line clear button": (add_setting("h255", "bold"), "h233"),
"command line focused button": ("h255", "h24"),
# }}}
})
# }}}
elif theme == "monokai":
        # {{{ monokai
        # Based on the Monokai color scheme
palette_dict.update({
"variables": ("white", "default"),
"var label": ("light blue", "default"),
"var value": ("white", "default"),
"stack": ("white", "default"),
"frame name": ("white", "default"),
"frame class": ("dark blue", "default"),
"frame location": ("light cyan", "default"),
"current frame name": (add_setting("white", "bold"), "default"),
"current frame class": ("dark blue", "default"),
"current frame location": ("light cyan", "default"),
"focused frame name": ("black", "dark green"),
"focused frame class": (add_setting("white", "bold"), "dark green"),
"focused frame location": ("dark blue", "dark green"),
"focused current frame name": ("black", "dark green"),
"focused current frame class": (
add_setting("white", "bold"), "dark green"),
"focused current frame location": ("dark blue", "dark green"),
"search box": ("default", "default"),
"breakpoint": ("white", "default"),
"disabled breakpoint": ("dark gray", "default"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "default"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "default"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (
add_setting("dark gray", "bold"), "dark green", "bold"),
"source": ("white", "default"),
"highlighted source": ("white", "light cyan"),
"current source": ("white", "light gray"),
"current focused source": ("white", "brown"),
"line number": ("dark gray", "black"),
"keyword2": ("light cyan", "black"),
"name": ("light green", "black"),
"literal": ("light magenta", "black"),
"namespace": ("light red", "black"),
"operator": ("light red", "black"),
"argument": ("brown", "black"),
"builtin": ("light cyan", "black"),
"pseudo": ("light magenta", "black"),
"dunder": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword": ("light red", "black"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("light red", "default"),
"backtick": ("light green", "default"),
"punctuation": ("white", "default"),
"comment": ("dark green", "default"),
"classname": ("dark cyan", "default"),
"funcname": ("white", "default"),
"current line marker": ("dark red", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit": ("white", "default"),
"command line prompt": (add_setting("white", "bold"), "default"),
"command line output": (add_setting("white", "bold"), "default"),
"command line input": (add_setting("white", "bold"), "default"),
"command line error": (add_setting("light red", "bold"), "default"),
"focused command line output": ("black", "dark green"),
"focused command line input": (
add_setting("white", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "default"),
"command line focused button": ("black", "light gray"), # White
# doesn't work in curses mode
# }}}
})
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
palette_dict.update({
"header": ("h235", "h252", "standout"),
# {{{ variables view
"variables": ("h235", "h233"),
"variable separator": ("h23", "h252"),
"var label": ("h111", "h233"),
"var value": ("h255", "h233"),
"focused var label": ("h237", "h172"),
"focused var value": ("h237", "h172"),
"highlighted var label": ("h252", "h22"),
"highlighted var value": ("h255", "h22"),
"focused highlighted var label": ("h252", "h64"),
"focused highlighted var value": ("h255", "h64"),
"return label": ("h113", "h233"),
"return value": ("h113", "h233"),
"focused return label": (add_setting("h192", "bold"), "h24"),
"focused return value": ("h237", "h172"),
# }}}
# {{{ stack view
"stack": ("h235", "h233"),
"frame name": ("h192", "h233"),
"focused frame name": ("h237", "h172"),
"frame class": ("h111", "h233"),
"focused frame class": ("h237", "h172"),
"frame location": ("h252", "h233"),
"focused frame location": ("h237", "h172"),
"current frame name": ("h255", "h22"),
"focused current frame name": ("h255", "h64"),
"current frame class": ("h111", "h22"),
"focused current frame class": ("h255", "h64"),
"current frame location": ("h252", "h22"),
"focused current frame location": ("h255", "h64"),
# }}}
# {{{ breakpoint view
"breakpoint": ("h80", "h233"),
"disabled breakpoint": ("h60", "h233"),
"focused breakpoint": ("h237", "h172"),
"focused disabled breakpoint": ("h182", "h24"),
"current breakpoint": (add_setting("h255", "bold"), "h22"),
"disabled current breakpoint": (add_setting("h016", "bold"), "h22"),
"focused current breakpoint": (add_setting("h255", "bold"), "h64"),
"focused disabled current breakpoint": (
add_setting("h016", "bold"), "h64"),
# }}}
# {{{ ui widgets
"selectable": ("h252", "h235"),
"focused selectable": ("h255", "h24"),
"button": ("h252", "h235"),
"focused button": ("h255", "h24"),
"background": ("h235", "h252"),
"hotkey": (add_setting("h235", "underline"), "h252", "underline"),
"focused sidebar": ("h23", "h252", "standout"),
"warning": (add_setting("h255", "bold"), "h124", "standout"),
"label": ("h235", "h252"),
"value": ("h255", "h17"),
"fixed value": ("h252", "h17"),
"group head": (add_setting("h25", "bold"), "h252"),
"search box": ("h255", "h235"),
"search not found": ("h255", "h124"),
"dialog title": (add_setting("h255", "bold"), "h235"),
# }}}
# {{{ source view
"current line marker": ("h160", "h235"),
"breakpoint marker": ("h160", "h235"),
"breakpoint source": ("h252", "h124"),
"breakpoint focused source": ("h192", "h124"),
"current breakpoint source": ("h192", "h124"),
"current breakpoint focused source": (
add_setting("h192", "bold"), "h124"),
# }}}
# {{{ highlighting
"source": ("h255", "h235"),
"focused source": ("h237", "h172"),
"highlighted source": ("h252", "h22"),
"current source": (add_setting("h252", "bold"), "h23"),
"current focused source": (add_setting("h192", "bold"), "h23"),
"current highlighted source": ("h255", "h22"),
"line number": ("h241", "h235"),
"keyword2": ("h51", "h235"),
"name": ("h155", "h235"),
"literal": ("h141", "h235"),
"namespace": ("h198", "h235"),
"operator": ("h198", "h235"),
"argument": ("h208", "h235"),
"builtin": ("h51", "h235"),
"pseudo": ("h141", "h235"),
"dunder": ("h51", "h235"),
"exception": ("h51", "h235"),
"keyword": ("h198", "h235"),
"string": ("h228", "h235"),
"doublestring": ("h228", "h235"),
"singlestring": ("h228", "h235"),
"docstring": ("h243", "h235"),
"punctuation": ("h255", "h235"),
"comment": ("h243", "h235"),
# }}}
# {{{ shell
"command line edit": ("h255", "h233"),
"command line prompt": (add_setting("h192", "bold"), "h233"),
"command line output": ("h80", "h233"),
"command line input": ("h255", "h233"),
"command line error": ("h160", "h233"),
"focused command line output": (add_setting("h192", "bold"), "h24"),
"focused command line input": ("h255", "h24"),
"focused command line error": ("h235", "h24"),
"command line clear button": (add_setting("h255", "bold"), "h233"),
"command line focused button": ("h255", "h24"),
# }}}
})
# }}}
else:
try:
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
}
            from os.path import expanduser, expandvars
            theme_path = expanduser(expandvars(theme))
            # Python 3 replacement for the old execfile() call
            with open(theme_path) as inf:
                exec(compile(inf.read(), theme_path, "exec"), symbols)
        except Exception:
            print("Error when importing theme:")
            from traceback import print_exc
            print_exc()
            input("Hit enter:")
# Apply style inheritance
for child, parent in inheritance_map:
if palette_dict[child] is None:
palette_dict[child] = palette_dict[parent]
palette_list = []
for setting_name, color_values in palette_dict.items():
fg_color = color_values[0].lower().strip()
bg_color = color_values[1].lower().strip()
# Convert hNNN syntax to equivalent #RGB value
# (https://github.com/wardi/urwid/issues/24)
if fg_color.startswith("h") or bg_color.startswith("h"):
attr = urwid.AttrSpec(fg_color, bg_color, colors=256)
palette_list.append((setting_name, "default", "default", "default",
attr.foreground,
attr.background))
else:
palette_list.append((setting_name,) + color_values)
return palette_list
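# --- Editor's sketch: what the hNNN -> 256-color expansion above does.
# "h160"/"h235" are xterm-256 palette indices; urwid.AttrSpec normalizes them
# into foreground/background attribute strings (urwid must be importable):
#     attr = urwid.AttrSpec("h160", "h235", colors=256)
#     attr.foreground, attr.background   # normalized 256-color attribute strings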
# vim: foldmethod=marker
| 39.022945 | 85 | 0.497452 |
cea5464141135a1344579b119bd327478db13e18 | 2,602 | py | Python | bot/longpoll.py | MiniK1337/py-bot-vk-api | 1cbf870dc8f37eb3e6f5fde2f12e6401280fdce4 | [
"MIT"
] | null | null | null | bot/longpoll.py | MiniK1337/py-bot-vk-api | 1cbf870dc8f37eb3e6f5fde2f12e6401280fdce4 | [
"MIT"
] | null | null | null | bot/longpoll.py | MiniK1337/py-bot-vk-api | 1cbf870dc8f37eb3e6f5fde2f12e6401280fdce4 | [
"MIT"
] | null | null | null | import urllib.request as urlreq
import threading
import json
from .api import api as API
from .handler import Handler
from .config import config
from .events import event_types
class Worker(threading.Thread):
	def __init__(self, func, args=None):
		threading.Thread.__init__(self)
		self.func = func
		self.args = args
	def run(self):
		try:
			self.func(self.args)
		except Exception:
			# swallow handler errors so one bad update does not kill the thread
			return 0
class Longpoll:
def __init__(self, token = None, v = "5.95", api = API):
self.token = token
self.api_v = v
if self.token is None:
raise RuntimeError("You must past token into settings")
config['access_token'] = self.token
config['api_version'] = self.api_v
self.api = api
self.group = self.api("groups.getById")[0]
self.lp_server_data = None
self.lp_server = "{}?act=a_check&key={}&ts={}&wait=25"
self.is_polling = None
# other
self.handler = Handler()
self.command = self.handler.command
self.event = self.handler.event
def get_longpoll_data(self):
longpoll = self.api("groups.getLongPollServer", group_id = self.group['id'])
self.lp_server_data = {
**longpoll
}
def receiver(self):
if not self.lp_server_data:
self.get_longpoll_data()
try:
with urlreq.urlopen(
self.lp_server.format(
self.lp_server_data['server'],
self.lp_server_data['key'],
self.lp_server_data['ts']
)
) as res:
response = json.loads(res.read())
except Exception:
return ()
if 'error' in response:
self.get_longpoll_data()
if 'ts' in response:
self.lp_server_data['ts'] = response['ts']
return response['updates']
	def main(self, _args=None):
try:
while True:
if not self.is_polling:
break
for update in self.receiver():
if update['type'] in event_types:
worker = Worker(self.handler._update_handler(update), self.handler._msg_parser(update['object']))
worker.start()
except KeyboardInterrupt:
self.is_polling = False
def startPolling(self):
self.is_polling = True
worker = Worker(self.main)
worker.start()
def stopPolling(self):
self.is_polling = False
| 27.104167 | 121 | 0.55342 |
870e4035c2fd965a425561aedd7afef9cf9860c1 | 16,215 | py | Python | homeassistant/components/template/cover.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/template/cover.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | [
"Apache-2.0"
] | 7 | 2016-04-09T20:56:30.000Z | 2016-04-19T21:28:46.000Z | homeassistant/components/template/cover.py | ymanton/home-assistant | 274cf232692396a6f0359a45d949ff94b681af52 | [
"Apache-2.0"
] | 2 | 2020-12-09T02:21:27.000Z | 2021-08-07T04:58:01.000Z | """Support for covers which integrate with other components."""
import logging
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
CoverDevice,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_ICON_TEMPLATE,
CONF_OPTIMISTIC,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_START,
STATE_CLOSED,
STATE_OPEN,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.script import Script
from . import extract_entities, initialise_templates
from .const import CONF_AVAILABILITY_TEMPLATE
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_OPEN, STATE_CLOSED, "true", "false"]
CONF_COVERS = "covers"
CONF_POSITION_TEMPLATE = "position_template"
CONF_TILT_TEMPLATE = "tilt_template"
OPEN_ACTION = "open_cover"
CLOSE_ACTION = "close_cover"
STOP_ACTION = "stop_cover"
POSITION_ACTION = "set_cover_position"
TILT_ACTION = "set_cover_tilt_position"
CONF_TILT_OPTIMISTIC = "tilt_optimistic"
CONF_VALUE_OR_POSITION_TEMPLATE = "value_or_position"
CONF_OPEN_OR_CLOSE = "open_or_close"
TILT_FEATURES = (
SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_STOP_TILT
| SUPPORT_SET_TILT_POSITION
)
COVER_SCHEMA = vol.Schema(
{
vol.Inclusive(OPEN_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Inclusive(CLOSE_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Optional(STOP_ACTION): cv.SCRIPT_SCHEMA,
vol.Exclusive(
CONF_POSITION_TEMPLATE, CONF_VALUE_OR_POSITION_TEMPLATE
): cv.template,
vol.Exclusive(
CONF_VALUE_TEMPLATE, CONF_VALUE_OR_POSITION_TEMPLATE
): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_TILT_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_TILT_OPTIMISTIC): cv.boolean,
vol.Optional(POSITION_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(TILT_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ENTITY_ID): cv.entity_ids,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Template cover."""
covers = []
for device, device_config in config[CONF_COVERS].items():
state_template = device_config.get(CONF_VALUE_TEMPLATE)
position_template = device_config.get(CONF_POSITION_TEMPLATE)
tilt_template = device_config.get(CONF_TILT_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
device_class = device_config.get(CONF_DEVICE_CLASS)
open_action = device_config.get(OPEN_ACTION)
close_action = device_config.get(CLOSE_ACTION)
stop_action = device_config.get(STOP_ACTION)
position_action = device_config.get(POSITION_ACTION)
tilt_action = device_config.get(TILT_ACTION)
optimistic = device_config.get(CONF_OPTIMISTIC)
tilt_optimistic = device_config.get(CONF_TILT_OPTIMISTIC)
if position_action is None and open_action is None:
            _LOGGER.error(
                "Must specify at least one of %s or %s", OPEN_ACTION, POSITION_ACTION
            )
continue
templates = {
CONF_VALUE_TEMPLATE: state_template,
CONF_POSITION_TEMPLATE: position_template,
CONF_TILT_TEMPLATE: tilt_template,
CONF_ICON_TEMPLATE: icon_template,
CONF_AVAILABILITY_TEMPLATE: availability_template,
CONF_ENTITY_PICTURE_TEMPLATE: entity_picture_template,
}
initialise_templates(hass, templates)
entity_ids = extract_entities(
device, "cover", device_config.get(CONF_ENTITY_ID), templates
)
covers.append(
CoverTemplate(
hass,
device,
friendly_name,
device_class,
state_template,
position_template,
tilt_template,
icon_template,
entity_picture_template,
availability_template,
open_action,
close_action,
stop_action,
position_action,
tilt_action,
optimistic,
tilt_optimistic,
entity_ids,
)
)
if not covers:
_LOGGER.error("No covers added")
return False
async_add_entities(covers)
return True
class CoverTemplate(CoverDevice):
"""Representation of a Template cover."""
def __init__(
self,
hass,
device_id,
friendly_name,
device_class,
state_template,
position_template,
tilt_template,
icon_template,
entity_picture_template,
availability_template,
open_action,
close_action,
stop_action,
position_action,
tilt_action,
optimistic,
tilt_optimistic,
entity_ids,
):
"""Initialize the Template cover."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
self._position_template = position_template
self._tilt_template = tilt_template
self._icon_template = icon_template
self._device_class = device_class
self._entity_picture_template = entity_picture_template
self._availability_template = availability_template
self._open_script = None
if open_action is not None:
self._open_script = Script(hass, open_action)
self._close_script = None
if close_action is not None:
self._close_script = Script(hass, close_action)
self._stop_script = None
if stop_action is not None:
self._stop_script = Script(hass, stop_action)
self._position_script = None
if position_action is not None:
self._position_script = Script(hass, position_action)
self._tilt_script = None
if tilt_action is not None:
self._tilt_script = Script(hass, tilt_action)
self._optimistic = optimistic or (not state_template and not position_template)
self._tilt_optimistic = tilt_optimistic or not tilt_template
self._icon = None
self._entity_picture = None
self._position = None
self._tilt_value = None
self._entities = entity_ids
self._available = True
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def template_cover_state_listener(entity, old_state, new_state):
"""Handle target device state changes."""
self.async_schedule_update_ha_state(True)
@callback
def template_cover_startup(event):
"""Update template on startup."""
async_track_state_change(
self.hass, self._entities, template_cover_state_listener
)
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, template_cover_startup
)
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._position == 0
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
if self._position_template or self._position_script:
return self._position
return None
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._tilt_value
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def entity_picture(self):
"""Return the entity picture to use in the frontend, if any."""
return self._entity_picture
@property
def device_class(self):
"""Return the device class of the cover."""
return self._device_class
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._stop_script is not None:
supported_features |= SUPPORT_STOP
if self._position_script is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= TILT_FEATURES
return supported_features
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def available(self) -> bool:
"""Return if the device is available."""
return self._available
async def async_open_cover(self, **kwargs):
"""Move the cover up."""
if self._open_script:
await self._open_script.async_run(context=self._context)
elif self._position_script:
await self._position_script.async_run(
{"position": 100}, context=self._context
)
if self._optimistic:
self._position = 100
self.async_schedule_update_ha_state()
async def async_close_cover(self, **kwargs):
"""Move the cover down."""
if self._close_script:
await self._close_script.async_run(context=self._context)
elif self._position_script:
await self._position_script.async_run(
{"position": 0}, context=self._context
)
if self._optimistic:
self._position = 0
self.async_schedule_update_ha_state()
async def async_stop_cover(self, **kwargs):
"""Fire the stop action."""
if self._stop_script:
await self._stop_script.async_run(context=self._context)
async def async_set_cover_position(self, **kwargs):
"""Set cover position."""
self._position = kwargs[ATTR_POSITION]
await self._position_script.async_run(
{"position": self._position}, context=self._context
)
if self._optimistic:
self.async_schedule_update_ha_state()
async def async_open_cover_tilt(self, **kwargs):
"""Tilt the cover open."""
self._tilt_value = 100
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
async def async_close_cover_tilt(self, **kwargs):
"""Tilt the cover closed."""
self._tilt_value = 0
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
self._tilt_value = kwargs[ATTR_TILT_POSITION]
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_schedule_update_ha_state()
async def async_update(self):
"""Update the state from the template."""
if self._template is not None:
try:
state = self._template.async_render().lower()
if state in _VALID_STATES:
if state in ("true", STATE_OPEN):
self._position = 100
else:
self._position = 0
else:
_LOGGER.error(
"Received invalid cover is_on state: %s. Expected: %s",
state,
", ".join(_VALID_STATES),
)
self._position = None
except TemplateError as ex:
_LOGGER.error(ex)
self._position = None
if self._position_template is not None:
try:
state = float(self._position_template.async_render())
if state < 0 or state > 100:
self._position = None
_LOGGER.error(
"Cover position value must be"
" between 0 and 100."
" Value was: %.2f",
state,
)
else:
self._position = state
except (TemplateError, ValueError) as err:
_LOGGER.error(err)
self._position = None
if self._tilt_template is not None:
try:
state = float(self._tilt_template.async_render())
if state < 0 or state > 100:
self._tilt_value = None
_LOGGER.error(
"Tilt value must be between 0 and 100. Value was: %.2f", state,
)
else:
self._tilt_value = state
except (TemplateError, ValueError) as err:
_LOGGER.error(err)
self._tilt_value = None
for property_name, template in (
("_icon", self._icon_template),
("_entity_picture", self._entity_picture_template),
("_available", self._availability_template),
):
if template is None:
continue
try:
value = template.async_render()
if property_name == "_available":
value = value.lower() == "true"
setattr(self, property_name, value)
except TemplateError as ex:
friendly_property_name = property_name[1:].replace("_", " ")
if ex.args and ex.args[0].startswith(
"UndefinedError: 'None' has no attribute"
):
# Common during HA startup - so just a warning
_LOGGER.warning(
"Could not render %s template %s, the state is unknown.",
friendly_property_name,
self._name,
)
return
try:
setattr(self, property_name, getattr(super(), property_name))
except AttributeError:
_LOGGER.error(
"Could not render %s template %s: %s",
friendly_property_name,
self._name,
ex,
)
| 34.353814 | 87 | 0.612643 |
8a8bc8594ecec4a9be1af7e81ea3c31d4795487a | 2,369 | py | Python | limits/util.py | mymedia2/limits | 2609381a93bce95df3f643b3676cceae320007d1 | [
"MIT"
] | 140 | 2015-04-30T14:12:19.000Z | 2022-03-29T11:05:06.000Z | limits/util.py | mymedia2/limits | 2609381a93bce95df3f643b3676cceae320007d1 | [
"MIT"
] | 88 | 2015-02-25T07:23:07.000Z | 2022-03-17T20:11:08.000Z | limits/util.py | mymedia2/limits | 2609381a93bce95df3f643b3676cceae320007d1 | [
"MIT"
] | 43 | 2015-02-25T03:26:47.000Z | 2022-03-02T16:14:07.000Z | """
"""
import re
import sys
import six
from .limits import GRANULARITIES
SEPARATORS = re.compile(r"[,;|]{1}")
SINGLE_EXPR = re.compile(
r"""
\s*([0-9]+)
\s*(/|\s*per\s*)
\s*([0-9]+)
*\s*(hour|minute|second|day|month|year)s?\s*""",
re.IGNORECASE | re.VERBOSE
)
EXPR = re.compile(
r"^{SINGLE}(:?{SEPARATORS}{SINGLE})*$".format(
SINGLE=SINGLE_EXPR.pattern, SEPARATORS=SEPARATORS.pattern
), re.IGNORECASE | re.VERBOSE
)
def get_dependency(dep):
"""
safe function to import a module programmatically
:return: module or None (if not importable)
"""
try:
__import__(dep)
return sys.modules[dep]
except ImportError: # pragma: no cover
return None
def parse_many(limit_string):
"""
parses rate limits in string notation containing multiple rate limits
(e.g. '1/second; 5/minute')
:param string limit_string: rate limit string using :ref:`ratelimit-string`
:raise ValueError: if the string notation is invalid.
:return: a list of :class:`RateLimitItem` instances.
"""
if not (
isinstance(limit_string, six.string_types)
and EXPR.match(limit_string)
):
raise ValueError(
"couldn't parse rate limit string '%s'" % limit_string
)
limits = []
for limit in SEPARATORS.split(limit_string):
amount, _, multiples, granularity_string = SINGLE_EXPR.match(
limit
).groups()
granularity = granularity_from_string(granularity_string)
limits.append(granularity(amount, multiples))
return limits
def parse(limit_string):
"""
parses a single rate limit in string notation
(e.g. '1/second' or '1 per second'
:param string limit_string: rate limit string using :ref:`ratelimit-string`
:raise ValueError: if the string notation is invalid.
:return: an instance of :class:`RateLimitItem`
"""
return list(parse_many(limit_string))[0]
def granularity_from_string(granularity_string):
"""
:param granularity_string:
:return: a subclass of :class:`RateLimitItem`
:raise ValueError:
"""
for granularity in GRANULARITIES.values():
if granularity.check_granularity_string(granularity_string):
return granularity
raise ValueError("no granularity matched for %s" % granularity_string)
| 26.322222 | 79 | 0.654707 |
09284522a4b0a653bbc84bd2df1888492bb2f33a | 400 | py | Python | src/sms_verifier_app/storage.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | null | null | null | src/sms_verifier_app/storage.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 10 | 2020-02-12T02:51:31.000Z | 2022-02-10T13:33:43.000Z | src/sms_verifier_app/storage.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 1 | 2022-02-22T18:56:22.000Z | 2022-02-22T18:56:22.000Z | import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
class OverwriteStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
# If the filename already exists, remove it as if it was a true file system
if self.exists(name):
os.remove(os.path.join(settings.MEDIA_ROOT, name))
return name
| 28.571429 | 83 | 0.725 |
cf861b29f86a8b850b6da96651357383e82fbf7b | 5,106 | py | Python | simulations/sim_transition_model_learning.py | bastianalt/correlation_priors_for_rl | 9a98f345ac10e9767d854cd7a9681057a50a9737 | [
"MIT"
] | null | null | null | simulations/sim_transition_model_learning.py | bastianalt/correlation_priors_for_rl | 9a98f345ac10e9767d854cd7a9681057a50a9737 | [
"MIT"
] | null | null | null | simulations/sim_transition_model_learning.py | bastianalt/correlation_priors_for_rl | 9a98f345ac10e9767d854cd7a9681057a50a9737 | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
import os
import numpy as np
import xyzpy as xyz
from packages.utils.utils import positions2distmat, normalize01, hellinger, init_Dataset, emd
from packages.mdp.standard_domains import DiagonalTunnelWorld
from packages.mdp.belief_transition_model import PGVarStateTransitionModel
from packages.pgmult.pg_cov import NegExponential
from funcy import rpartial
def estimation_error(T, T_hat, metric, params):
"""
Computes the error between an estimated transition model and the corresponding ground truth based on a given
evaluation metric (with optional parameters).
:param T: [S x S x A] ground truth transition model
:param T_hat: [S x S x A] estimated transition model
:param metric: string specifying the evaluation metric
:param params: parameters of the evaluation metric
:return: metric value
"""
    # select metric
    if metric == 'emd':
        fun = rpartial(emd, params)
    elif metric == 'hellinger':
        fun = hellinger
    else:
        raise ValueError('unknown metric: {}'.format(metric))
# MDP dimensions
_, nStates, nActions = T.shape
# evaluate the metric for each state and action
E = np.zeros([nStates, nActions])
for s, a in np.ndindex(nStates, nActions):
E[s, a] = fun(T[:, s, a], T_hat[:, s, a])
# return mean and std metric values (computed over the state space of the MDP)
return E.mean(), E.std()
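# Editor's sketch of the Hellinger distance used above (assumed to match the
# imported packages.utils.hellinger up to convention): for discrete
# distributions p, q it is H(p, q) = sqrt(0.5 * sum((sqrt(p) - sqrt(q))**2)),
# ranging from 0 (identical) to 1 (disjoint support).
def _hellinger_sketch(p, q):
    # cast to float arrays so sqrt behaves for integer inputs
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    return np.sqrt(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2))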
def experiment(env, nData, methods, metrics, seed):
"""
Evaluates different methods to estimate the transition model of an MDP.
:param env: string specifying the test environment
:param nData: list of integers specifying the training dataset sizes to be tested (number of observed transitions)
:param methods: list of strings specifying the inference methods to be considered
:param metrics: list of strings specifying the evaluation metrics to be computed
:param seed: integer specifying the random seed of the experiment
:return: xarray containing the mean and standard deviations of all evaluation metrics (computed over the state
space of the MDP)
"""
# set random seed
np.random.seed(seed)
# initialize empty xarray to store results
coords = dict(nData=nData, methods=methods, metrics=metrics)
dims = ['nData', 'methods', 'metrics']
XR = init_Dataset(['mean', 'std'], coords, dims)
# create environment
    if env == '10x10_TunnelWorldCorner':
        mdp = DiagonalTunnelWorld(length=10, corner=True)
    else:
        raise ValueError('unknown environment: {}'.format(env))
    distmat = positions2distmat(mdp.statePositions)
# uniform action exploration policy
pi = np.full([mdp.nStates, mdp.nActions], fill_value=1/mdp.nActions)
# create data set
D = mdp.sampleTrajectories(nTrajs=1, nSteps=nData.max(), pi=pi)
states, actions = D['states'].ravel(), D['actions'].ravel()
# iterate over all inference methods
for method in methods:
# method specific settings
if method == 'pg':
distmat_kernel = normalize01(distmat + distmat.T) ** 2
Sigma = NegExponential(distmat_kernel)
T = PGVarStateTransitionModel(mdp.nStates, mdp.nActions, Sigma=Sigma, nonInformative=False)
elif method == 'dir_sparse':
alpha = 1e-3
elif method == 'dir_uniform':
alpha = 1
# iterate over different dataset sizes
for nSteps in nData:
# extract subset of data
S = states[0:nSteps]
A = actions[0:nSteps]
# create count matrix
X = np.zeros_like(mdp.T, dtype=int)
for s1, s2, a in zip(S[:-1], S[1:], A):
X[s2, s1, a] += 1
# estimate transition model
if method == 'pg':
T.data = X
T.fit()
T_hat = T.mean()
elif method in ('dir_sparse', 'dir_uniform'):
T_hat = (X + alpha) / (X + alpha).sum(axis=1, keepdims=True)
# evaluate estimate
for metric in metrics:
mean, std = estimation_error(mdp.T, T_hat, metric, distmat)
XR['mean'].loc[dict(methods=method, nData=nSteps, metrics=metric)] = mean
XR['std'].loc[dict(methods=method, nData=nSteps, metrics=metric)] = std
return XR
if __name__ == '__main__':
# path to store result
file = '../results/modellearning/result.h5'
assert os.path.isdir(os.path.dirname(file))
# number of Monte Carlo runs
MC = 20
# "outer" parameter sweep (performed via xyzpy)
combos = dict(
seed=np.r_[0:MC],
env=['10x10_TunnelWorldCorner'],
)
# "inner" parameter sweep (manually implemented to share quantities across loops)
constants = dict(
nData=np.linspace(0, 1e4, 20, dtype=int),
metrics=[
# 'emd',
'hellinger'
],
methods=[
'dir_sparse',
'dir_uniform',
'pg',
],
)
# run experiments
runner = xyz.Runner(experiment, var_names=None)
harvester = xyz.Harvester(runner, file)
harvester.harvest_combos(combos, constants=constants, parallel=True)
| 34.268456 | 118 | 0.635919 |
6cb98dc9bb488936371b38980d80a87088a84a43 | 16,375 | py | Python | ibis/backends/clickhouse/tests/test_functions.py | hdfkndkndknknknvklsbljsbsmb/ibis | dbacd7a52ca062529fb7bf6dec51b98d7199d1dc | [
"Apache-2.0"
] | null | null | null | ibis/backends/clickhouse/tests/test_functions.py | hdfkndkndknknknvklsbljsbsmb/ibis | dbacd7a52ca062529fb7bf6dec51b98d7199d1dc | [
"Apache-2.0"
] | null | null | null | ibis/backends/clickhouse/tests/test_functions.py | hdfkndkndknknknvklsbljsbsmb/ibis | dbacd7a52ca062529fb7bf6dec51b98d7199d1dc | [
"Apache-2.0"
] | 1 | 2021-09-20T07:51:20.000Z | 2021-09-20T07:51:20.000Z | import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`double_col` AS Int8)'),
('int16', 'CAST(`double_col` AS Int16)'),
('float', 'CAST(`double_col` AS Float32)'),
# alltypes.double_col is non-nullable
(dt.Double(nullable=False), '`double_col`'),
],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
expr = alltypes.double_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`string_col` AS Int8)'),
('int16', 'CAST(`string_col` AS Int16)'),
(dt.String(nullable=False), '`string_col`'),
('timestamp', 'CAST(`string_col` AS DateTime)'),
('date', 'CAST(`string_col` AS Date)'),
],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
expr = alltypes.string_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.xfail(
raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
assert False
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, translate, column):
col = alltypes[column]
result = col.cast(col.type())
assert result.equals(col)
assert translate(result) == f'`{column}`'
def test_timestamp_cast_noop(alltypes, translate):
target = dt.Timestamp(nullable=False)
result1 = alltypes.timestamp_col.cast(target)
result2 = alltypes.int_col.cast(target)
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
assert translate(result1) == '`timestamp_col`'
assert translate(result2) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
expr = ibis.now()
# now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert translate(expr) == 'now()'
# assert con.execute(expr) == now
@pytest.mark.parametrize(
('unit', 'expected'),
[
('y', '2009-01-01'),
param('m', '2009-05-01', marks=pytest.mark.xfail),
('d', '2009-05-17'),
('w', '2009-05-11'),
('h', '2009-05-17 12:00:00'),
('minute', '2009-05-17 12:34:00'),
],
)
def test_timestamp_truncate(con, translate, unit, expected):
stamp = ibis.timestamp('2009-05-17 12:34:56')
expr = stamp.truncate(unit)
assert con.execute(expr) == pd.Timestamp(expected)
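# Editor's note (assumption, not taken from the translator source): the
# truncate units above correspond to ClickHouse's toStartOf* family, e.g.
#     SELECT toStartOfHour(toDateTime('2009-05-17 12:34:56'))
#     -- 2009-05-17 12:00:00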
@pytest.mark.parametrize(
('func', 'expected'),
[
(methodcaller('year'), 2015),
(methodcaller('month'), 9),
(methodcaller('day'), 1),
(methodcaller('hour'), 14),
(methodcaller('minute'), 48),
(methodcaller('second'), 5),
],
)
def test_simple_datetime_operations(con, func, expected):
value = ibis.timestamp('2015-09-01 14:48:05.359')
with pytest.raises(ValueError):
con.execute(func(value))
value = ibis.timestamp('2015-09-01 14:48:05')
    assert con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
result = con.execute(L(value).nullifzero())
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(None).isnull(), True),
(L(1).isnull(), False),
(L(None).notnull(), False),
(L(1).notnull(), True),
],
)
def test_isnull_notnull(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
],
)
def test_fillna_nullif(con, expr, expected):
result = con.execute(expr)
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
(L('foo_bar'), 'String'),
(L(5), 'UInt8'),
(L(1.2345), 'Float64'),
(L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
(L(date(2015, 9, 1)), 'Date'),
param(
ibis.NA,
'Null',
marks=pytest.mark.xfail(
raises=AssertionError,
reason=(
'Client/server version mismatch not handled in the '
'clickhouse driver'
),
),
),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
(methodcaller('substr', 0, 3), 'foo'),
(methodcaller('substr', 4, 3), 'bar'),
(methodcaller('substr', 1), 'oo_bar'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
expr = alltypes.string_col.substr(2)
assert translate(expr) == 'substring(`string_col`, 2 + 1)'
assert len(con.execute(expr))
expr = alltypes.string_col.substr(0, 3)
assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
assert len(con.execute(expr))
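# Editor's note: ClickHouse string positions are 1-based, which is why the
# translator emits the "+ 1" offsets above for ibis's 0-based substr/find API.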
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
def test_string_length_scalar(con):
assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
('value', 'op', 'expected'),
[
(L('foobar'), methodcaller('contains', 'bar'), True),
(L('foobar'), methodcaller('contains', 'foo'), True),
(L('foobar'), methodcaller('contains', 'baz'), False),
(L('100%'), methodcaller('contains', '%'), True),
(L('a_b_c'), methodcaller('contains', '_'), True),
],
)
def test_string_contains(con, op, value, expected):
assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
expr2 = L('Hello, World!').re_replace('^', 'here: ')
assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
('value', 'expected'),
[(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
vals = list('abc')
expr = value.find_in_set(vals)
assert con.execute(expr) == expected
def test_string_column_find_in_set(con, alltypes, translate):
s = alltypes.string_col
vals = list('abc')
expr = s.find_in_set(vals)
assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('url', 'extract', 'expected'),
[
(L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
(L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'PATH',
'/watch',
),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'QUERY',
'v=kEuEcWfewf8&t=10',
),
],
)
def test_parse_url(con, translate, url, extract, expected):
expr = url.parse_url(extract)
assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
expr = url.parse_url('QUERY', 't')
assert con.execute(expr) == '10'
expr = url.parse_url('QUERY', 'v')
assert con.execute(expr) == 'kEuEcWfewf8'
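# Editor's note (assumption about the translation, not verified here): these
# parse_url cases plausibly map onto ClickHouse URL functions such as
# domain(), protocol(), path(), queryString() and extractURLParameter(url, 'v').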
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
(L('foobar').like(['%bar']), True),
(L('foobar').like(['foo%']), True),
(L('foobar').like(['%baz%']), False),
(L('foobar').like(['%bar', 'foo%']), True),
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
],
)
def test_string_find_like(con, expr, expected):
assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
expr = alltypes.string_col.like('foo%')
assert translate(expr) == "`string_col` LIKE 'foo%'"
assert len(con.execute(expr))
expr = alltypes.string_col.like(['foo%', '%bar'])
expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
assert translate(expr) == expected
assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
s = alltypes.string_col
expr = s.find('a')
assert translate(expr) == "position(`string_col`, 'a') - 1"
assert len(con.execute(expr))
expr = s.find(s)
assert translate(expr) == "position(`string_col`, `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('call', 'expected'),
[
(methodcaller('log'), 'log(`double_col`)'),
(methodcaller('log2'), 'log2(`double_col`)'),
(methodcaller('log10'), 'log10(`double_col`)'),
(methodcaller('round'), 'round(`double_col`)'),
(methodcaller('round', 0), 'round(`double_col`, 0)'),
(methodcaller('round', 2), 'round(`double_col`, 2)'),
(methodcaller('exp'), 'exp(`double_col`)'),
(methodcaller('abs'), 'abs(`double_col`)'),
(methodcaller('ceil'), 'ceil(`double_col`)'),
(methodcaller('floor'), 'floor(`double_col`)'),
(methodcaller('sqrt'), 'sqrt(`double_col`)'),
(
methodcaller('sign'),
'intDivOrZero(`double_col`, abs(`double_col`))',
),
],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
expr = call(alltypes.double_col)
assert translate(expr) == expected
assert len(con.execute(expr))
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(-5).abs(), 5),
(L(5).abs(), 5),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
(L(5.556).ceil(), 6.0),
(L(5.556).floor(), 5.0),
(L(5.556).exp(), math.exp(5.556)),
(L(5.556).sign(), 1),
(L(-5.556).sign(), -1),
(L(0).sign(), 0),
(L(5.556).sqrt(), math.sqrt(5.556)),
(L(5.556).log(2), math.log(5.556, 2)),
(L(5.556).ln(), math.log(5.556)),
(L(5.556).log2(), math.log(5.556, 2)),
(L(5.556).log10(), math.log10(5.556)),
],
)
def test_math_functions(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
expr = ibis.greatest(alltypes.int_col, 10)
assert translate(expr) == "greatest(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "greatest(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
def test_least(con, alltypes, translate):
expr = ibis.least(alltypes.int_col, 10)
assert translate(expr) == "least(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.least(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "least(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
# TODO: clickhouse-driver escaping bug
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_search('[a-z]'), True),
(L('abcd').re_search(r'[\\d]+'), False),
(L('1222').re_search(r'[\\d]+'), True),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
# (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
# valid group number but no match => empty string
(L('abcd').re_extract(r'(\\d)', 0), ''),
# match but not a valid group number => NULL
# (L('abcd').re_extract('abcd', 3), None),
],
)
def test_regexp_extract(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
expr = alltypes.string_col.re_extract(r'[\d]+', 3)
assert translate(expr) == expected
assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
assert translate(expr) == expected
assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
expr = alltypes.double_col
result = expr.execute()
expected = df.double_col.fillna(0)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes, translate):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
expected = pd.Series([None] * nrows, name='na_column')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('attr', 'expected'),
[
(operator.methodcaller('year'), {2009, 2010}),
(operator.methodcaller('month'), set(range(1, 13))),
(operator.methodcaller('day'), set(range(1, 32))),
],
)
def test_date_extract_field(db, alltypes, attr, expected):
t = alltypes
expr = attr(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int)
assert set(result) == expected
def test_timestamp_from_integer(con, alltypes, translate):
# timestamp_col has datetime type
expr = alltypes.int_col.to_timestamp()
assert translate(expr) == 'toDateTime(`int_col`)'
assert len(con.execute(expr))
def test_count_distinct_with_filter(alltypes):
expr = alltypes.string_col.nunique(
where=alltypes.string_col.cast('int64') > 1
)
result = expr.execute()
expected = alltypes.string_col.execute()
expected = expected[expected.astype('int64') > 1].nunique()
assert result == expected
@pytest.mark.parametrize(
('sep', 'where_case', 'expected'),
[
(',', None, "arrayStringConcat(groupArray(`string_col`), ',')"),
('-', None, "arrayStringConcat(groupArray(`string_col`), '-')"),
pytest.param(
',',
0,
(
"arrayStringConcat(groupArray("
"CASE WHEN `bool_col` = 0 THEN "
"`string_col` ELSE Null END), ',')"
),
marks=pytest.mark.xfail(
reason=(
                    '`where` param needs a `Nullable` column, '
                    'but all the testing data is not. '
                    'See also issue #2891'
)
),
),
],
)
def test_group_concat(alltypes, sep, where_case, expected, translate):
where = None if where_case is None else alltypes.bool_col == where_case
expr = alltypes.string_col.group_concat(sep, where)
assert translate(expr) == expected
| 29.137011 | 76 | 0.590229 |
7208905d658c9771f0cf3fb5f921e420cf135d3d | 2,427 | py | Python | youtube/youtube_selenium/youtube_scraper.py | ogunnoo/web-scraping | 9255806d76c078e5e0de3c56087fb4ea3f07b33f | [
"MIT"
] | null | null | null | youtube/youtube_selenium/youtube_scraper.py | ogunnoo/web-scraping | 9255806d76c078e5e0de3c56087fb4ea3f07b33f | [
"MIT"
] | null | null | null | youtube/youtube_selenium/youtube_scraper.py | ogunnoo/web-scraping | 9255806d76c078e5e0de3c56087fb4ea3f07b33f | [
"MIT"
] | null | null | null | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
import time
import pandas as pd
def close_modal(driver):
try:
modal_close = driver.find_element(By.CLASS_NAME, "modal__close")
if modal_close:
modal_close.click()
except:
pass
options = webdriver.ChromeOptions()
url = 'https://www.youtube.com/c/JohnWatsonRooney/videos'
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
driver.get(url)
close_modal(driver)
elem = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//*[@id="video-title"]')))
driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
old_height = driver.execute_script("return document.documentElement.scrollHeight")
print(driver.execute_script("return document.documentElement.scrollHeight"))
driver.execute_script(f"window.scrollTo({old_height}, document.documentElement.scrollHeight);")
new_height = driver.execute_script("return document.documentElement.scrollHeight")
print(driver.execute_script("return document.documentElement.scrollHeight"))
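# Keep scrolling until the document height stops growing, i.e. the
# infinite-scroll video grid has been fully loaded.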
while driver.find_element(By.TAG_NAME, 'div'):
driver.execute_script(f"window.scrollTo({old_height}, document.documentElement.scrollHeight);")
new_height = driver.execute_script("return document.documentElement.scrollHeight")
print(driver.execute_script("return document.documentElement.scrollHeight"))
    divs = driver.find_element(By.TAG_NAME, 'div').text
if new_height == old_height:
print('end')
break
else:
old_height = new_height
time.sleep(3)
continue
print("Complete")
videos = driver.find_elements(By.CSS_SELECTOR, 'ytd-grid-video-renderer')
print(len(videos))
video_list =[]
for video in videos:
title = video.find_element(By.XPATH, './/*[@id="video-title"]').text
views = video.find_element(By.XPATH, './/*[@id="metadata-line"]/span[1]').text
posted = video.find_element(By.XPATH,'.//*[@id="metadata-line"]/span[2]').text
items = {
'title' : title,
'views' : views,
'posted': posted
}
video_list.append(items)
df = pd.DataFrame(video_list)
print(df)
| 32.36 | 108 | 0.73218 |
52a5cd578cd0b5760f3f3491a28e5cbf0dad513d | 1,371 | py | Python | tilecloud/store/bsddb.py | camptocamp/tilecloud | fa9864c969917ad47a6d4ce945fc19fe5cdcfb68 | [
"Unlicense"
] | 134 | 2017-02-25T22:14:50.000Z | 2022-03-04T09:51:13.000Z | tilecloud/store/bsddb.py | camptocamp/tilecloud | fa9864c969917ad47a6d4ce945fc19fe5cdcfb68 | [
"Unlicense"
] | 106 | 2017-03-07T13:45:04.000Z | 2022-03-31T13:01:49.000Z | tilecloud/store/bsddb.py | camptocamp/tilecloud | fa9864c969917ad47a6d4ce945fc19fe5cdcfb68 | [
"Unlicense"
] | 14 | 2017-05-05T14:46:50.000Z | 2022-01-02T18:37:31.000Z | from typing import Any, Iterator, Optional
import bsddb3 as bsddb # pylint: disable=import-error
from tilecloud import Tile, TileCoord, TileStore
class BSDDBTileStore(TileStore):
def __init__(self, db: bsddb.DB, **kwargs: Any):
self.db = db
TileStore.__init__(self, **kwargs)
def __contains__(self, tile: Tile) -> bool:
return tile is not None and str(tile.tilecoord) in self.db
def __len__(self) -> int:
return len(self.db)
def delete_one(self, tile: Tile) -> Tile:
key = str(tile.tilecoord).encode("utf-8")
if key in self.db:
del self.db[key]
return tile
def get_all(self) -> Iterator[Tile]:
for key, data in self.db.items():
tile = Tile(TileCoord.from_string(key), content_type=self.content_type, data=data)
yield tile
def get_one(self, tile: Tile) -> Optional[Tile]:
try:
tile.content_type = self.content_type
tile.data = self.db[str(tile.tilecoord).encode("utf-8")]
return tile
except KeyError:
return None
def list(self) -> Iterator[Tile]:
return map(lambda s: Tile(TileCoord.from_string(s)), self.db.keys())
def put_one(self, tile: Tile) -> Tile:
self.db[str(tile.tilecoord).encode("utf-8")] = getattr(tile, "data", "")
return tile
| 31.159091 | 94 | 0.61415 |
eb11b25d2b4921bbf1d381799f66304a9625cc70 | 18,792 | py | Python | lib/portal/docgenerator/Confluence2RST.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | null | null | null | lib/portal/docgenerator/Confluence2RST.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | 74 | 2015-12-28T16:17:20.000Z | 2021-09-08T12:28:59.000Z | lib/portal/docgenerator/Confluence2RST.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
] | null | null | null | import re
from JumpScale import j
class Confluence2RST():
def processDefs(self, line, doc, page):
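        # NOTE: defs processing is short-circuited by the early return below;
        # the remainder of this method is currently unreachable.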
return line
if not doc.processDefs:
return line
# print "processdefs:%s"%line
def processToken(token):
if token.find("{") != -1 or token.find("[") != -1 or token.find("]") != -1:
return token
# print "tok1:%s"%token
deff = self.defGet(token)
if deff is not None:
# print "founddef"
token = "[%s|%s]" % (deff.name, deff.pagename)
# print "tok2:%s"%token
return token
token = ""
lineout = ""
for char in line:
if char in [",", ";", ":", " ", ".", "?", "!", "|"]:
token = processToken(token)
lineout += "%s%s" % (token, char)
token = ""
elif char in ["/", "\\", "]"]:
lineout += "%s%s" % (token, char)
token = ""
else:
token += char
lineout += processToken(token)
lineout = self.findLinks(lineout)
return lineout
@staticmethod
def findLinks(line):
# r=r"\[[-:|_@#.?\w\s\\=/&]*\]"
r = r"\[[^\[\]]+\]" # TODO: does not seem right to me
if j.tools.code.regex.match(r, line): # find links
# print "match %s"% line
htmlelements = ""
for match in j.tools.code.regex.yieldRegexMatches(r, line):
try:
# print "link: %s" % match.founditem
link_id = link_class = None
match2 = match.founditem.replace("[", "").replace("]", "")
if match2.find("|") != -1:
parts = match2.split("|")
descr = parts[0]
link = parts[1]
if len(parts) >= 3:
if parts[2].strip() != "":
link_id = (parts[2].split('=')[1]).strip()
if len(parts) >= 4:
if parts[2].strip() != "":
link_id = (parts[2].split('=')[1]).strip()
link_class = (parts[3].split('=')[1]).strip()
if len(parts) >= 5:
htmlelements = parts[4]
elif match2.find(":") != -1:
descr, link = match2.split(":", 1)[1], match2
else:
link = match2
descr = link
except Exception as e:
return line
# if link.find(":") != -1: # TODO: what was the reason for this, probably have broken something now
# link=link.replace(":","___")
if link.find(";") != -1:
space, pagename = link.split(";", 1)
link = "/%s/%s" % (space.lower().strip().strip("/"), pagename.strip().strip("/"))
# print "match:%s"%match.founditem
# print "getlink:%s" %page.getLink(descr,link)
linkDest = "%s <%s>" % (descr, link)
line = line.replace(match.founditem, linkDest)
return line
def processMacro(self, macro, page):
macro = macro.strip().lstrip("{").strip()
macro = macro.strip().rstrip("}").strip()
if macro.find("code:") == 0:
page.addNewLine()
macro = macro[5:].strip()
if macro.startswith('template:'):
macrolines = macro.splitlines()
macro = '\n'.join(macrolines[1:])
page.addCodeBlock(macro)
page.addNewLine()
if macro.find("rst:") == 0:
page.addNewLine()
macro = macro[4:].strip()
page.addMessage(macro)
def convert(self, content, page=None, doc=None, requestContext=None, paramsExtra={}):
if content.find("@rstignore") != -1:
return ""
#styled_text = r'([\w\-:_/= *.\.\/\>\<\\{},|`!]+)'
styled_text = r'[^{0}\n]*?'
def limiter(char):
# Limiters can contain RE special chars, so I escape them here
limiter_re = ''.join('\\' + c for c in char)
# This is the RE which is used to replace wiki text formatting with equivalent HTML tag
return re.compile(r'(\W){0}([^ #{0}]{1}[^ \n{0}]?){0}(\W)'.format(
limiter_re, styled_text.format(limiter_re)))
def limiter_replacement(sub):
# return r'\1<{0}>\2</{0}>\3'.format(sub)
return r'\1{0}\2{0}\3'.format(sub)
def substitute_email(match):
return r'<a href="{0}">{1}</a>'.format(match.group(1), match.group(1).replace('mailto:', '', 1))
def escape_char(char):
return char
# return '&#{0};'.format(ord(char.group(1)))
substitutions = [
(r'\\([^\n\r\\])', ""),
# ('<', '<'),
# ('>', '>'),
(r'\@LF\b', '\n'), # This should come after !=
(r'&[\w #]*;', ""),
(limiter('`'), limiter_replacement('\'')),
# (limiter('**'), limiter_replacement('**')),
# (limiter('*'), limiter_replacement('**')),
(limiter('_'), limiter_replacement('*')),
(limiter('+'), limiter_replacement('')),
(limiter('-'), limiter_replacement('')),
(limiter('??'), limiter_replacement('')),
(limiter('^'), limiter_replacement('')),
(limiter('~'), limiter_replacement('')),
# {color: red}text goes here{color}
(re.compile(r'\{{color\:(.*?)\}}({0})\{{color\}}'.format(styled_text.format('{}')),
flags=re.DOTALL | re.MULTILINE | re.IGNORECASE),
r'<span style="color:\1">\2</span>'),
# Links & emails
#(r'\[(.*?)\]', substitute_email),
# blockquote
(r'bq\.\s+(.*?)\n', r'<blockquote>\1</blockquote>\n'),
# Escape characters by putting \ in front of it, e.g. \*
]
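        # Example: with the substitutions above, Confluence "_word_" becomes
        # RST "*word*", while "+word+" and "-word-" have their markers stripped.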
# First, divide the text into macros & non-macros
blocks = re.split(r'({{.*?}})', content, flags=re.DOTALL)
for i in range(len(blocks)):
if blocks[i].startswith('{{'): # a macro
continue
for tag_re, sub_re in substitutions:
blocks[i] = re.sub(tag_re, sub_re, blocks[i])
content = ''.join(blocks)
if page is None:
page = j.portal.tools.docgenerator.pageNewMD("temp")
# images=j.sal.fs.listFilesInDir(dirpath,False)
        # images3=[]
# for image in images:
# image2=image.lower()
# if image2.find(".jpg") != -1 or image2.find(".png") != -1:
# image2=image2.strip()
# image2=j.sal.fs.getBaseName(image2.replace("\\","/"))
# images3.append(image2)
state = "start"
macro = ""
params = ""
ulAttributes = ''
for line in content.split("\n"):
# print line
# print "IN:%s"%line
self._lastLine = line
if state not in ['macro']:
line = line.strip()
# \\ on their own line will emit <br>
if line == r'\\':
page.addNewLine()
line = ''
continue
if line.strip() == "" and state == "start":
page.addNewLine()
line = ''
continue
# print "#: %s %s" % (state,line)
# END TABLE
if state == "table" and (line[0:1] == "||" or line.find("|") != 0):
state = "start"
if params != "":
page.addList(trows, theader, classparams=params)
else:
page.addList(trows, theader)
params = ""
# PAGEBREAK
if state == "start" and (line.find(" ") != -1): # or line=="":
page.addNewLine()
continue
if state != "macro" and line == "":
page._checkBlock('', '', '')
continue
# SKIP LINES
if state != "macro" and line[0] == "#":
continue
# IMAGE
regex = r"\![\w\-:_/=*.,|?&][\w\-:_/= *.,|?&]*[\w\-:_/=*.,|?&]\!"
if (state == "start" or state == "table")and j.tools.code.regex.match(regex, line):
matches = j.tools.code.regex.findAll(regex, line)
for match in matches:
image = match.replace("!", "")
if '|' in image:
# Image may have attributes, like
# !image.png|border=1px solid black, margin=1px!
# these should be written as CSS properties. The syntax for the macro should follow CSS format
#
# Result: <img src="image.png" style="border: 1px solid black; margin: 1px" />
image, styles = image.split('|', 1)
styles = [attr.split('=') for attr in styles.split(',')]
else:
styles = []
if image.startswith('/') or image.startswith('http://'):
imagePath = image
else:
# imagePath = "/images/%s/%s" % (doc.getSpaceName(), image)
imagePath = "/images/%s/%s" % ("unknownspace", image)
# th=j.data.tags.getObject(tags)
# result=th.getValues(width=800,height=600,border=True)
#page.addImage(image, image, result["width"], result["height"])
#page.addImage(image, imagePath, styles=styles)
# line = line.replace(match, self.createImage(image, imagePath, styles=styles))
page.addMessage("unsupported image:%s" % imagePath)
continue
if line.find("{center}") > -1:
continue
if line.startswith("{toc:"):
# line="{{toc}}"
line = ""
continue
# 1 line macros
if (state == "start" or state == "table") and line.find("{{") != -1 and line.find("}}") != -1:
continue # not supported for now
# self.processMacro()
# macros = doc.preprocessor.macroexecutorPage.getMacroCandidates(line)
# for macro in macros:
# raise RuntimeError("macro in table not supported")
# # print "## 1linemacro:%s"%macro
# # mtayseer: this condition looks wrong!!
# if line.find("{{") != 0 or len(macros) > 1:
# htmlMacro = doc.preprocessor.macroexecutorPage.executeMacroReturnHTML(macro,
# doc=doc, requestContext=requestContext, paramsExtra=paramsExtra, pagemirror4jscss=page)
# line = line.replace(macro, htmlMacro)
# else:
# doc.preprocessor.macroexecutorPage.executeMacroAdd2Page(macro, page, doc=doc,
# requestContext=requestContext, paramsExtra=paramsExtra)
# line = ""
# macro = ""
# # print "processed 1 macro line:%s"%line
# if line.strip() == "":
# continue
# print "after1linemacrostate:%s %s"%(line,state)
if state == "start" and line.find("{{") != -1:
state = "macro"
if state == "macro":
macro += "%s\n" % line
if state == "macro" and line.find("}}") >= 0:
state = "start"
# print "macroend:%s"%line
# macrostr=macro
# if doc != None:
# doc.preprocessor.macroexecutorPage.executeMacroAdd2Page(macro, page, doc=doc, requestContext=requestContext, paramsExtra=paramsExtra)
# macro = ""
# # params=""
# continue
self.processMacro(macro, page)
macro = ""
continue
if line.strip() == "":
continue
# print "linkcheck: %s" % j.tools.code.regex.match("\[[-\\:|_\w\s/]*\]",line)
# FIND LINKS
line = self.findLinks(line)
# HEADING
header = j.tools.code.regex.getRegexMatch("^h(\d)\. (.+?)$", line)
if header and state == "start":
level, line = header.foundSubitems
level = int(level)
# line = self.processDefs(line, doc, page)
page.addHeading(line, level)
continue
unorderedItem = j.tools.code.regex.getRegexMatch("^(\*+) (.+?)$", line)
if state == "start" and unorderedItem:
stars, line = unorderedItem.foundSubitems
level = len(stars)
# line = self.processDefs(line, doc, page)
page.addBullet(line, level)
ulAttributes = '' # ulAttributes is set in the previous iteration of the for-loop. It should be reset _after_ the list is added
continue
numberedItem = j.tools.code.regex.getRegexMatch("^\*(#+) (.+?)$", line)
if state == "start" and numberedItem:
hashes, line = numberedItem.foundSubitems
level = len(hashes)
# line = self.processDefs(line, doc, page)
page.addBullet(line, level)
ulAttributes = ''
continue
# Read styles for lists
# The syntax will be like this
#
# *- id=main-menu | class=nav nav-list
# * item 1
# * item 2
ulAttributes = j.tools.code.regex.getRegexMatch("^(\*+)- (.+?)$", line)
if ulAttributes:
continue
else:
ulAttributes = ''
if state == "start" and j.tools.code.regex.match(".*\|\|.*", line) and len(line.split("||")) == 2:
# DESCRIPTIONS
p1, p2 = line.split("||")
# p2 = self.processDefs(line, doc, page)
page.addDescr(p1, p2)
continue
if state == "start" and (line.find("@divend") == 0 or line.find("@rowend") ==
0 or line.find("@colend") == 0 or line.find("@blockend") == 0):
# page.addMessage("</div>")
continue
if state == "start" and line.find("@block") == 0:
# divlower(divauto,page,"block")
arg = line.replace("@block", "").strip()
# if arg == "":
# arg = "container"
# page.addMessage("<div class=\"%s\">" % arg)
# page.divlevel.append("block")
continue
if state == "start" and line.find("@row") == 0:
# divlower(divauto,page,"row")
arg = line.replace("@row", "").strip()
# if arg == "":
# arg = "row-fluid"
# page.addMessage("<div class=\"%s\">" % arg)
# page.divlevel.append("row")
continue
if state == "start" and line.find("@col") == 0:
# divlower(divauto,page,"col")
line = line.replace("@col", "").strip()
# arg= line.replace("@col", "").strip()
# page.addMessage("<div class=\"span%s\">" % arg)
# page.divlevel.append("col")
continue
if state == "start" and line.find("@block") == 0:
line = line.replace("@block", "").strip()
# arg = line.replace("@block", "").strip()
# if arg == "":
# arg = "container-fluid"
# page.addMessage("<div class=\"%s\">" % arg)
# page.divlevel += 1
continue
# check params
if state == "start" and line.find("@params") == 0:
params = line.replace("@params", "").strip()
#from JumpScale.core.Shell import ipshell
# print "DEBUG NOW params, not implemented"
# ipshell()
if state == "start" and line.find("||") == 0:
# beginning of table
line = self.processDefs(line, doc, page)
state = "table"
cols = line.split("||")
cols = cols[1:-1]
theader = cols
trows = []
continue
if state == "start" and line.find("|") == 0:
# beginning of table
line = self.processDefs(line, doc, page)
state = "table"
theader = ""
trows = []
if state == "table" and line.find("|") == 0:
# ADD ROW TO TABLE
line = self.processDefs(line, doc, page)
cols = line.split("|")
trows.append(cols[1:-1])
# was a regular line so add
if state != "macro" and state != "table" and line != "":
if line[0] != "@":
line = self.processDefs(line, doc, page)
page.addMessage(line)
if page.body != "":
# work on the special includes with [[]]
includes = j.tools.code.regex.findAll("\[\[[\w :;,\.\*\!\?\^\=\'\-/]*\]\]", page.body)
for item in includes:
item2 = item.replace("[[", "").replace("]]", "")
if doc.preprocessor.docExists(item2):
doc2 = doc.preprocessor.docGet(item2)
else:
page.body = page.body.replace(
item, " ***error*** : COULD NOT FIND DOC %s, could not include." %
item2)
continue
page2 = j.portal.tools.docgenerator.pageNewMD("includeInConfluence2Wiki")
# page2.liblocation = page.liblocation
page2 = self.convert(doc2.content, page2, doc2)
page.body = page.body.replace(item, page2.body)
return page
| 40.5 | 183 | 0.437739 |
ae8ea2bf53707e593e6a3457943500bc2d3a3133 | 3,999 | py | Python | core/polyaxon/k8s/logging/monitor.py | admariner/polyaxon | ba355c38166047eb11e60de4cee4d7c3b48db323 | [
"Apache-2.0"
] | 3,200 | 2017-05-09T11:35:31.000Z | 2022-03-28T05:43:22.000Z | core/polyaxon/k8s/logging/monitor.py | admariner/polyaxon | ba355c38166047eb11e60de4cee4d7c3b48db323 | [
"Apache-2.0"
] | 1,324 | 2017-06-29T07:21:27.000Z | 2022-03-27T12:41:10.000Z | core/polyaxon/k8s/logging/monitor.py | admariner/polyaxon | ba355c38166047eb11e60de4cee4d7c3b48db323 | [
"Apache-2.0"
] | 341 | 2017-01-10T23:06:53.000Z | 2022-03-10T08:15:18.000Z | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Any, Iterable, Optional
from kubernetes.client.rest import ApiException
from polyaxon.client import RunClient
from polyaxon.exceptions import PolyaxonK8SError
from polyaxon.k8s.manager import K8SManager
from polyaxon.polyboard.logging import V1Log
from polyaxon.utils.tz_utils import now
def query_logs(
k8s_manager: K8SManager,
pod_id: str,
container_id: str,
stream: bool = False,
since_seconds: int = None,
) -> Any:
params = {}
if stream:
params = {"follow": True, "_preload_content": False}
if since_seconds:
params = {"since_seconds": since_seconds}
return k8s_manager.k8s_api.read_namespaced_pod_log(
pod_id, k8s_manager.namespace, container=container_id, timestamps=True, **params
)
def process_log_line(log_line: str):
if not isinstance(log_line, str):
log_line = log_line.decode("utf-8")
return V1Log.process_log_line(
value=log_line.strip(), node=None, pod=None, container=None
)
def stream_logs(
k8s_manager: K8SManager, pod_id: str, container_id: str
) -> Iterable[str]:
raw = None
retries = 0
no_logs = True
while retries < 3 and no_logs:
try:
raw = query_logs(
k8s_manager=k8s_manager,
pod_id=pod_id,
container_id=container_id,
stream=True,
)
except (PolyaxonK8SError, ApiException):
retries += 1
if not raw:
yield ""
else:
for log_line in raw.stream():
if log_line:
yield process_log_line(log_line=log_line)
def process_logs(
k8s_manager: K8SManager,
pod_id: str,
container_id: str,
filepath: str,
since_seconds: int,
) -> bool:
logs = None
retries = 0
no_logs = True
while retries < 3 and no_logs:
try:
logs = query_logs(
k8s_manager=k8s_manager,
pod_id=pod_id,
container_id=container_id,
since_seconds=since_seconds,
)
no_logs = False
except (PolyaxonK8SError, ApiException):
retries += 1
if not logs:
return False
log_lines = []
for log_line in logs.split("\n"):
if log_line:
log_lines.append(process_log_line(log_line=log_line))
# Creating the new file
if not log_lines:
return False
with open(filepath, "w+") as destination:
destination.write("\n".join(log_lines))
return True
def sync_logs(
k8s_manager: K8SManager,
client: RunClient,
last_check: Optional[datetime],
pod_id: str,
container_id: str,
owner: str,
project: str,
run_uuid: str,
):
new_check = now()
since_seconds = None
if last_check:
since_seconds = (new_check - last_check).total_seconds()
if since_seconds < 1:
return last_check
filepath = str(new_check.timestamp())
created = process_logs(
k8s_manager=k8s_manager,
pod_id=pod_id,
container_id=container_id,
since_seconds=since_seconds,
filepath=filepath,
)
if created:
client.client.upload_run_logs(
owner, project, run_uuid, uploadfile=filepath, path=filepath
)
return new_check
return last_check
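
# Sketch of a caller-side polling loop for sync_logs (the interval, the
# run_is_active() helper and all identifiers below are illustrative):
#
#     last_check = None
#     while run_is_active():
#         last_check = sync_logs(k8s_manager, client, last_check, pod_id,
#                                container_id, owner, project, run_uuid)
#         time.sleep(5)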
| 25.634615 | 88 | 0.642411 |
a2729b6857aeb284a7ba704fad7c5e8a3761390b | 18,344 | py | Python | napalm_yang/models/openconfig/network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local_/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local_/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local_/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class local_(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/connection-points/connection-point/endpoints/endpoint/local. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters
relating to a local interface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "local"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"connection-points",
"connection-point",
"endpoints",
"endpoint",
"local",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/config (container)
YANG Description: Configuration parameters relating to a local
endpoint
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to a local
endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/state (container)
YANG Description: Operational state parameters relating to a
local endpoint
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to a
local endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class local_(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/connection-points/connection-point/endpoints/endpoint/local. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters
relating to a local interface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "local"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"connection-points",
"connection-point",
"endpoints",
"endpoint",
"local",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/config (container)
YANG Description: Configuration parameters relating to a local
endpoint
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to a local
endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/state (container)
YANG Description: Operational state parameters relating to a
local endpoint
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/connection_points/connection_point/endpoints/endpoint/local/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to a
local endpoint
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| 38.216667 | 377 | 0.604666 |
18837fcac295fabfc6a00c588651d63dc7a67b7c | 111 | py | Python | playground/DAG/adapter/episode.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | playground/DAG/adapter/episode.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | playground/DAG/adapter/episode.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | from playground.auxiliary.episode import Episode
from .broker import JobBroker
Episode.broker_cls = JobBroker
| 22.2 | 48 | 0.846847 |
3bbb9e08e48d27e2d1bc22239e045d5c9fd83aaf | 655 | py | Python | Python/palindromic-substrings.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/palindromic-substrings.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | Python/palindromic-substrings.py | sm2774us/leetcode_interview_prep_2021 | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | [
"Fair"
] | null | null | null | # Time: O(n)
# Space: O(n)
class Solution(object):
def countSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
def manacher(s):
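            # Interleave '#' sentinels (with '^'/'$' guards) so every
            # palindrome in the transformed string has odd length; P[i]
            # then holds the palindrome radius centered at position i.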
s = '^#' + '#'.join(s) + '#$'
P = [0] * len(s)
C, R = 0, 0
for i in range(1, len(s) - 1):
i_mirror = 2*C-i
if R > i:
P[i] = min(R-i, P[i_mirror])
while s[i+1+P[i]] == s[i-1-P[i]]:
P[i] += 1
if i+P[i] > R:
C, R = i, i+P[i]
return P
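        # A radius of max_len in the transformed string corresponds to
        # (max_len + 1) // 2 palindromic substrings centered at that position.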
return sum((max_len+1)//2 for max_len in manacher(s))
| 26.2 | 61 | 0.340458 |
f4a649e6f4985b7b74a851c9415c7b71860eafc4 | 314 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractTalesOfMU.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractTalesOfMU.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractTalesOfMU.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractTalesOfMU(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if any('volume' in tag.lower() for tag in item['tags']) and (chp or vol):
return buildReleaseMessageWithType(item, 'Tales of MU', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
return False
| 34.888889 | 110 | 0.713376 |
91ce3727ffe3fb5261ecb7f44dd7049974e424e4 | 476 | py | Python | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_thickness.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_thickness.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/treemap/marker/colorbar/_thickness.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ThicknessValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="thickness", parent_name="treemap.marker.colorbar", **kwargs
):
super(ThicknessValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 31.733333 | 86 | 0.647059 |
984791337e030e0d0c62e258bd2b2e18be9c4c47 | 1,731 | py | Python | src/clearskies/column_types/integer_test.py | cmancone/clearskies | aaa33fef6d03205faf26f123183a46adc1dbef9c | [
"MIT"
] | 4 | 2021-04-23T18:13:06.000Z | 2022-03-26T01:51:01.000Z | src/clearskies/column_types/integer_test.py | cmancone/clearskies | aaa33fef6d03205faf26f123183a46adc1dbef9c | [
"MIT"
] | null | null | null | src/clearskies/column_types/integer_test.py | cmancone/clearskies | aaa33fef6d03205faf26f123183a46adc1dbef9c | [
"MIT"
] | null | null | null | import unittest
from .integer import Integer
class IntegerTest(unittest.TestCase):
def test_from_backend(self):
integer = Integer()
self.assertEquals(5, integer.from_backend('5'))
def test_check_input_bad(self):
integer = Integer()
integer.configure('age', {}, IntegerTest)
error = integer.input_errors('model', {'age': 'asdf'})
self.assertEquals({'age': 'age must be an integer'}, error)
def test_check_input_good(self):
integer = Integer()
integer.configure('age', {}, IntegerTest)
self.assertEquals({}, integer.input_errors('model', {'age': 15}))
self.assertEquals({}, integer.input_errors('model', {'age': None}))
self.assertEquals({}, integer.input_errors('model', {}))
def test_is_allowed_operator(self):
integer = Integer()
for operator in ['=', '<', '>', '<=', '>=']:
self.assertTrue(integer.is_allowed_operator(operator))
for operator in ['==', '<=>']:
self.assertFalse(integer.is_allowed_operator(operator))
def test_build_condition(self):
integer = Integer()
integer.configure('fraction', {}, int)
self.assertEquals('fraction=0.2', integer.build_condition(0.2))
self.assertEquals(
'fraction<10',
integer.build_condition(10, operator='<')
)
def test_check_search_value(self):
integer = Integer()
integer.configure('age', {}, IntegerTest)
self.assertEquals('', integer.check_search_value(25))
self.assertEquals('age must be an integer', integer.check_search_value(25.0))
self.assertEquals('age must be an integer', integer.check_search_value('asdf'))
| 38.466667 | 87 | 0.626228 |
c2943dd8f38ef2a06616f968fb0392caa053c575 | 753 | py | Python | chart2/ScrapeCallback.py | fulinmao/webScraping | ae92490b9ee486f72bc926d5c6ee7b88af193e57 | [
"Apache-2.0"
] | null | null | null | chart2/ScrapeCallback.py | fulinmao/webScraping | ae92490b9ee486f72bc926d5c6ee7b88af193e57 | [
"Apache-2.0"
] | null | null | null | chart2/ScrapeCallback.py | fulinmao/webScraping | ae92490b9ee486f72bc926d5c6ee7b88af193e57 | [
"Apache-2.0"
] | null | null | null | import csv
import re
import lxml.html
class ScrapeCallback:
def __init__(self):
self.writer = csv.writer(open('countries.csv' , 'w'))
self.fields = ('national_flag','area','population','iso','country','capital','continent','tld',
'currency_code','currency_name','phone','postal_code_format','postal_code_regex',
'languages','neighbours');
self.writer.writerow(self.fields)
    def __call__(self, url, html):
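        # Called once per downloaded page with its URL and HTML; only
        # country "view" pages are scraped, one CSV row per country.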
        if re.search('/view', url):
tree = lxml.html.fromstring(html)
row = []
for field in self.fields:
row.append(tree.cssselect('table > tr#places_%s__row >td.w2p_fw' % field)[0].text_content())
            self.writer.writerow(row)
| 31.375 | 108 | 0.606906 |
f344fd799efb79c23ce487e8b5f2240ad9dd61f2 | 5,882 | py | Python | PyTradier/order.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | 1 | 2021-04-30T23:59:20.000Z | 2021-04-30T23:59:20.000Z | PyTradier/order.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | 7 | 2021-05-08T00:47:59.000Z | 2021-05-12T01:45:37.000Z | PyTradier/order.py | zlopez101/PyTradier | 83397cf38bd636c471993b57fb71a12885affcb7 | [
"MIT"
] | null | null | null | """
This module is trying to anticipate the more complex orders that will be placed in the future.
Any algorithmic trading that occurs with this platform won't have time to preview each trade.
The program ought to evaluate its own trades
"""
from typing import Union
from PyTradier.exceptions import RequiredError
import re
class baseOrder:
accepted_durations = ["day", "gtc", "pre", "post"]
option_sides = ["buy_to_open", "buy_to_close", "sell_to_open", "sell_to_close"]
equity_sides = ["buy", "buy_to_cover", "sell", "sell_short"]
def __init__(
self,
symbol: str,
quantity: int,
side,
duration: str = "",
preview: Union[str, bool] = "",
tag: str = "",
):
if duration and duration not in baseOrder.accepted_durations:
raise RequiredError(
f'duration "{duration}" not one of {baseOrder.accepted_durations}'
)
self.symbol_process(symbol)
self.quantity = quantity
self.duration = duration
if self.option_symbol:
if side not in baseOrder.option_sides:
raise RequiredError(
f"side {side} is not accepted side for option orders. Please choose from {baseOrder.option_sides}"
)
else:
if side not in baseOrder.equity_sides:
raise RequiredError(
f"side {side} is not accepted side for equity orders. Please choose from {baseOrder.equity_sides}"
)
self.side = side
self.preview = preview
self.tag = tag
def symbol_process(self, symbol: str):
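        # Option symbols (OCC-style, e.g. "AAPL210618C00130000") are longer
        # than 10 characters; the underlying ticker is the leading alphabetic
        # prefix, extracted by splitting on the first run of digits.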
if len(symbol) > 10:
# we got an option
self.option_symbol = symbol
self.symbol = next(filter(None, re.split(r"(\d+)", symbol)))
else:
self.symbol = symbol
self.option_symbol = None
def __repr__(self):
return f"order(Symbol: {self.symbol})"
def params(self, _class: str) -> dict:
"""create the order parameter details in a form that Tradier API will understand
:param _class: the class of the order. Ex equity
:type _class: str
:return: order details dictionary dictionary
:rtype: dict
"""
details = {}
for key, value in {
key: value for key, value in self.__dict__.items() if value
}.items():
if key.startswith("_"):
details[key[1:]] = value
else:
details[key] = value
details["class"] = _class
return details
def make_legs(self, index: int, exclude: str = "") -> dict:
"""[summary]
don't include duration, tags, preview,
:param index: index of the order dictionary
:type index: int
:param exclude: key to be excluded, used with combo/multileg orders
:type exclude: str
:return: dictionary reference for the order
:rtype: dict
"""
leg = {}
for key, value in {
key: value
for key, value in self.__dict__.items()
if value and (key != "duration" and key != exclude)
}.items():
if key.startswith("_"):
leg[key[1:] + f"[{index}]"] = value
else:
leg[key + f"[{index}]"] = value
return leg
class LimitOrder(baseOrder):
"""A limit order is an order to buy or sell a stock at a specific price or better.
"""
_type = "limit"
def __init__(
self,
symbol: str,
side: str,
quantity: int,
limit_price: float,
duration: str = "",
preview: Union[str, bool] = "",
tag: str = "",
):
super().__init__(
symbol, quantity, side, duration=duration, preview=preview, tag=tag
)
self._type = "limit"
self.price = limit_price
class StopOrder(baseOrder):
"""A stop order, also referred to as a stop-loss order, is an order to buy or sell a stock once the price of the stock reaches a specified price, known as the stop price.
"""
def __init__(
self,
symbol: str,
side: str,
quantity: int,
stop_price: float,
duration: str = "",
preview: Union[str, bool] = "",
tag: str = "",
):
super().__init__(
symbol, quantity, side, duration=duration, preview=preview, tag=tag
)
self.type = "stop"
self.stop = stop_price
class StopLimitOrder(baseOrder):
"""A stop-limit order is an order to buy or sell a stock that combines the features of a stop order and a limit order
"""
def __init__(
self,
symbol: str,
side: str,
quantity: int,
stop_price: float,
limit_price: float,
duration: str = "",
preview: Union[str, bool] = "",
tag: str = "",
):
super().__init__(
symbol, quantity, side, duration=duration, preview=preview, tag=tag
)
self.type = "stop_limit"
self.stop = stop_price
self.price = limit_price
class MarketOrder(baseOrder):
def __init__(
self,
symbol: str,
side: str,
quantity: int,
duration: str = "",
preview: Union[str, bool] = "",
tag: str = "",
):
super().__init__(
symbol, quantity, side, duration=duration, preview=preview, tag=tag
)
self.type = "market"
class SpecOrder(baseOrder):
pass
if __name__ == "__main__":
order = LimitOrder("AAPL210605C000123000", "buy", 1, 2, "gtc")
print(order.params())
| 28.975369 | 174 | 0.555423 |
366f0bc93f5305fe4ad3251ea9c4adb0b350bc01 | 4,086 | py | Python | a10_octavia/db/models.py | mohdadeebkhan/a10-octavia | db97c19add08661d794ce81e54a21578d4be0003 | [
"Apache-2.0"
] | null | null | null | a10_octavia/db/models.py | mohdadeebkhan/a10-octavia | db97c19add08661d794ce81e54a21578d4be0003 | [
"Apache-2.0"
] | null | null | null | a10_octavia/db/models.py | mohdadeebkhan/a10-octavia | db97c19add08661d794ce81e54a21578d4be0003 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import models
import sqlalchemy as sa
from sqlalchemy.ext import orderinglist
from sqlalchemy import orm
from sqlalchemy.orm import validates
from sqlalchemy.sql import func
from a10_octavia.common import data_models
from a10_octavia.db import base_models
from octavia.i18n import _
class VThunder(base_models.BASE):
__data_model__ = data_models.VThunder
__tablename__ = 'vthunders'
id = sa.Column(sa.Integer, primary_key=True)
vthunder_id = sa.Column(sa.String(36), nullable=False)
project_id = sa.Column(sa.String(36))
amphora_id = sa.Column(sa.String(36), nullable=True)
device_name = sa.Column(sa.String(1024), nullable=False)
ip_address = sa.Column('ip_address', sa.String(64), nullable=False)
username = sa.Column(sa.String(1024), nullable=False)
password = sa.Column(sa.String(50), nullable=False)
axapi_version = sa.Column(sa.Integer, default=30, nullable=False)
undercloud = sa.Column(sa.Boolean(), default=False, nullable=False)
loadbalancer_id = sa.Column(sa.String(36))
compute_id = sa.Column(sa.String(36))
topology = sa.Column(sa.String(50))
role = sa.Column(sa.String(50))
last_udp_update = sa.Column(u'last_udp_update', sa.DateTime(), nullable=False)
status = sa.Column('status', sa.String(36), default='ACTIVE', nullable=False)
created_at = sa.Column(u'created_at', sa.DateTime(), nullable=True)
updated_at = sa.Column(u'updated_at', sa.DateTime(), nullable=True)
partition_name = sa.Column(sa.String(14), default='shared', nullable=False)
hierarchical_multitenancy = sa.Column(sa.String(7), default='disable', nullable=False)
last_write_mem = sa.Column(u'last_write_mem', sa.DateTime(), nullable=True)
acos_version = sa.Column(sa.String(36), nullable=True)
@classmethod
def find_by_loadbalancer_id(cls, loadbalancer_id, db_session=None):
return cls.find_by_attribute('loadbalancer_id', loadbalancer_id, db_session)
class VRID(base_models.BASE):
__data_model__ = data_models.VRID
__tablename__ = 'vrid'
id = sa.Column(sa.String(36), primary_key=True)
owner = sa.Column(sa.String(36), nullable=False)
vrid = sa.Column(sa.Integer, default=0)
vrid_port_id = sa.Column(sa.String(36), nullable=False)
vrid_floating_ip = sa.Column(sa.String(40), nullable=False)
subnet_id = sa.Column(sa.String(36), nullable=False)
class NATPool(base_models.BASE):
__data_model__ = data_models.NATPool
__tablename__ = 'nat_pool'
__table_args__ = (
sa.UniqueConstraint('name', 'subnet_id', name='unique_name_subnet_id'),
)
id = sa.Column(sa.String(64), primary_key=True)
name = sa.Column(sa.String(64), nullable=False)
subnet_id = sa.Column(sa.String(64), nullable=False)
start_address = sa.Column('start_address', sa.String(64), nullable=False)
end_address = sa.Column('end_address', sa.String(64), nullable=False)
member_ref_count = sa.Column(sa.Integer, default=0, nullable=False)
port_id = sa.Column(sa.String(64), nullable=False)
class VrrpSet(base_models.BASE):
__data_model__ = data_models.VrrpSet
__tablename__ = 'vrrp_set'
__table_args__ = (
sa.UniqueConstraint('mgmt_subnet', 'project_id', name='unique_name_project_subnet_set_id'),
)
mgmt_subnet = sa.Column(sa.String(64), primary_key=True)
project_id = sa.Column(sa.String(64), primary_key=True)
set_id = sa.Column(sa.Integer, default=0, nullable=False)
| 43.010526 | 99 | 0.725649 |
0fb6a59b99889f5b290752d5a8de5be6faa24f85 | 9,762 | py | Python | Signadyne_Digitizer/Signadyne_Digitizer.py | roniwinik/Drivers | ba473bc21d1b5321da1e6caadec5b4d624282edc | [
"MIT"
] | null | null | null | Signadyne_Digitizer/Signadyne_Digitizer.py | roniwinik/Drivers | ba473bc21d1b5321da1e6caadec5b4d624282edc | [
"MIT"
] | null | null | null | Signadyne_Digitizer/Signadyne_Digitizer.py | roniwinik/Drivers | ba473bc21d1b5321da1e6caadec5b4d624282edc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
import InstrumentDriver
import signadyne
import numpy as np
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements a Signadyne file handler"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# set time step and resolution
self.nBit = 16
self.bitRange = float(2**(self.nBit-1)-1)
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# create AWG instance
self.dig = signadyne.SD_AIN()
AWGPart = self.dig.getProductNameBySlot(1, int(self.comCfg.address))
if not isinstance(AWGPart, str):
raise InstrumentDriver.Error('Unit not available')
# check that model is supported
dOptionCfg = self.dInstrCfg['options']
for validId, validName in zip(dOptionCfg['model_id'], dOptionCfg['model_str']):
if AWGPart.find(validId)>=0:
# id found, stop searching
break
else:
# loop fell through, raise ID error
raise InstrumentDriver.IdError(AWGPart, dOptionCfg['model_id'])
# set model
self.setModel(validName)
        # sampling rate and number of channels are set by model
if validName in ('M3102', 'M3302'):
# 500 MHz models
self.dt = 2E-9
self.nCh = 4
else:
# assume 100 MHz for all other models
self.dt = 10E-9
self.nCh = 4
# create list of sampled data
self.lTrace = [np.array([])] * self.nCh
self.dig.openWithSlot(AWGPart, 1, int(self.comCfg.address))
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was called with an error
try:
# flush all memory
for n in range(self.nCh):
self.dig.DAQflush(n)
# close instrument
self.dig.close()
except:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# start with setting local quant value
quant.setValue(value)
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name)>6:
ch = int(quant.name[2])
name = quant.name[6:]
else:
ch, name = None, ''
# proceed depending on command
if quant.name in ('External Trig Source', 'External Trig Config'):
extSource = int(self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(self.getCmdStringFromValue('External Trig Config'))
self.dig.DAQtriggerExternalConfig(0, extSource, trigBehavior)
elif quant.name in ('Trig I/O', 'Trig Sampling Mode'):
# get direction and sync from index of comboboxes
direction = int(self.getCmdStringFromValue('Trig I/O'))
sync = int(self.getCmdStringFromValue('Trig Sampling Mode'))
self.dig.triggerIOconfig(direction, sync)
elif quant.name in ('Analog Trig Channel', 'Analog Trig Config', 'Trig Threshold'):
# get trig channel
trigCh = self.getValueIndex('Analog Trig Channel')
mod = int(self.getCmdStringFromValue('Analog Trig Config'))
threshold = self.getValue('Trig Threshold')
self.dig.channelTriggerConfig(trigCh, mod, threshold)
elif name in ('Range', 'Impedance', 'Coupling'):
# set range, impedance, coupling at once
rang = self.getRange(ch)
imp = int(self.getCmdStringFromValue('Ch%d - Impedance' % ch))
coup = int(self.getCmdStringFromValue('Ch%d - Coupling' % ch))
self.dig.channelInputConfig(ch, rang, imp, coup)
return value
def performGetValue(self, quant, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name)>6:
ch = int(quant.name[2])
name = quant.name[6:]
else:
ch, name = None, ''
if name == 'Signal':
# get traces if first call
if self.isFirstCall(options):
# don't arm if in hardware trig mode
self.getTraces(bArm=(not self.isHardwareTrig(options)))
# return correct data
value = quant.getTraceDict(self.lTrace[ch], dt=self.dt)
else:
# for all others, return local value
value = quant.getValue()
return value
def performArm(self, quant_names, options={}):
"""Perform the instrument arm operation"""
# arm by calling get traces
self.getTraces(bArm=True, bMeasure=False)
def getTraces(self, bArm=True, bMeasure=True):
"""Get all active traces"""
# test timing
# import time
# t0 = time.clock()
# lT = []
# find out which traces to get
self.lTrace = [np.array([])] * self.nCh
lCh = []
iChMask = 0
for n in range(self.nCh):
if self.getValue('Ch%d - Enabled' % n):
lCh.append(n)
iChMask += 2**n
# get current settings
nPts = int(self.getValue('Number of samples'))
nSeg = int(self.getValue('Number of records'))
nAv = int(self.getValue('Number of averages'))
# trigger delay is in 1/sample rate
nTrigDelay = int(self.getValue('Trig Delay')/self.dt)
if bArm:
# configure trigger for all active channels
for ch in lCh:
# extra config for trig mode
if self.getValue('Trig Mode') == 'Digital trigger':
extSource = int(self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(self.getCmdStringFromValue('External Trig Config'))
self.dig.DAQtriggerExternalConfig(ch, extSource, trigBehavior)
# analogTriggerMask = 0
# self.dig.DAQtriggerConfig(ch, trigBehavior, extSource, analogTriggerMask)
elif self.getValue('Trig Mode') == 'Analog channel':
digitalTriggerMode= 0
digitalTriggerSource = 0
trigCh = self.getValueIndex('Analog Trig Channel')
analogTriggerMask = 2**trigCh
self.dig.DAQtriggerConfig(ch, digitalTriggerMode, digitalTriggerSource, analogTriggerMask)
# config daq and trig mode
trigMode = int(self.getCmdStringFromValue('Trig Mode'))
self.dig.DAQconfig(ch, nPts, nSeg*nAv, nTrigDelay, trigMode)
#
# start acquiring data
self.dig.DAQstartMultiple(iChMask)
# lT.append('Start %.1f ms' % (1000*(time.clock()-t0)))
#
# return if not measure
if not bMeasure:
return
# define number of cycles to read at a time
nCycleTotal = nSeg*nAv
# set cycles equal to number of records, else 100
nCyclePerCall = nSeg if nSeg>1 else 100
nCall = int(np.ceil(nCycleTotal/nCyclePerCall))
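        # Pre-compute per-channel scaling: the full-scale voltage range divided by
        # the 16-bit code range converts raw ADC counts to volts.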
lScale = [(self.getRange(ch)/self.bitRange) for ch in range(self.nCh)]
for n in range(nCall):
# number of cycles for this call, could be fewer for last call
nCycle = min(nCyclePerCall, nCycleTotal-(n*nCyclePerCall))
# capture traces one by one
for ch in lCh:
data = self.DAQread(self.dig, ch, nPts*nCycle, int(self.timeout_ms/nCall))
                # average, if wanted
if nAv > 1 and data.size>0:
                    nAvHere = nCycle // nSeg
data = data.reshape((nAvHere, nPts*nSeg)).mean(0)
# adjust scaling to account for summing averages
scale = lScale[ch]*(nAvHere/nAv)
else:
# use pre-calculated scaling
scale = lScale[ch]
# convert to voltage, add to total average
if n==0:
self.lTrace[ch] = data*scale
else:
self.lTrace[ch] += data*scale
# lT.append('N: %d, Tot %.1f ms' % (n, 1000*(time.clock()-t0)))
# # log timing info
# self.log(': '.join(lT))
def getRange(self, ch):
"""Get channel range, as voltage"""
rang = float(self.getCmdStringFromValue('Ch%d - Range' % ch))
return rang
def DAQread(self, dig, nDAQ, nPoints, timeOut):
"""Read data diretly to numpy array"""
if dig._SD_Object__handle > 0:
if nPoints > 0:
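                # Allocate a ctypes short buffer for the DLL call below to fill;
                # np.frombuffer then wraps the same memory as int16 without copying.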
data = (signadyne.c_short * nPoints)()
nPointsOut = dig._SD_Object__signadyne_dll.SD_AIN_DAQread(dig._SD_Object__handle, nDAQ, data, nPoints, timeOut)
if nPointsOut > 0:
return np.frombuffer(data, dtype=np.int16, count=nPoints)
else:
return np.array([], dtype=np.int16)
else:
return signadyne.SD_Error.INVALID_VALUE
else:
return signadyne.SD_Error.MODULE_NOT_OPENED
if __name__ == '__main__':
pass
| 42.077586 | 127 | 0.572116 |
ac18c342fc579cd87419bdfb8b361a584c0a2015 | 70,932 | py | Python | services/cloud/code/backend/pythings_app/views.py | pythings/PythingsCloud | 661ca173f42b9fe00c3a61d3e7daf6c37a376641 | [
"Apache-2.0"
] | 2 | 2020-11-07T09:41:10.000Z | 2022-02-14T22:23:05.000Z | services/cloud/code/backend/pythings_app/views.py | pythings/PythingsCloud | 661ca173f42b9fe00c3a61d3e7daf6c37a376641 | [
"Apache-2.0"
] | 9 | 2022-01-11T23:34:14.000Z | 2022-01-11T23:47:59.000Z | services/cloud/code/backend/pythings_app/views.py | pythings/PythingsCloud | 661ca173f42b9fe00c3a61d3e7daf6c37a376641 | [
"Apache-2.0"
] | null | null | null | import os
import logging
import uuid
import time
import pytz
import datetime
# Django imports
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.conf import settings
from django.views.decorators.clickjacking import xframe_options_exempt
# Backend imports
from ..common.decorators import private_view, public_view
from ..common.utils import booleanize, format_exception, send_email, random_username
from ..common.exceptions import ErrorMessage
from ..common.time import timezonize, s_from_dt, dt, dt_from_s
from ..base_app.models import LoginToken
from .models import App, Thing, Session, Profile, WorkerMessageHandler, MessageCounter, ManagementMessage, WorkerMessage, Pool, File, Commit
from .helpers import create_app as create_app_helper
from .helpers import create_none_app, get_total_messages, get_total_devices, get_timezone_from_request
# Setup logging
logger = logging.getLogger(__name__)
# This is a support var used to prevent double click problems
ONGOING_SIGNUPS = {}
#==========================
# Web Setup view
#==========================
@public_view
def websetup(request):
# Init data
data={}
data['user'] = request.user
data['BASE_PATH'] = 'https://' + settings.MAIN_DOMAIN_NAME
return render(request, 'websetup.html', {'data': data})
#==========================
# New Thing view
#==========================
@private_view
def new_thing(request):
# Init data
data={}
data['user'] = request.user
return render(request, 'new_thing.html', {'data': data})
#=========================
# User login view
#=========================
@public_view
def user_login(request):
redirect= '/postlogin'
data = {}
# If authenticated user reloads the main URL
if request.method == 'GET' and request.user.is_authenticated():
return HttpResponseRedirect(redirect)
# If unauthenticated user tries to log in
if request.method == 'POST':
if not request.user.is_authenticated():
username = request.POST.get('username')
password = request.POST.get('password')
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
if "@" in username:
# Get the username from the email
try:
user = User.objects.get(email=username)
username = user.username
except User.DoesNotExist:
if password:
raise ErrorMessage('Check email and password')
else:
# Return here, we don't want to give any hints about existing users
data['success'] = 'Ok, you will shortly receive a login link by email (if we have your data).'
return render(request, 'login.html', {'data': data})
if password:
user = authenticate(username=username, password=password)
if user:
login(request, user)
return HttpResponseRedirect(redirect)
else:
raise ErrorMessage('Check email and password')
else:
# If empty password, send mail with login token
token = uuid.uuid4()
logger.debug('Sending login token "{}" via mail to {}'.format(token, user.email))
# Create token or update if existent (and never used)
try:
loginToken = LoginToken.objects.get(user=user)
except LoginToken.DoesNotExist:
LoginToken.objects.create(user=user, token=token)
else:
loginToken.token = token
loginToken.save()
                send_email(to=user.email, subject='Pythings Cloud login link', text='Hello,\n\nhere is your login link: https://{}/login/?token={}\n\nOnce logged in, you can go to "My Account" and change your password (or just keep using the login link feature).'.format(settings.MAIN_DOMAIN_NAME, token))
# Return here, we don't want to give any hints about existing users
data['success'] = 'Ok, you will shortly receive a login link by email (if we have your data).'
return render(request, 'login.html', {'data': data})
else:
# This should never happen.
# User tried to log-in while already logged in: log him out and then render the login
logout(request)
else:
# If we are logging in through a token
token = request.GET.get('token', None)
if token:
loginTokens = LoginToken.objects.filter(token=token)
if not loginTokens:
raise ErrorMessage('Token not valid or expired')
if len(loginTokens) > 1:
raise Exception('Consistency error: more than one user with the same login token ({})'.format(len(loginTokens)))
# Use the first and only token (todo: use the objects.get and correctly handle its exceptions)
loginToken = loginTokens[0]
# Get the user from the table
user = loginToken.user
# Set auth backend
user.backend = 'django.contrib.auth.backends.ModelBackend'
# Ok, log in the user
login(request, user)
loginToken.delete()
# Now redirect to site
return HttpResponseRedirect(redirect)
# All other cases, render the login page
return render(request, 'login.html', {'data': data})
#=========================
# Post login view
#=========================
@private_view
def postlogin(request):
if request.user.profile.last_accepted_terms < settings.TERMS_VERSION:
accepted = booleanize(request.GET.get('accepted', False))
if accepted:
request.user.profile.last_accepted_terms=settings.TERMS_VERSION
request.user.profile.save()
return HttpResponseRedirect('/dashboard')
data = {'action':'accept_terms'}
return render(request, 'postlogin.html', {'data': data})
else:
return HttpResponseRedirect('/dashboard')
#=========================
# User logout view
#=========================
@private_view
def user_logout(request):
logout(request)
return HttpResponseRedirect('/')
#=========================
# Register view
#=========================
@public_view
def register(request):
# user var
user = None
# Init data
data={}
data['user'] = request.user
data['status'] = None
if settings.INVITATION_CODE:
data['require_invitation'] = True
else:
data['require_invitation'] = False
# Get data
email = request.POST.get('email', None)
password = request.POST.get('password', None)
invitation = request.POST.get('invitation', None) # Verification code set for anyone
if request.user.is_authenticated():
return(HttpResponseRedirect('/dashboard'))
else:
if email and password:
# Check both email and password are set
if not email:
raise ErrorMessage('Missing email')
if not password:
raise ErrorMessage('Missing password')
# Check if we have to validate an invitation code
if settings.INVITATION_CODE:
if invitation != settings.INVITATION_CODE:
raise ErrorMessage('The invitation code you entered is not valid.')
if not email in ONGOING_SIGNUPS:
# Add user to recent signups dict
ONGOING_SIGNUPS[email] = None
# Check if user with this email already exists
if len(User.objects.filter(email = email)) > 0:
del ONGOING_SIGNUPS[email]
raise ErrorMessage('The email address you entered is already registered.')
# Register the user
user = User.objects.create_user(random_username(), password=password, email=email)
# Is this necessary?
user.save()
# Manually set the auth backend for the user
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
data['status'] = 'activated'
data['user'] = user
# Get email updates preference
email_updates = booleanize(request.POST.get('email_updates', False))
# Create the user Profile
logger.debug('Creating user profile for user "{}" with email_updates="{}"'.format(user.email, email_updates))
Profile.objects.create(user=request.user, last_accepted_terms=settings.TERMS_VERSION, email_updates=email_updates)
# Create Messages counter
logger.debug('Creating messages counter for user "{}" '.format(user.email))
MessageCounter.objects.create(user = user)
# Also create the None App for the user
logger.debug('Creating None App for user "{}" '.format(user.email))
create_none_app(request.user)
# Remove user from recent signups
del ONGOING_SIGNUPS[email]
return render(request, 'register.html', {'data': data})
else:
                # Check whether a previous request activated the user
i=0
while True:
if email not in ONGOING_SIGNUPS:
break
else:
time.sleep(1)
i+=1
if i>30:
                            raise ErrorMessage('Timed out. Your user might have been created correctly anyway: please try to log in first, and if that does not work, sign up again. If the error persists, contact us.')
                users_with_this_email = User.objects.filter(email=email)
                if len(users_with_this_email) < 1:
                    raise ErrorMessage('Error in creating the user. Please try again, and if the error persists contact us.')
else:
data['status'] = 'activated'
data['user'] = users_with_this_email[0]
user = authenticate(username=users_with_this_email[0].username, password=password)
if not user:
raise ErrorMessage('Error. Please try again and if the error persists contact us')
login(request, user)
return render(request, 'register.html', {'data': data})
else:
return render(request, 'register.html', {'data': data})
return render(request, 'register.html', {'data': data})
#==========================
# Account view
#==========================
@private_view
def account(request):
data={}
data['user'] = request.user
# Get profile and create on the fly if it does not exist yet (for old users)
try:
profile = Profile.objects.get(user=request.user)
except Profile.DoesNotExist:
Profile.objects.create(user=request.user)
profile = Profile.objects.get(user=request.user)
data['profile'] = profile
# Set values from POST and GET
edit = request.POST.get('edit', None)
if not edit:
edit = request.GET.get('edit', None)
data['edit'] = edit
value = request.POST.get('value', None)
# Fix None
if value and value.upper() == 'NONE':
value = None
if edit and edit.upper() == 'NONE':
edit = None
# Obtain total messages and total devices for a user
total_messages, _, _ = get_total_messages(request.user)
total_devices = get_total_devices(request.user)
# Set texts for totals
if profile.plan == 'Unlimited':
data['messages_usage'] = '{} of unlimited'.format(total_messages)
data['devices_usage'] = '{} of unlimited'.format(total_devices)
elif profile.plan == 'Betatester':
data['messages_usage'] = '{} of {} (remaining: {})'.format(total_messages, profile.plan_messages_limit, profile.plan_messages_limit - total_messages)
data['devices_usage'] = '{} of {}'.format(total_devices, profile.plan_things_limit)
elif profile.plan == 'Free':
data['messages_usage'] = '{} of unlimited'.format(total_messages)
data['devices_usage'] = '{} of {} (remaining: {})'.format(total_devices, profile.plan_things_limit, profile.plan_things_limit - total_devices)
else:
raise Exception('Unknown plan "{}" for user "{}"'.format(profile.plan, request.user.username))
# Do we have to edit something?
if edit:
try:
logger.info('Editing "{}" with value "{}"'.format(edit,value))
# Timezone
if edit=='timezone' and value:
# Validate
timezonize(value)
profile.timezone = value
profile.save()
# Email
elif edit=='email' and value:
request.user.email=value
request.user.save()
# Password
elif edit=='password' and value:
request.user.set_password(value)
request.user.save()
# API key
elif edit=='apikey' and value:
profile.apikey=value
profile.save()
# Plan
elif edit=='plan' and value:
profile.plan=value
profile.save()
# Type ID
elif edit=='type_id' and value:
value=int(value)
if value <= 10:
profile.type = 'Standard'
profile.type_id = int(value)
else:
profile.type = 'Advanced'
profile.type_id = int(value)
profile.save()
# Email preferences
elif edit=='email_preferences':
if booleanize(request.POST.get('do_update', False)):
email_updates = booleanize(request.POST.get('email_updates', False))
if profile.email_updates != email_updates:
profile.email_updates = email_updates
profile.save()
# Delete account
elif edit=='delete_account' and value:
if value == request.user.username:
logger.info('Deleting account for "{}"'.format(value))
user = request.user
# Logout and delete
logout(request)
user.delete()
data['success'] = 'Ok, account deleted. We are sorry to see you go!'
data['redirect_home'] = True
return render(request, 'success.html', {'data': data})
else:
raise ErrorMessage('Incorrect double-check on the Account ID.')
# Generic property
elif edit and value:
raise Exception('Unknown attribute "{}" to edit'.format(edit))
except ErrorMessage:
raise
except Exception as e:
logger.error('Error in performing the "{}" operation:"{}"'.format(edit, e))
data['error'] = 'Sorry, something unexpected happened. Please retry or contact support.'
return render(request, 'error.html', {'data': data})
return render(request, 'account.html', {'data': data})
#==========================
# Main Dashboard view
#==========================
@private_view
def dashboard(request):
# Init data
data={}
data['user'] = request.user
# Enumerate applications for this user
apps = App.objects.filter(user=request.user, hidden=False).order_by('name')
all_apps = App.objects.filter(user=request.user)
data['apps'] = apps
# Get last edit time
for app in apps:
app.latest_commit_ts = str(Commit.objects.filter(app=app).latest('ts').ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
# Enumerate things for this user Apps
data['lastsessions'] = []
for thing in Thing.objects.filter(app_id__in=[app.id for app in all_apps]):
try:
session = Session.objects.filter(thing=thing).latest('last_contact')
except:
pass
else:
# Get delta between now and last contact
deltatime_from_last_contact_s = time.time() - s_from_dt(session.last_contact)
session.connection_status = '<font color="red">OFFLINE</font>'
session.thing_status = 'OFFLINE'
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.management_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
session.connection_status = '<font color="limegreen">ONLINE</font>'
session.thing_status = 'ONLINE'
except:
pass
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.worker_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
session.connection_status = '<font color="limegreen">ONLINE</font>'
session.thing_status = 'ONLINE'
except:
pass
# Attach App name
session.app_name = thing.app.name
# Improve displaying time
session.last_contact = str(session.last_contact.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
global_status = '<font color="limegreen">OK</font>'
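            # Single-pass loop: break out at the first status that downgrades the global state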
for _ in [1]:
if session.last_pythings_status[0:2].upper() != 'OK':
global_status = '<font color="red">NOK</font>'
break
if session.last_worker_status[0:2].upper() == 'KO':
global_status = '<font color="red">NOK</font>'
break
if session.last_management_status[0:2].upper() == 'KO':
global_status = '<font color="red">NOK</font>'
break
if session.last_worker_status[0:2].upper() == 'UN':
global_status = '<font color="orange">HOK</font>'
break
if session.last_management_status[0:2].upper() == 'UN':
global_status = '<font color="orange">HOK</font>'
break
session.global_status = global_status
# Append session to last sessions
data['lastsessions'].append(session)
# Get last worker message
try:
last_worker_msgs = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=1)
for item in last_worker_msgs:
session.last_worker_msg = item
break
except Exception as e:
logger.error(e)
session.last_worker_msg = None
# Reorder based on name
data['lastsessions'].sort(key=lambda x: x.app_name, reverse=False)
# Render
return render(request, 'dashboard.html', {'data': data})
#===========================
# App Dashboard view
#===========================
@private_view
def dashboard_app(request):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
data['app'] = None
data['lastsessions'] = []
data['pool'] = None
data['orpool'] = request.GET.get('orpool',None)
data['action'] = request.GET.get('action', None)
data['view'] = request.GET.get('view', 'editor')
confirmed = request.GET.get('confirmed', False)
# Get AID form GET request
intaid = request.GET.get('intaid',None)
if intaid is None:
intaid = request.POST.get('intaid',None)
pool_name = request.GET.get('pool', None)
if pool_name is None:
pool_name = request.POST.get('pool', None)
# Set edit values from POST and GET
edit = request.POST.get('edit', None)
if not edit:
edit = request.GET.get('edit', None)
data['edit'] = edit
value = request.POST.get('value', None)
# Get the App
try:
app = App.objects.get(id=intaid, user=request.user)
data['app'] = app
except App.DoesNotExist:
data['error'] = 'App with intaid "{}" is not existent or you do not have access rights.'.format(intaid)
return render(request, 'error.html', {'data': data})
# Delete App
if data['action'] == 'delete' and confirmed:
# Start procedure for deleting the App
data['appname'] = app.name
# 1) Find all Things registered to this App and delete Up and Down messages
for thing in Thing.objects.filter(app=app):
logger.info('Removing messages for TID "{}" '.format(thing.tid))
WorkerMessage.objects.filter(aid=thing.app.aid, tid=thing.tid).delete()
ManagementMessage.objects.filter(aid=thing.app.aid, tid=thing.tid).delete()
# 2) Save Settings which are indirectly attached to the App's pools
setting_objects_to_delete=[]
for pool in Pool.objects.filter(app=app):
logger.info('Removing pool settings for pool "{}" '.format(pool.name))
setting_objects_to_delete.append(pool.settings)
# 3) Delete the App, this will trigger a "delete cascade" basically.
app.delete()
# 4) Remove settings leftover
for setting_object_to_delete in setting_objects_to_delete:
setting_object_to_delete.delete()
# Render OK-deleted page
return render(request, 'dashboard_app_deleted.html', {'data': data})
# Set PythingsOS versions
data['pythings_versions'] = settings.OS_VERSIONS
# If a pool is set, get it, otherwise use the default pool:
if pool_name:
try:
selected_pool = Pool.objects.get(app=app, name=pool_name)
data['pool'] = selected_pool
except Pool.DoesNotExist:
data['error'] = 'The pool "{}" does not exists'.format(pool_name)
return render(request, 'error.html', {'data': data})
else:
selected_pool = app.default_pool
# Enumerate App's versions
versions = []
for commit in Commit.objects.filter(app=app):
if commit.tag is not None:
versions.append(commit)
else:
if selected_pool.development:
versions.append(commit)
versions.reverse()
data['app_versions'] = versions
# Set the pool for the template
data['pool'] = selected_pool
# Enumerate the pools for this application
data['pools'] = []
for pool in Pool.objects.filter(app=app):
data['pools'].append(pool)
# Special case for use_latest
use_latest_app = request.POST.get('use_latest_app', None)
if use_latest_app:
if use_latest_app.upper()=='TRUE':
use_latest_app = True
if not selected_pool.use_latest_app_version:
selected_pool.use_latest_app_version = True
selected_pool.save()
elif use_latest_app.upper()=='FALSE':
use_latest_app = False
if selected_pool.use_latest_app_version:
selected_pool.use_latest_app_version = False
selected_pool.save()
else:
data['error'] = 'Unknown value "{}" for use_latest_app'.format(use_latest_app)
return render(request, 'error.html', {'data': data})
# Settings edit required?
if use_latest_app:
value=True
if edit and value:
try:
logger.info('Setting "{}" to "{}"'.format(edit,value))
if edit=='management_interval' and value:
int(value)
selected_pool.settings.management_interval = value
selected_pool.settings.save()
elif edit=='worker_interval' and value:
try:
int(value)
except ValueError:
if value.lower()=='auto':
value=value.lower()
else:
raise
selected_pool.settings.worker_interval = value
selected_pool.settings.save()
elif edit=='ssl' and value:
if value.upper()=='TRUE':
selected_pool.settings.ssl = True
else:
selected_pool.settings.ssl = False
selected_pool.settings.save()
elif edit=='payload_encryption' and value:
if value.upper()=='TRUE':
selected_pool.settings.payload_encryption = True
else:
selected_pool.settings.payload_encryption = False
selected_pool.settings.save()
elif edit=='pythings_version' and value:
                if value not in data['pythings_versions']:
                    raise Exception('Unknown PythingsOS version "{}"'.format(value))
selected_pool.settings.pythings_version = value
selected_pool.settings.save()
elif edit=='app_name' and value:
app.name = value
app.save()
elif edit=='app_version':
if use_latest_app:
if selected_pool.development:
commit = Commit.objects.filter(app=app).latest('ts')
else:
commit = Commit.objects.filter(app=app,tag__isnull=False).latest('ts')
selected_pool.settings.app_version = commit.cid
selected_pool.settings.save()
elif value:
# Validate
if len(value) >5 and value[0:4]==('tag:'):
value = value[4:]
commit = Commit.objects.get(app=app,tag=value)
else:
commit = Commit.objects.get(app=app,cid=value)
# Update
selected_pool.settings.app_version = commit.cid
selected_pool.settings.save()
else:
pass
# Generic property
else:
raise Exception()
except Exception as e:
logger.error('{}:{}'.format(type(e),str(e)))
data['error'] = 'The property "{}" does not exists or the value "{}" is not valid.'.format(edit, value)
return render(request, 'error.html', {'data': data})
# Trick: get pool App version tag if any:
try:
commit = Commit.objects.get(app=app,cid=selected_pool.settings.app_version)
selected_pool.settings.app_tag = commit.tag
except:
data['error'] = 'Consistency error: Cannot find commit for cid "{}"'.format(selected_pool.settings.app_version)
return render(request, 'error.html', {'data': data})
# Get latest version
data['app_latest_commit'] = Commit.objects.filter(app=app).latest('ts')
data['app_latest_commit_ts'] = str(data['app_latest_commit'].ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
# Enumerate things for this App and pool
for thing in Thing.objects.filter(app=app, pool=selected_pool):
try:
session = Session.objects.filter(thing=thing).latest('last_contact')
except:
pass
else:
# Get delta between now and last contact
            deltatime_from_last_contact_s = time.time() - s_from_dt(session.last_contact)
session.connection_status = '<font color="red">OFFLINE</font>'
session.thing_status = 'OFFLINE'
try:
if deltatime_from_last_contact_s < int(selected_pool.settings.management_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
session.connection_status = '<font color="limegreen">ONLINE</font>'
session.thing_status = 'ONLINE'
except:
pass
try:
if deltatime_from_last_contact_s < int(selected_pool.settings.worker_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
session.connection_status = '<font color="limegreen">ONLINE</font>'
session.thing_status = 'ONLINE'
except:
pass
# Improve displaying time
session.last_contact = str(session.last_contact.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
global_status = '<font color="limegreen">OK</font>'
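            # Single-pass loop, as above: break at the first status that downgrades the global state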
for _ in [1]:
if session.last_pythings_status[0:2].upper() != 'OK':
global_status = '<font color="red">KO</font>'
break
if session.last_worker_status[0:2].upper() == 'KO':
global_status = '<font color="red">KO</font>'
break
if session.last_management_status[0:2].upper() == 'KO':
global_status = '<font color="red">KO</font>'
break
if session.last_worker_status[0:2].upper() == 'UN':
global_status = '<font color="orange">~OK</font>'
break
if session.last_management_status[0:2].upper() == 'UN':
global_status = '<font color="orange">~OK</font>'
break
session.global_status = global_status
# Add latest commit to understand if we have an AI
session.app_latest_commit = data['app_latest_commit']
# Append last session
data['lastsessions'].append(session)
# Get last worker message
try:
last_worker_msgs = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=1)
for item in last_worker_msgs:
session.last_worker_msg = item
break
logger.debug('Last worker message: {}'.format(session.last_worker_msg) )
except Exception as e:
logger.error(e)
session.last_worker_msg = None
# Render
return render(request, 'dashboard_app.html', {'data': data})
#==================================
# Thing Dashboard view
#==================================
@private_view
def dashboard_thing(request):
profile_timezone = timezonize(get_timezone_from_request(request))
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
data['apps'] = {}
data['metrics'] = {}
data['timeseries'] = {}
tid = request.GET.get('tid',None)
if not tid:
# Try to set from POST
tid = request.POST.get('tid',None)
intaid = request.GET.get('intaid',None)
if not intaid:
# Try to set from POST
intaid = request.POST.get('intaid',None)
confirmed = request.GET.get('confirmed', False)
last = request.GET.get('last',None)
pool_name = request.GET.get('pool',None)
data['orpool'] = request.GET.get('orpool', None)
data['action'] = request.GET.get('action', None)
data['refresh'] = request.GET.get('refresh', '')
data['filterby'] = request.GET.get('filterby', '') # Do not use None, or you will end up with it in the HTML page
data['sid'] = request.GET.get('sid', None)
from_str = request.GET.get('from', None)
to_str = request.GET.get('to', None)
from_t = request.GET.get('from_t', None)
to_t = request.GET.get('to_t', None)
# Set values from POST and GET
edit = request.POST.get('edit', None)
if not edit:
edit = request.GET.get('edit', None)
data['edit'] = edit
name = request.POST.get('name', None)
if not tid:
return render(request, 'error.html', {'data': {'error': 'No Thing ID provided'}})
if not intaid:
return render(request, 'error.html', {'data': {'error': 'No App ID provided'}})
# Get all Apps
data['apps'] = App.objects.filter(user=request.user, hidden=False)
# Get App
try:
app = App.objects.get(id=intaid, user=request.user)
except App.DoesNotExist:
data['error'] = 'The App with Internal ID "{}" does not exists or you do not have access rights'.format(intaid)
return render(request, 'error.html', {'data': data})
# Get Thing
try:
thing = Thing.objects.get(tid=tid, app=app)
except Thing.DoesNotExist:
data['error'] = 'The Thing with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
data['thing'] = thing
# Check ownership
if thing.app.user != request.user:
data['error'] = 'The Device with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
# Change name if we have to:
if edit =='name' and name:
logger.debug('Changing device "{}" name to: {}'.format(thing.tid, name))
thing.name=name
thing.save()
# Install App if we have to
if data['action'] == 'install' and confirmed:
if thing.app_set_via != 'backend':
            data['error'] = 'Error, the App for Thing "{}" was set up on the Thing itself, so a new one cannot be installed'.format(thing.tid)
return render(request, 'error.html', {'data': data})
new_app_intaid = confirmed
logger.info('Will install App "{}" on thing "{}"'.format(new_app_intaid, thing.tid))
# Get the App to install:
try:
new_app = App.objects.get(id=new_app_intaid, user=request.user)
except Thing.DoesNotExist:
data['error'] = 'The new App to install with internal ID "{}" does not exists or you do not have access rights'.format(new_app_intaid)
return render(request, 'error.html', {'data': data})
# Set new App on the Thing
thing.app = new_app
thing.pool = new_app.default_pool
thing.save()
# Redirect to a fresh dashboard
return HttpResponseRedirect('/dashboard_thing/?tid={}&intaid={}'.format(thing.tid, new_app_intaid))
# Uninstall App if we have to
if data['action'] == 'uninstall' and confirmed:
if thing.app_set_via != 'backend':
            data['error'] = 'Error, the App for Thing "{}" was set up on the Thing itself and cannot be uninstalled'.format(thing.tid)
return render(request, 'error.html', {'data': data})
logger.info('Will uninstall App on thing "{}"'.format(thing.tid))
# Get the "NoneApp" for this user, and if it does not exist, create it.
try:
none_app = App.objects.get(user=request.user, aid='00000000-0000-0000-0000-000000000000')
except App.DoesNotExist:
none_app = create_none_app(request.user)
# Set new (None) App on the Thing
thing.app = none_app
thing.pool = none_app.default_pool
thing.save()
# Redirect to a fresh dashboard
return HttpResponseRedirect('/dashboard_thing/?tid={}&intaid={}'.format(thing.tid, none_app.id))
# Delete (remove) a Thing
if data['action'] == 'remove' and confirmed:
logger.info('Removing TID "{}" '.format(thing.tid))
data['tid'] = thing.tid
try:
WorkerMessageHandler.delete(aid=thing.app.aid, tid=thing.tid)
except Exception as e:
data['error'] = 'Error in deleting Thing with ID "{}": {}'.format(thing.tid, e)
return render(request, 'error.html', {'data': data})
else:
try:
ManagementMessage.objects.filter(tid=thing.tid).delete()
except Exception as e:
logger.error('Error when deleting management messages for tid "{}"'.format(thing.tid))
thing.delete()
return render(request, 'dashboard_thing_deleted.html', {'data': data})
# If a pool is set get it, and change
if pool_name:
try:
thing.pool = Pool.objects.get(app=thing.app, name=pool_name)
thing.save()
except Pool.DoesNotExist:
data['error'] = 'The pool named "{}" does not exists'.format(pool_name)
return render(request, 'error.html', {'data': data})
# Get last worker messages
last_worker_msgs=[]
try:
last_worker_msgs_or = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=3)
for item in last_worker_msgs_or:
# Fix time
item.ts = str(item.ts.astimezone(profile_timezone)).split('.')[0]
# Convert from json to string
item.data = str(item.data)
# Truncate if too long
if len(item.data) >= 150:
item.data = str(item.data[0:150]) + '...'
last_worker_msgs.append(item)
except Exception as e:
logger.error('Error when looping over worker messages for the dashboard: {}'.format(e))
data['last_worker_msgs'] = last_worker_msgs
# Get last management messages
last_management_msgs = []
try:
last_management_msgs = ManagementMessage.objects.filter(tid=thing.tid, aid=thing.app.aid).order_by('ts')[:3].reverse()
for msg in last_management_msgs:
msg.ts = str(msg.ts.astimezone(profile_timezone)).split('.')[0]
    except Exception as e:
        logger.error('Error when looping over management messages for the dashboard: {}'.format(e))
data['last_management_msgs'] = last_management_msgs
# Load session
try:
session = Session.objects.filter(thing=thing).latest('last_contact')
data['session'] = session
session.duration = str(session.last_contact-session.started).split('.')[0]
if session.duration.startswith('0'):
session.duration = '0'+session.duration
except:
data['session'] = None
else:
# Compute status
        deltatime_from_last_contact_s = time.time() - s_from_dt(session.last_contact)
data['connection_status'] = '<font color="red">OFFLINE</font>'
data['thing_status'] = 'OFFLINE'
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.management_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
data['connection_status'] = '<font color="limegreen">ONLINE</font>'
data['thing_status'] = 'ONLINE'
except:
pass
try:
if deltatime_from_last_contact_s < int(thing.pool.settings.worker_interval) + settings.CONTACT_TIMEOUT_TOLERANCE:
data['connection_status'] = '<font color="limegreen">ONLINE</font>'
data['thing_status'] = 'ONLINE'
except:
pass
# Formatting tricks
session.last_contact = str(session.last_contact.astimezone(profile_timezone)).split('.')[0]
if session.last_pythings_status.startswith('Ok:'):
session.last_pythings_status = 'OK'
# Format worker traceback if any
session.last_worker_status_traceback = None
try:
if session.last_worker_status.startswith('KO: '):
pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Format management traceback if any
session.last_management_status_traceback = None
try:
if session.last_management_status.startswith('KO: '):
pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Prepare data for the plots: parse last messages json contents, and if float add to the data for the plots
if from_str and to_str:
# Parse from
from_str_date_part, from_str_time_part = from_str.split(' ')
from_day, from_month, from_year = from_str_date_part.split('/')
from_hour, from_minute = from_str_time_part.split(':')
from_dt = dt(int(from_year), int(from_month), int(from_day), int(from_hour), int(from_minute), 0, tzinfo=timezonize(request.user.profile.timezone))
# Parse to
to_str_date_part, to_str_time_part = to_str.split(' ')
to_day, to_month, to_year = to_str_date_part.split('/')
to_hour, to_minute = to_str_time_part.split(':')
to_dt = dt(int(to_year), int(to_month), int(to_day), int(to_hour), int(to_minute), 0, tzinfo=timezonize(request.user.profile.timezone))
elif from_t and to_t:
from_dt = dt_from_s(float(from_t))
to_dt = dt_from_s(float(to_t))
else:
# Set "to" to NOW
to_dt = datetime.datetime.now()
if last == '1m':
from_dt = to_dt - datetime.timedelta(minutes=1)
elif last == '10m':
from_dt = to_dt - datetime.timedelta(minutes=10)
elif last == '1h':
from_dt = to_dt - datetime.timedelta(minutes=60)
elif last == '1d':
from_dt = to_dt - datetime.timedelta(days=1)
elif last == '1W':
from_dt = to_dt - datetime.timedelta(days=7)
elif last == '1M':
from_dt = to_dt - datetime.timedelta(days=31)
elif last == '1Y':
from_dt = to_dt - datetime.timedelta(days=365)
else:
        # Default "last" to 1 hour
last = '1h'
from_dt = to_dt - datetime.timedelta(minutes=60)
# Now set from_t and to_t
data['from_t'] = s_from_dt(from_dt)
data['to_t'] = s_from_dt(to_dt)
# Add timezone if not already present
try:
from_dt = pytz.UTC.localize(from_dt)
except ValueError:
pass
try:
to_dt = pytz.UTC.localize(to_dt)
except ValueError:
pass
# Move to right timezone
from_dt = from_dt.astimezone(profile_timezone)
to_dt = to_dt.astimezone(profile_timezone)
data['last'] = last
data['from_dt'] = from_dt
data['to_dt'] = to_dt
data['from_dt_str'] = str(from_dt)
data['to_dt_str'] = str(to_dt)
data['from_dt_utcfake_str'] = str(from_dt.replace(tzinfo=pytz.UTC))
data['to_dt_utcfake_str'] = str(to_dt.replace(tzinfo=pytz.UTC))
# Get messages from DB
messages = []
try:
messages = WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, from_dt=from_dt, to_dt=to_dt)
except Exception as e:
logger.error(format_exception(e))
# Prepare data for Dygraphs
total_messages = 0
for message in messages:
# Increment message counter
total_messages += 1
# Load content
content = message.data
if not content:
continue
# Load timestamp
ts = message.ts.astimezone(profile_timezone)
timestamp_dygraphs = '{}/{:02d}/{:02d} {:02d}:{:02d}:{:02d}'.format(ts.year, ts.month, ts.day, ts.hour, ts.minute,ts.second)
#timestamp_dygraphs = int(s_from_dt(ts)*1000)
        # Process all the message keys
for key in content:
# Try loading as numeric value
try:
metric_num_value = float(content[key])
except:
continue
# Append data
try:
data['timeseries'][key].append((timestamp_dygraphs, metric_num_value, ts))
except KeyError:
data['metrics'][key] = key
data['timeseries'][key] = []
data['timeseries'][key].append((timestamp_dygraphs, metric_num_value, ts))
# Set total messages
data['total_messages'] = total_messages
# Do we have to aggregate?
if total_messages > 10000:
logger.debug('Too many messages, we need to aggregate.')
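        # Round the reduction factor up to the next power of ten, so the
        # aggregated series stays within roughly 10k points.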
        aggregate_by = 10**len(str(int(total_messages/10000.0)))
        data['aggregated'] = True
        data['aggregate_by'] = aggregate_by
        data['total_messages_aggregated'] = int(total_messages/aggregate_by)
data['timeseries_aggregated']={}
for key in data['timeseries']:
if key not in data['timeseries_aggregated']:
data['timeseries_aggregated'][key] = []
# Support vars
metric_avg = 0
metric_min = None
metric_max = None
start_time_dt = None
end_time_dt = None
# Loop and aggregate data
for i, entry in enumerate(data['timeseries'][key]):
# Start time
if start_time_dt is None:
start_time_dt = entry[2]
# Avg
metric_avg += entry[1]
# Min
if metric_min is None or entry[1] < metric_min:
metric_min = entry[1]
# Max
if metric_max is None or entry[1] > metric_max:
metric_max = entry[1]
                if (i+1) % aggregate_by == 0:
# Append aggregated data
end_time_dt = entry[2]
avg_time_dt = dt_from_s(s_from_dt(start_time_dt) + ((s_from_dt(end_time_dt) - s_from_dt(start_time_dt))/2)).astimezone(profile_timezone)
timestamp_dygraphs_avg = '{}/{:02d}/{:02d} {:02d}:{:02d}:{:02d}'.format(avg_time_dt.year, avg_time_dt.month, avg_time_dt.day, avg_time_dt.hour, avg_time_dt.minute, avg_time_dt.second)
                    data['timeseries_aggregated'][key].append((timestamp_dygraphs_avg, metric_avg/aggregate_by, metric_min, metric_max))
# Reset counters
metric_avg = 0
metric_min = None
metric_max = None
start_time_dt = None
# Reassign series
del data['timeseries']
data['timeseries'] = data['timeseries_aggregated']
logger.debug('Done aggregating')
# Load last sessions
try:
count =0
sessions = Session.objects.filter(thing=thing).order_by('-last_contact')[0:3]
for session in sessions:
count += 1
session.count = count
session.duration = str(session.last_contact-session.started)
if '.' in session.duration:
session.duration=session.duration.split('.')[0]
session.started = str(session.started.astimezone(profile_timezone)).split('.')[0]
session.last_contact = str(session.last_contact.astimezone(profile_timezone)).split('.')[0]
# Format worker traceback if any
session.last_worker_status_traceback = None
try:
if session.last_worker_status.startswith('KO: '):
pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Format management traceback if any
session.last_management_status_traceback = None
try:
if session.last_management_status.startswith('KO: '):
pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
data['sessions'] = sessions
except:
data['sessions'] = None
# Enumerate the pools for this application
data['pools'] = []
for pool in Pool.objects.filter(app=thing.app):
data['pools'].append(pool)
# Ok, render
return render(request, 'dashboard_thing.html', {'data': data})
#===========================
# Session Dashboard view
#===========================
@private_view
def dashboard_thing_sessions(request):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
tid = request.GET.get('tid',None)
data['tid'] = tid
data['orpool'] = request.GET.get('orpool', None)
intaid = request.GET.get('intaid',None)
if not intaid:
intaid = request.POST.get('intaid',None)
data['intaid'] = intaid
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if start is not None:
start = int(start)
end = start + 10
elif end is not None:
end = int(end)
start = end-10
if start<0: start = 0
else:
start = 0
end = 10
data['start'] = start
data['end'] = end
# Get App
try:
app = App.objects.get(id=intaid, user=request.user)
except App.DoesNotExist:
data['error'] = 'The App with Internal ID "{}" does not exists or you do not have access rights'.format(intaid)
return render(request, 'error.html', {'data': data})
# Get Thing
try:
thing = Thing.objects.get(tid=tid, app=app)
except Thing.DoesNotExist:
data['error'] = 'The Thing with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
data['thing'] = thing
# Load sessions
try:
count = 0
sessions = Session.objects.filter(thing=thing).order_by('-last_contact')[start:end]
for session in sessions:
count += 1
session.count = count
session.duration = session.last_contact-session.started
session.started = str(session.started.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
session.last_contact = str(session.last_contact.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
# Format worker traceback if any
session.last_worker_status_traceback = None
try:
if session.last_worker_status.startswith('KO: '):
pieces = session.last_worker_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_worker_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_worker_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
# Format management traceback if any
session.last_management_status_traceback = None
try:
if session.last_management_status.startswith('KO: '):
pieces = session.last_management_status.replace('(Traceback', '\nTraceback').split('\n')
sub_pieces = pieces[0].split(' ')
session.last_management_status = sub_pieces[0] + ' ' + sub_pieces[1] + ': ' + ' '.join(sub_pieces[2:])
session.last_management_status_traceback = '\n'.join(pieces[1:])[:-1]
except:
pass
data['sessions'] = sessions
except:
data['sessions'] = None
# Ok, render
return render(request, 'dashboard_thing_sessions.html', {'data': data})
#===========================
# Message Dashboard view
#===========================
@private_view
def dashboard_thing_messages(request):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
tid = request.GET.get('tid',None)
if not tid:
tid = request.POST.get('tid',None)
data['tid'] = tid
intaid = request.GET.get('intaid',None)
if not intaid:
intaid = request.POST.get('intaid',None)
data['intaid'] = intaid
data['orpool'] = request.GET.get('orpool', None)
if not data['orpool']:
data['orpool'] = request.POST.get('orpool', None)
data['type'] = request.GET.get('type', None)
if not data['type']:
data['type'] = request.POST.get('type', None)
pagination = request.GET.get('pagination', 100)
# Force a pagination of 10 messages for the management
if data['type']=='management':
pagination=10
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if start is not None:
start = int(start)
end = start + pagination
elif end is not None:
end = int(end)
start = end-pagination
if start<0: start = 0
else:
start = 0
end = pagination
# Get App
try:
app = App.objects.get(id=intaid, user=request.user)
except App.DoesNotExist:
data['error'] = 'The App with Internal ID "{}" does not exists or you do not have access rights'.format(intaid)
return render(request, 'error.html', {'data': data})
# Get Thing
try:
thing = Thing.objects.get(tid=tid, app=app)
except Thing.DoesNotExist:
data['error'] = 'The Thing with ID "{}" does not exists or you do not have access rights'.format(tid)
return render(request, 'error.html', {'data': data})
data['thing'] = thing
data['messages'] = []
if data['type']=='worker':
# Load worker messages
try:
for msg in WorkerMessageHandler.get(aid=thing.app.aid, tid=thing.tid, last=100):
# Fix time
msg.ts = str(msg.ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
# Convert from json to string
msg.data = str(msg.data)
# Truncate if too long
if len(msg.data) >= 150:
msg.data = str(msg.data[0:150]) + '...'
data['messages'].append(msg)
except Exception as e:
logger.debug('Error: {}'.format(e))
pass
elif data['type'] == 'management':
# Create new message if we are requested to do so
new_msg = request.POST.get('new_msg',None)
data['generated_uuid'] = str(uuid.uuid4())
generated_uuid = request.POST.get('generated_uuid',None)
if new_msg and generated_uuid:
# Does a message already exists?
try:
ManagementMessage.objects.get(tid=thing.tid, uuid=generated_uuid)
except:
ManagementMessage.objects.create(aid=thing.app.aid, tid=thing.tid, data=new_msg, uuid=generated_uuid)
# Load management messages
try:
for msg in ManagementMessage.objects.filter(tid=thing.tid, aid=thing.app.aid, type='APP').order_by('-ts')[start:end]:
msg.ts = str(msg.ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
data['messages'].append(msg)
except:
pass
else:
data['error'] = 'The value "{}" for message type is not valid.'.format(type)
return render(request, 'error.html', {'data': data})
# Set pagination
data['start'] = start
data['end'] = end if len(data['messages'])>=pagination else 0
# Ok, render
return render(request, 'dashboard_thing_messages.html', {'data': data})
#===========================
# Remote Shell view
#===========================
@private_view
def dashboard_thing_shell(request):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
intaid = request.GET.get('intaid',None)
if not intaid:
intaid = request.POST.get('intaid',None)
tid = request.GET.get('tid',None)
if not tid:
tid = request.POST.get('tid',None)
data['tid'] = tid
data['orpool'] = request.GET.get('orpool', None)
if not data['orpool']:
data['orpool'] = request.POST.get('orpool', None)
# Get App
try:
app = App.objects.get(id=intaid)
except App.DoesNotExist:
data['error'] = 'The app with internal id "{}" does not exists'.format(intaid)
return render(request, 'error.html', {'data': data})
# Get Thing
try:
thing = Thing.objects.get(tid=tid, app=app)
except Thing.DoesNotExist:
data['error'] = 'The thing with tid "{}" does not exists'.format(tid)
return render(request, 'error.html', {'data': data})
data['thing'] = thing
data['messages'] = []
# Create new message if we are requested to do so
new_msg = request.POST.get('new_msg',None)
if not new_msg:
new_msg = request.GET.get('new_msg',None)
data['generated_uuid'] = str(uuid.uuid4())
generated_uuid = request.POST.get('generated_uuid',None)
if not generated_uuid:
generated_uuid = request.GET.get('generated_uuid',None)
if new_msg and generated_uuid:
# Does a message already exists?
try:
ManagementMessage.objects.get(tid=thing.tid, uuid=generated_uuid)
except:
ManagementMessage.objects.create(aid=thing.app.aid, tid=thing.tid, data=new_msg, uuid=generated_uuid, type='CMD', thing=thing)
# Load CMD management messages (filter by Thing as they are linked to the thing and not a specific app)
for msg in ManagementMessage.objects.filter(thing=thing, type='CMD').order_by('ts'):
msg.ts = str(msg.ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
if msg.reply:
msg.reply_clean = msg.reply.rstrip('\n')
msg.reply_clean = msg.reply_clean.rstrip('\n\r')
else:
msg.reply = None
data['messages'].append(msg)
# Ok, render
return render(request, 'dashboard_thing_shell.html', {'data': data})
#===========================
# New App view
#===========================
@private_view
def new_app(request):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
data['app'] = None
data['lastsessions'] = []
data['pool'] = None
data['pythings_versions'] = settings.OS_VERSIONS
# Get name form GET request
app_name = request.POST.get('app_name',None)
pythings_version = request.POST.get('pythings_version',None)
management_interval = request.POST.get('management_interval',None)
worker_interval = request.POST.get('worker_interval',None)
# Set if to use the latest app version or not
use_latest_app_version = request.POST.get('uselatest', False)
if use_latest_app_version:
use_latest_app_version = True
if app_name:
create_app_helper(name = app_name,
user = request.user,
aid = None,
management_interval = management_interval,
worker_interval = worker_interval,
pythings_version = pythings_version,
use_latest_app_version = use_latest_app_version)
data['app_name'] = app_name
return render(request, 'new_app.html', {'data': data})
#===========================
# App code editor view
#===========================
@private_view
def dashboard_app_code_editor(request, embed=False):
# Init data
data={}
data['user'] = request.user
data['profile'] = Profile.objects.get(user=request.user)
data['app'] = None
data['embed'] = '_embed' if embed else ''
# Get data
intaid = request.GET.get('intaid', None)
cid = request.GET.get('cid', None)
fileid = request.GET.get('fileid', None)
do_commit = request.GET.get('commit', None)
savednew = request.GET.get('savednew', False)
tagop = request.GET.get('tagop', None)
tagname = request.GET.get('tagname', None)
openworker = booleanize(request.GET.get('openworker', False))
data['savednew'] = savednew
data['tagop'] = tagop
data['tagname'] = tagname
if savednew != False and savednew.upper() != 'FALSE':
savednew=True
# Fix None
if cid is not None and cid.upper() == 'NONE':
cid = None
if savednew:
cid=None
data['cid'] = cid
# Get the application
try:
app = App.objects.get(id=intaid, user=request.user)
data['app'] = app
except App.DoesNotExist:
data['error'] = 'The app does not exist or you don\'t have access rights'
return render(request, 'error.html', {'data': data})
# Do we have to open the worker by default?
if openworker:
file = File.objects.filter(app=app,name='worker_task.py').order_by('-ts').first()
fileid = file.id
# Get the file if set
if fileid:
try:
fileid = int(fileid)
except:
data['error'] = 'Cannot properly handle file id'
return render(request, 'error.html', {'data': data})
try:
logger.debug('Trying to load file %s for app %s', fileid,app)
file = File.objects.get(app=app,id=fileid)
# Switch to new version if just saved
if savednew:
file = File.objects.get(app=app,name=file.name,committed=False)
fileid = file.id
data['file'] = file
except File.DoesNotExist:
# TODO: create empty file, not raise
data['error'] = 'Cannot find the version of the specified file'
return render(request, 'error.html', {'data': data})
# Get last or required commit. This is always the base for now.
latest_commit = Commit.objects.filter(app=app).latest('ts')
try:
if cid is not None and cid != latest_commit.cid:
commit = Commit.objects.get(app=app,cid=cid)
data['editable'] = False
else:
commit = latest_commit
data['editable'] = True
except:
data['error'] = 'Cannot find commit with cid "{}"'.format(cid)
return render(request, 'error.html', {'data': data})
# Load uncommitted files, if we don't have specified a particular cid
global_app_uncommitted_files = File.objects.filter(app=app,committed=False)
data['global_app_uncommitted_files'] = global_app_uncommitted_files
if cid is None:
uncommitted_files = global_app_uncommitted_files
else:
uncommitted_files = []
# Create a new commit if required
if do_commit:
if len(File.objects.filter(app=app,committed=False)) > 0:
newcommit = Commit.objects.create(app=app)
committed_files=[]
            # First, add the uncommitted files to the new commit
for file in uncommitted_files:
newcommit.files.add(file)
file.committed=True
file.save()
committed_files.append(file.path+'/'+file.name)
            # Then, add all of the last commit's files (unless already committed in a newer version)
            for file in commit.files.all():
                if file.path+'/'+file.name not in committed_files:
                    newcommit.files.add(file)
newcommit.valid=True
newcommit.save()
# Reload uncommitted files
uncommitted_files = File.objects.filter(app=app,committed=False)
            # If we had to commit but we still have uncommitted files, something went wrong
if uncommitted_files:
data['error'] = 'Something went wrong in committing, some files are still not committed.'
return render(request, 'error.html', {'data': data})
# Remap commit to the new one
commit = newcommit
# Get all the pools for this app
for pool in Pool.objects.filter(app=app):
# Update version only on development if set so
if pool.use_latest_app_version and pool.development:
pool.settings.app_version = commit.cid
pool.settings.save()
# Also, for each thing in the pool with custom settings, update the version
for thing in Thing.objects.filter(pool=pool, use_custom_settings=True):
thing.version = commit.cid
thing.save()
data['cid'] = commit.cid
data['global_app_uncommitted_files'] = []
# Do we have to tag the commit?
if tagop in ['create','edit'] and tagname is not None:
commit.tag=tagname
commit.save()
# Get all the pools for this app
for pool in Pool.objects.filter(app=app):
# Update version only on staging if set so
if pool.use_latest_app_version and pool.staging:
pool.settings.app_version = commit.cid
pool.settings.save()
# Also, for each thing in the pool with custom settings, update the version
for thing in Thing.objects.filter(pool=pool, use_custom_settings=True):
thing.version = commit.cid
thing.save()
# Set commit in data
data['commit'] = commit
# Set commit status
if uncommitted_files:
data['commit_status'] = 'UN'
else:
data['commit_status'] = 'EV'
# Get all commits
data['commits'] = Commit.objects.filter(app=app).order_by('-ts')
# Create data files lists for app_files, commited and uncommitted
app_files = []
for file in commit.files.all():
app_files.append(file)
for file in uncommitted_files:
app_files.append(file)
data['app_files'] = app_files
data['app_committed_files'] = {}
data['app_uncommitted_files'] = {}
for file in commit.files.all():
data['app_committed_files'][file.name]=file.id
for file in uncommitted_files:
data['app_uncommitted_files'][file.name]=file.id
data['fileid'] = fileid
# Fix timestamp for file
if 'file' in data and data['file']:
data['file'].ts= str(data['file'].ts.astimezone(timezonize(get_timezone_from_request(request)))).split('.')[0]
# Render
return render(request, 'dashboard_app_code_editor.html', {'data': data})
def dashboard_app_code_editor_embed(request):
return dashboard_app_code_editor(request, embed=True)
#===========================
# Apps list view
#===========================
@private_view
@xframe_options_exempt
def list_apps(request):
# Init data
data={}
data['user'] = request.user
data['apps'] = []
# Enumerate applications
for app in App.objects.filter(user=request.user):
data['apps'].append(app)
return render(request, 'list_apps.html', {'data': data})
#==========================
# Main view
#==========================
@public_view
def main(request):
# Init data
data={}
data['user'] = request.user
data['menu'] = request.GET.get('menu', 'closed')
if os.path.isfile('/opt/code/backend/pythings_app/templates/custom/main.html'):
return render(request, 'custom/main.html', {'data': data})
else:
return HttpResponseRedirect('/dashboard')
#==========================
# Terms view
#==========================
@public_view
def terms(request):
# Init data
data={}
data['user'] = request.user
return render(request, 'terms.html', {'data': data})
#==========================
# Privacy view
#==========================
@public_view
def privacy(request):
# Init data
data={}
data['user'] = request.user
return render(request, 'privacy.html', {'data': data})
#==================================================
#
# E X T R A V I E W S
#
#==================================================
| 37.176101 | 292 | 0.560678 |
29781b3d51072b78947f4e019ed1d8f9e2b0cb5c | 51 | py | Python | web/create_db.py | green-latte/hubenydisthost | 2a9136f7d4dbfd9b60dd7a2392a2343109c6caa9 | [
"MIT"
] | null | null | null | web/create_db.py | green-latte/hubenydisthost | 2a9136f7d4dbfd9b60dd7a2392a2343109c6caa9 | [
"MIT"
] | null | null | null | web/create_db.py | green-latte/hubenydisthost | 2a9136f7d4dbfd9b60dd7a2392a2343109c6caa9 | [
"MIT"
] | null | null | null | # create_db.py
from app import db
db.create_all()
| 10.2 | 18 | 0.745098 |
efb1acec5f8aa2196158cad9b08ee1dc2c9a216d | 343 | py | Python | artemis/remote/plotting/__init__.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 235 | 2016-08-26T14:18:51.000Z | 2022-03-13T10:54:39.000Z | artemis/remote/plotting/__init__.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 112 | 2016-04-30T11:48:38.000Z | 2021-01-12T20:17:32.000Z | artemis/remote/plotting/__init__.py | peteroconnor-bc/artemis | ad2871fae7d986bf10580eec27aee5b7315adad5 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2016-11-05T19:09:19.000Z | 2021-09-13T07:35:40.000Z | forward_to_server = True
def should_I_forward_to_server():
return forward_to_server
def set_forward_to_server(val=True):
    '''
    If set to False, makes sure that the process calling dbplot does not forward to a different plotting server.
    :param val: If False, dbplot calls are handled locally instead of being forwarded to a plotting server.
    :return: None
    '''
global forward_to_server
forward_to_server = val | 22.866667 | 112 | 0.725948 |
8eed9e7308107207edddebeb83ed1716d2b1a976 | 7,263 | py | Python | testflows/_core/contrib/pygments/lexers/smalltalk.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 3 | 2020-06-25T19:23:19.000Z | 2021-10-20T19:29:56.000Z | testflows/_core/contrib/pygments/lexers/smalltalk.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | null | null | null | testflows/_core/contrib/pygments/lexers/smalltalk.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 1 | 2020-02-24T12:31:45.000Z | 2020-02-24T12:31:45.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from testflows._core.contrib.pygments.lexer import RegexLexer, include, bygroups, default
from testflows._core.contrib.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
.. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition': [
            # Not perfect: we can't allow whitespace at the beginning and the end
            # without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
default('#pop'), # else pop
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth': [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
# skip whitespace and comments
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
# Squeak fileout format (optional)
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class NewspeakLexer(RegexLexer):
"""
    For `Newspeak <http://newspeaklanguage.org/>`_ syntax.
.. versionadded:: 1.1
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root': [
(r'\b(Newsqueak2)\b', Keyword.Declaration),
(r"'[^']*'", String),
(r'\b(class)(\s+)(\w+)(\s*)',
bygroups(Keyword.Declaration, Text, Name.Class, Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'<\w+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace': [
(r'\s+', Text),
(r'"[^"]*"', Comment)
],
}
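# A minimal usage sketch (assuming this vendored copy mirrors the upstream
# Pygments API): tokenize a Smalltalk snippet with the lexer defined above.
#
#     from testflows._core.contrib.pygments.lexers.smalltalk import SmalltalkLexer
#     for token, text in SmalltalkLexer().get_tokens("Transcript showCr: 'hello'."):
#         print(token, repr(text))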
| 37.056122 | 100 | 0.430814 |
be806713f1efb80d14496dcb191d6da323a0558b | 1,172 | py | Python | Others/Source/09/9.1/import_test3.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | 1 | 2018-05-30T01:38:23.000Z | 2018-05-30T01:38:23.000Z | Others/Source/09/9.1/import_test3.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | Others/Source/09/9.1/import_test3.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Import the sys and os modules
import sys,os
# Use the module name as a prefix to access the module's members
print(sys.argv[0])
# The os module's sep variable holds the platform's path separator
print(os.sep) | 55.809524 | 74 | 0.210751 |
28fb80356a4f4a0e33bcb5e6568aecc351550d23 | 78,371 | py | Python | phangsPipeline/casaSingleDishRoutines.py | e-koch/phangs_imaging_scripts | 66b8c49a78bdb0b928a77db0caefd28e26bdb2f0 | [
"MIT"
] | null | null | null | phangsPipeline/casaSingleDishRoutines.py | e-koch/phangs_imaging_scripts | 66b8c49a78bdb0b928a77db0caefd28e26bdb2f0 | [
"MIT"
] | null | null | null | phangsPipeline/casaSingleDishRoutines.py | e-koch/phangs_imaging_scripts | 66b8c49a78bdb0b928a77db0caefd28e26bdb2f0 | [
"MIT"
] | null | null | null | """
Standalone routines to analyze and manipulate single dish data.
This is based on "TP_ALMA_data_reduction/ALMA-TP-tools.py".
Last modifications:
- Initial developments by C. Herrera.
- 31.01.2017: read_source_coordinates
- 01.02.2017: More than 1 line can be defined to be excluded for baseline corrections (bug fixed 21/03/2017)
- 02.02.2017: Handle TOPO ALMA frame vs the given LSRK velocity for extraction of cube and baseline
- 27.03.2017: extract_jyperk. It was not working for Cycle 1 data.
- 26.07.2017: add flag of 7m antennas (CM#)
- 26.07.2017: correct spw Tsys value associated with the averaged spw science value
(tsysmap[spws_scie[i]+1] = spws_tsys[i]-> tsysmap[spws_scie[i]+1] = spws_tsys[ddif.argmin()])
- 26.07.2017: modified convert_vel2chan_line, because some asap files had mixed the IFs,
having IFNO and IFID different.
- 10.10.2017: handle imaging of 2 SGs of the same galaxy.
    - 28.11.2017: change directory to reconcile 12m+7m data reduction with the TP data reduction directory trees.
- 01.06.2017: Add tarfile because in some projects the jyperk file is in a tar file (auxproduct.tgz).
- 21.09.2020: Add call to GET_SOURCENAME to handle mismatched source names between the galaxy specific script and #ON_SOURCE target from the table
    - 05.09.2020: There is a version modified by C. Faesi 2020/09/05 to be run in CASA 5.1 or later for Cycle 7 data.
                  The modification includes setting `bdfflags=True` when calling importasdm; the calls to
                  `/bin/bdflags2MS` and `es.fixForCSV2555` (and the subsequent commands) in `import_and_split_ant`
                  are not needed for Cycle 7 data.
- 01.07.2021: Adapted to phangs alma pipeline, renamed the code as casaSingleDishRoutines, by D. Liu.
- 02.07.2021: Trying to adapt for CASA 5, renamed the code as casaSingleDishNewRoutines, by D. Liu.
Still need to do (probably outdated):
    - Improve error handling when files are not found, when the ASDM import fails, etc.
- Add timer (suggestion by CF)
- Add GET_SOURCENAME in main script to call the right source name. DONE CMF 21.09.2020.
    - 2021-07-05: cannot split with ant='0&0'? And without splitting, a reasonable final FITS image cube cannot be obtained?!
"""
# python2 to python3: print, sort
# Note that some sd* commands are deleted since CASA 5.
# see https://casa.nrao.edu/casadocs/casa-5.0.0/introduction/release-notes-50
# The following single dish tasks are renamed (name in CASA 4.7 -> 5.0). Note all tasks with 'old'
# at the end of the name will be deleted in future releases.
# tsdbaseline -> sdbaseline
# tsdcal -> sdcal
# tsdfit -> sdfit
# tsdsmooth -> sdsmooth
# sdaverage -> sdaverageold
# sdbaseline -> sdbaselineold
# sdbaseline2 -> sdbaseline2old
# sdcal -> sdcalold
# sdcal2 -> sdcal2old
# sdcoadd -> sdcoaddold
# sdfit -> sdfitold
# sdflag -> sdflagold
# sdflagmanager -> sdflagmanager
# sdgrid -> sdgridold
# sdlist -> sdlistold
# sdmath -> sdmathold
# sdplot -> sdplotold
# sdreduce -> sdreduceold
# sdsave -> sdsaveold
# sdscale -> sdscaleold
# sddstat -> sdstatold
# ASAP data format will also be disabled since CASA 5.
# see https://casa.nrao.edu/casadocs/casa-5.4.1/single-dish-calibration/future-development-goals-for-casa-single-dish
# Use plotms to replace sdplot,
# see https://casa.nrao.edu/docs/cookbook/casa_cookbook009.html
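# As a quick illustration of that replacement (a minimal sketch; the file name
# below is a hypothetical placeholder), an old call like
#     sdplot(infile='uid.ms.PM02.asap', plottype='spectra', specunit='channel',
#            timeaverage=True, stack='p')
# can be approximated in CASA 5+ with
#     plotms(vis='uid.ms.PM02.ms', xaxis='chan', yaxis='amp',
#            averagedata=True, avgtime='86400', avgscan=True,
#            coloraxis='corr', highres=True, plotfile='spec.png', overwrite=True)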
# TODO
#region Imports and definitions
import os, sys, re, shutil, inspect, copy, time, datetime, json, ast
import numpy as np
from scipy.ndimage import label
#import pyfits # CASA has pyfits, not astropy
import glob
import tarfile
import imp
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Analysis utilities
import analysisUtils as au
es = au.stuffForScienceDataReduction()
# CASA stuff
from . import casaStuff
# Spectral lines
from . import utilsLines as lines
# Pipeline versionining
from .pipelineVersion import version as pipeVer
#endregion
#region Routines for basic characterization
#endregion
#region Routines to analyze and extract lines in measurement sets
# Physical constants
sol_kms = 2.99792458e5
c_light = sol_kms # Speed of light in km/s
pi = np.pi
# path constants
path_script = '../script/' # Path to the script folder.
path_raw = '../raw/' # Path to the raw folder.
path_dataproduct = '../data/' # Path to data products.
# precasa5
if hasattr(casaStuff, 'sdsave'):
precasa5 = True
fsuffix = '.asap'
else:
precasa5 = False
fsuffix = '.ms'
# Check if data was calibrated with the pipeline
def checkpipeline():
if len(glob.glob(path_script+'*.xml')) > 0:
logger.info("> Data was reduced by ALMA/JAO using an automatized pipeline ")
logger.info("> Setting the variable 'pipeline' to True")
return True
else:
logger.info("> Data was reduced by ALMA/JAO using scripts ")
logger.info("> Setting the variable 'pipeline' to False")
return False
# Creating CASA tools
#def createCasaTool(mytool):
#
# if (type(casac.Quantity) != type): # casa 4.x
# myt = mytool()
# else: # casa 3.x
# myt = mytool.create()
# return(myt)
# Retrieve name of the column
def getDataColumnName(inputms):
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(inputms)
colnames = mytb.colnames()
if 'FLOAT_DATA' in colnames:
data_query= 'FLOAT_DATA'
else:
data_query = 'DATA'
mytb.close()
return(data_query)
def getDataColumnForSDBaseline(vis):
"""
    Returns the datacolumn keyword ('corrected', 'float_data' or 'data') to use with sdbaseline, based on which data columns exist in the measurement set.
"""
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(vis)
names = copy.copy(mytb.colnames())
mytb.close()
columns = []
for i in ['DATA','FLOAT_DATA','CORRECTED_DATA']:
if i in names:
columns.append(i)
#logger.debug('getDataColumnForSDBaseline: vis = %r'%(vis))
#logger.debug('getDataColumnForSDBaseline: colnames = %s'%(names))
#logger.debug('getDataColumnForSDBaseline: columns = %s'%(columns))
if 'CORRECTED_DATA' in columns:
return 'corrected'
elif 'FLOAT_DATA' in columns:
return 'float_data'
else:
return 'data'
def getDataColumnForPlotMS(vis):
return getDataColumnForSDBaseline(vis)
def getDataColumnForSplit(vis):
return getDataColumnForSDBaseline(vis)
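# A usage sketch for the helpers above (the file name is a hypothetical
# placeholder): pick the datacolumn that actually exists before splitting,
#     datacolumn = getDataColumnForSplit('uid___A002_Xxxx.ms')  # e.g. 'float_data'
#     casaStuff.split(vis='uid___A002_Xxxx.ms', outputvis='out.ms', datacolumn=datacolumn)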
def check_data_dir_being_touched(filename, clear_failed_run = False):
if os.path.exists(filename+'.touch'):
if clear_failed_run:
rm_data_dir(filename, check_being_touched = False)
rm_data_dir(filename+'.touch', check_being_touched = False)
else:
logger.error("Found "+filename+'.touch! Seems something is still running or failed? Please delete the *.touch dir to start over:\n'+os.path.abspath(filename+'.touch'))
raise Exception("Found "+filename+'.touch! Seems something is still running or failed? Please delete the *.touch dir to start over:\n'+os.path.abspath(filename+'.touch'))
def rm_data_dir(filename, check_being_touched = True):
if check_being_touched:
check_data_dir_being_touched(filename)
if os.path.exists(filename):
logger.info('Deleting '+filename)
shutil.rmtree(filename)
if os.path.exists(filename+'.flagversions'):
logger.info('Deleting '+filename+'.flagversions')
shutil.rmtree(filename+'.flagversions')
def cp_data_dir(filename_in, filename_out, check_being_touched = True, log_copied_from = False):
if not os.path.exists(filename_in):
logger.error("Data dir not found! Please check: "+os.path.abspath(filename_in))
raise Exception("Data dir not found! Please check: "+os.path.abspath(filename_in))
rm_data_dir(filename_out, check_being_touched = check_being_touched)
logger.info('Copying '+filename_in+' to '+filename_out)
shutil.copytree(filename_in, filename_out)
if os.path.exists(filename_in+'.flagversions'):
shutil.copytree(filename_in+'.flagversions', filename_out+'.flagversions')
if log_copied_from:
with open(filename_out+'.copied.from.txt', 'w') as fp:
fp.write(filename_in+'\n')
# by ALMA
def scaleAutocorr(vis, scale=1., antenna='', spw='', field='', scan=''):
if os.path.exists(vis) == False:
logger.warning("Could not find MS.")
return
if os.path.exists(vis+'/table.dat') == False:
logger.warning("No table.dat. This does not appear to be an MS.")
return
mymsmd = au.createCasaTool(casaStuff.msmdtool)
mytb = au.createCasaTool(casaStuff.tbtool)
conditions = ["ANTENNA1==ANTENNA2"]
mymsmd.open(vis)
if antenna != '':
if not isinstance(antenna, (list, tuple)):
antenna = [antenna]
antennaids = []
for i in antenna:
if re.match("^[0-9]+$", str(i)): # digits only: antenna ID
antennaids.append(int(i))
else: # otherwise: antenna name
antennaids.append(mymsmd.antennaids(i)[0])
conditions.append("ANTENNA1 in %s" % str(antennaids))
if spw != '':
if not isinstance(spw, (list, tuple)):
spw = [spw]
datadescids = []
for i in spw:
datadescids.append(mymsmd.datadescids(spw=int(i))[0])
conditions.append("DATA_DESC_ID in %s" % str(datadescids))
if field != '':
if not isinstance(field, (list, tuple)):
field = [field]
fieldids = []
for i in field:
if re.match("^[0-9]+$", str(i)): # digits only: field ID
fieldids.append(int(i))
else: # otherwise: field name
fieldids.append(mymsmd.fieldsforname(i)[0])
conditions.append("FIELD_ID in %s" % str(fieldids))
if scan != '':
if not isinstance(scan, (list, tuple)):
scan = [scan]
scannumbers = [int(i) for i in scan]
conditions.append("SCAN_NUMBER in %s" % str(scannumbers))
mymsmd.close()
if precasa5:
datacolumn = getDataColumnName(vis)
logger.info("Multiplying %s to the dataset %s column %s." % (str(scale), vis, datacolumn))
logger.info("The selection criteria are '%s'." % (" && ".join(conditions)))
mytb.open(vis, nomodify=False)
subtb = mytb.query(" && ".join(conditions))
try:
data = subtb.getcol(datacolumn)
logger.info("Dimension of the selected data: %s" % str(data.shape))
subtb.putcol(datacolumn, data*scale)
except:
logger.info("An error occurred upon reading/writing the data.")
finally:
logger.info("Closing the table.")
mytb.flush()
subtb.close()
mytb.close()
else:
logger.info("Opening the table "+vis)
mytb.open(vis, nomodify=False)
subtb = mytb.query(" && ".join(conditions))
datacolumns = []
for datacolumn in subtb.colnames():
if datacolumn in ['DATA','FLOAT_DATA','MODEL_DATA','CORRECTED_DATA']:
datacolumns.append(datacolumn)
for datacolumn in datacolumns:
try:
data = subtb.getcol(datacolumn)
logger.info("Dimension of the selected data: %s" % str(data.shape))
subtb.putcol(datacolumn, data*scale)
except:
logger.info("An error occurred upon reading/writing the data column "+datacolumn+"! The scaleAutocorr function may have failed!")
logger.info("Closing the table.")
mytb.flush()
subtb.close()
mytb.close()
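# Example call for scaleAutocorr (illustrative values): apply the 1.25
# non-linearity factor to the autocorrelations of one PM antenna in a single
# spectral window,
#     scaleAutocorr(vis='uid___A002_Xxxx.ms', scale=1.25, antenna='PM02', spw=17)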
# Create vector with antenna names
def read_ants_names(filename):
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(filename + '/ANTENNA')
vec_ants = mytb.getcol('NAME')
mytb.close()
return vec_ants
# Correct the Tsysmap (useful for old data)
def get_tsysmap(tsysmap,spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys):
for i in range(len(freq_rep_scie)):
diff = [abs(freq_rep_tsys[j] - freq_rep_scie[i]) for j in range(len(freq_rep_tsys))]
ddif = np.array(diff)
tsysmap[spws_scie[i]] = spws_tsys[ddif.argmin()]
tsysmap[spws_scie[i]+1] = spws_tsys[ddif.argmin()]
logger.info("Final map used for the observations: (they should have the same frequency)")
for i in range(len(spws_scie)):
logger.info(' %s, %s'%(spws_scie[i],tsysmap[spws_scie[i]]))
return tsysmap
# Read spw information (source and Tsys)
def read_spw(filename,source):
# Tsys spws (index)
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(filename + '/SYSCAL')
spwstsys = mytb.getcol('SPECTRAL_WINDOW_ID')
spws_tsys = np.unique(spwstsys).tolist()
mytb.close()
# Science spws (index)
mytb.open(filename + '/SOURCE')
names = mytb.getcol('NAME')
numli = mytb.getcol('NUM_LINES')
ss = np.where((names == source) & (numli == 1))
spws_scie = [int(mytb.getcol('SPECTRAL_WINDOW_ID',startrow=i,nrow=1)) for i in ss[0]]
rest_freq_scie = [float(mytb.getcol('REST_FREQUENCY',startrow=i,nrow=1)) for i in ss[0]]
mytb.close()
mytb.open(filename + '/SPECTRAL_WINDOW')
names = mytb.getcol('NAME')
rest_freq_scie = [rest_freq_scie[i] for i in range(len(spws_scie)) if "FULL_RES" in names[spws_scie[i]]]
spws_scie = [spw for spw in spws_scie if "FULL_RES" in names[spw]]
spws_scie = au.getScienceSpws(filename)
spws_scie = spws_scie.split(",")
spws_scie = [int(i) for i in spws_scie]
# Read number of channels, frequency at channel zero and compute representative frequency
    freq_zero_scie = [0.]*len(spws_scie)  # pre-allocate lists (range() is not assignable in Python 3)
    chan_width_scie = [0.]*len(spws_scie)
    num_chan_scie = [0.]*len(spws_scie)
    freq_rep_scie = [0.]*len(spws_scie)
for i in range(len(spws_scie)):
freq_zero_scie[i] = float(mytb.getcol('REF_FREQUENCY',startrow=spws_scie[i],nrow=1))
chan_width_scie[i] = float(mytb.getcol('CHAN_WIDTH',startrow=spws_scie[i],nrow=1)[0])
num_chan_scie[i] = float(mytb.getcol('NUM_CHAN',startrow=spws_scie[i],nrow=1))
freq_rep_scie[i] = (num_chan_scie[i]/2*chan_width_scie[i]+freq_zero_scie[i])/1e6
    freq_zero_tsys = [0.]*len(spws_tsys)  # pre-allocate lists (range() is not assignable in Python 3)
    chan_width_tsys = [0.]*len(spws_tsys)
    num_chan_tsys = [0.]*len(spws_tsys)
    freq_rep_tsys = [0.]*len(spws_tsys)
for i in range(len(spws_tsys)):
freq_zero_tsys[i] = float(mytb.getcol('REF_FREQUENCY',startrow=spws_tsys[i],nrow=1))
chan_width_tsys[i] = float(mytb.getcol('CHAN_WIDTH',startrow=spws_tsys[i],nrow=1)[0])
num_chan_tsys[i] = float(mytb.getcol('NUM_CHAN',startrow=spws_tsys[i],nrow=1))
freq_rep_tsys[i] = (num_chan_tsys[i]/2*chan_width_tsys[i]+freq_zero_tsys[i])/1e6
mytb.close()
return spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys,chan_width_scie,num_chan_scie
# Get information of the source velocity
def read_vel_source(filename,source):
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(filename + '/SOURCE')
names = mytb.getcol('NAME')
numli = mytb.getcol('NUM_LINES')
ss = np.where((names == source) & (numli == 1))[0]
vel_source = float(mytb.getcol('SYSVEL',startrow=ss[0],nrow=1))/1e3
vel_frame = mytb.getcolkeywords('SYSVEL')['MEASINFO']['Ref']
logger.info("Frame of source velocity is: "+vel_frame)
mytb.close()
return vel_source
# SPW where the requested line is located
def get_spw_line(vel_source,freq_rest,spws_info):
#science spws
spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys,chan_width_scie,num_chan_scie = spws_info
found = False
for i in range(len(spws_scie)):
        freq_ini = (freq_rep_scie[i]-num_chan_scie[i]/2*chan_width_scie[i]*1e-6)/(1-vel_source/c_light) # initial frequency in spw -> still to be checked since observations are in TOPO
        freq_fin = (freq_rep_scie[i]+num_chan_scie[i]/2*chan_width_scie[i]*1e-6)/(1-vel_source/c_light) # final frequency in spw -> still to be checked since observations are in TOPO
if freq_rest > min(freq_ini,freq_fin) and freq_rest < max(freq_ini,freq_fin):
found = True
return spws_scie[i]
if found == False:
logger.info("** Requested line with rest frequency "+str(freq_rest/1e3)+" GHz is not on the data **")
return False
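# Worked example of the Doppler scaling used above (illustrative values): for a
# rest frequency of 230538.0 MHz (CO J=2-1) and vel_source = 1500 km/s, the line
# sits at a sky frequency of about 230538.0*(1 - 1500/299792.458) ~ 229384.5 MHz,
# so the spw covering that frequency is the one returned.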
# Extract flagging from original data reduction file.
def extract_flagging(filename, pipeline, flag_dir='', flag_file=''):
if os.path.exists(path_script+'file_flags.py'):
shutil.move(path_script+'file_flags.py', path_script+'file_flags.py.backup')
file_flag = open(path_script+'file_flags.py', 'w')
#fileflagread = ori_path+'/galaxy-specific-scripts/flags-folder/'+flag_file
if flag_dir == '' or flag_file == '':
fileflagread = 'FILEDOESNOTEXIST'
else:
fileflagread = os.path.join(flag_dir, flag_file)
if pipeline == True:
if os.path.exists(fileflagread) == False:
logger.info("No flagging will be done. If you want to flag something, please create a file ")
logger.info("with the specific flags using the task sdflag." )
logger.info("Example: ")
logger.info("sdflag(infile = 'uid___A002_X9998b8_X5d5.ms.PM04.asap',")
logger.info(" mode = 'manual',")
logger.info(" spw = '19:0~119;3960~4079,21:0~500;3960~4079',")
logger.info(" overwrite = True)")
logger.info(" Save it as GalName-flagfile.py in galaxy-specific-scripts/flags-folder")
else:
logger.info("Reading file "+fileflagread+" for flagging")
with open(fileflagread) as f: lines_f = f.readlines()
for i in range(len(lines_f)): file_flag.write(lines_f[i])
logger.info("Flags saved in "+path_script+'file_flags.py')
else:
file_script = path_script+filename+'.scriptForSDCalibration.py'
with open(file_script) as f: lines_f = f.readlines()
with open(file_script) as f:
for i, line in enumerate(f):
ll = i
if "sdflag(infile" in line:
ss = line.index("sdflag(i")
while len(lines_f[ll].split()) != 0:
file_flag.write((lines_f[ll])[ss:len(lines_f[ll])])
ll = ll+1
if os.path.exists(fileflagread) == True:
logger.info("Reading file "+fileflagread+" for flagging")
with open(fileflagread) as f: lines_f = f.readlines()
for i in range(len(lines_f)): file_flag.write(lines_f[i])
logger.info("Flags saved in "+path_script+'file_flags.py')
file_flag.close()
# Convert the given velocity to channels (using MS file)
def convert_vel2chan(filename,freq_rest,vel_cube,spw_line,vel_source,spws_info,coords):
spws_scie,freq_rep_scie,chan_width_scie,num_chan_scie = spws_info[0],spws_info[2],spws_info[4],spws_info[5]
    idx_line = spws_scie.index(spw_line)  # indexing a plain list with an np.where() array fails in recent numpy
    freq_rep_line = freq_rep_scie[idx_line]
    chan_width_line = chan_width_scie[idx_line]/1e6
    num_chan_line = num_chan_scie[idx_line]
vel1 = float((vel_cube.split('~'))[0])
vel2 = float((vel_cube.split('~'))[1])
freq1 = (1-vel1/c_light)*freq_rest
freq2 = (1-vel2/c_light)*freq_rest
ra = coords.split()[1]
ra = ra.replace("h",":")
ra = ra.replace("m",":")
dec = coords.split()[2]
dec = dec.replace("d",":")
dec = dec.replace("m",":")
date = au.getObservationStartDate(filename)
date = (date.split()[0]).replace('-','/')+'/'+date.split()[1]
freq1_topo = au.lsrkToTopo(freq1,date,ra,dec)
freq2_topo = au.lsrkToTopo(freq2,date,ra,dec)
freq_chan0 = freq_rep_line-(num_chan_line/2-0.5)*chan_width_line
chan1 = int(round((freq1_topo-freq_chan0)/chan_width_line))
chan2 = int(round((freq2_topo-freq_chan0)/chan_width_line))
return min(chan1,chan2),max(chan1,chan2)
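# Worked example of the channel conversion above (illustrative numbers): with
# freq_chan0 = 230000.0 MHz and chan_width_line = 0.5 MHz, a TOPO frequency of
# 230100.0 MHz lands on channel round((230100.0 - 230000.0)/0.5) = 200.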
# Convert the given velocity to channels (using ASAP file with unique spw)
def convert_vel2chan_line(filename_in,freq_rest,vel_line,spw_line,coords,date):
# freq_rest must be in units of MHz
vel1 = float((vel_line.split('~'))[0])
vel2 = float((vel_line.split('~'))[1])
freq1 = (1-vel1/c_light)*freq_rest*1e6 # in units of Hz
freq2 = (1-vel2/c_light)*freq_rest*1e6 # in units of Hz
ra = coords.split()[1]
ra = ra.replace("h",":")
ra = ra.replace("m",":")
dec = coords.split()[2]
dec = dec.replace("d",":")
dec = dec.replace("m",":")
freq1_topo = au.lsrkToTopo(freq1, date, ra, dec)
freq2_topo = au.lsrkToTopo(freq2, date, ra, dec)
if fsuffix == '.asap':
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(filename_in)
nchan = mytb.getkeyword('nChan')
if_eq = mytb.getcol('FREQ_ID',startrow=1,nrow=1)
bandw = mytb.getkeyword('Bandwidth')
mytb.close()
mytb.open(filename_in+'/FREQUENCIES')
freq_chanref = mytb.getcol('REFVAL',startrow=if_eq,nrow=1) # /1e6 # keep in units of Hz
chanref = mytb.getcol('REFPIX',startrow=if_eq,nrow=1)
chan_width = mytb.getcol('INCREMENT',startrow=if_eq,nrow=1) # /1e6 # keep in units of Hz
mytb.close()
else:
mytb = au.createCasaTool(casaStuff.tbtool)
mytb.open(filename_in+os.sep+'SPECTRAL_WINDOW')
spwids = np.arange(mytb.nrows())
spw_line = -1
for ispw in spwids:
nchan = mytb.getcell('NUM_CHAN', ispw)
chan_freqs = mytb.getcell('CHAN_FREQ', ispw)
chan_widths = mytb.getcell('CHAN_WIDTH', ispw)
# check if line fully in this spw
if (np.min(chan_freqs)-freq1_topo)*(np.max(chan_freqs)-freq1_topo) < 0 and \
(np.min(chan_freqs)-freq2_topo)*(np.max(chan_freqs)-freq2_topo) < 0:
spw_line = ispw
break
if spw_line < 0:
logger.error('Error! Could not find a spectral window that fully contains the line from frequency %s to %s Hz.'%(freq1_topo, freq2_topo))
raise Exception('Error! Could not find a spectral window that fully contains the line from frequency %s to %s Hz.'%(freq1_topo, freq2_topo))
nchan = mytb.getcell('NUM_CHAN', spw_line)
chan_freqs = mytb.getcell('CHAN_FREQ', spw_line)
chan_widths = mytb.getcell('CHAN_WIDTH', spw_line)
chanref = 0
freq_chanref = chan_freqs[chanref]
chan_width = chan_widths[chanref]
mytb.close()
# note that the returned chan indices below start from 0
# also note that the spw_line may change
freq_chan0 = freq_chanref-chanref*chan_width
chan1 = int(round((freq1_topo-freq_chan0)/chan_width))
chan2 = int(round((freq2_topo-freq_chan0)/chan_width))
return min(chan1,chan2),max(chan1,chan2),nchan,spw_line
# Create string with spw and channel for baseline correction
def str_spw4baseline(filename_in,freq_rest,vel_line,spw_line,coords):
#filename = re.search('(.+?).ms',filename_in).group(0) # this is the asap data? <TODO><20210705> dzliu commented out this, not sure..
date = au.getObservationStartDate(filename_in)
date = (date.split()[0]).replace('-','/')+'/'+date.split()[1]
vel_line_s = vel_line.split(';')
nlines = len(vel_line_s)
    channels_v = [0]*(nlines*2)  # pre-allocate (range() is not assignable in Python 3)
for i in range(nlines):
vel_str = vel_line_s[i]
chan1_line,chan2_line,nchan_line,spw_line = convert_vel2chan_line(filename_in,freq_rest,vel_str,spw_line,coords,date)
channels_v[2*i+1] = chan2_line
channels_v[2*i] = chan1_line
channels_v = sorted(channels_v)
# String to define spws for baseline correction
spw_extr = str(spw_line)+":0~"+str(channels_v[0])+";"
if nlines > 1:
for i in range(nlines-1):
spw_extr = spw_extr + str(channels_v[2*i+1])+"~"+ str(channels_v[2*i+2])+";"
spw_extr = spw_extr + str(channels_v[-1])+"~"+str(max(channels_v[-1],nchan_line))
return spw_extr
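# Example of the selection string built above (hypothetical channels): one line
# spanning channels 1000~1200 in spw 17 of a 4080-channel window yields
#     '17:0~1000;1200~4080'
# i.e. every channel except the line, which is what the baseline fit should use.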
# Extract variable jyperk, used to convert from K to Jy.
def extract_jyperk(filename, spw_line, pipeline):
logger.info("Extracting Jy per K conversion factor")
if pipeline == True:
file_script = 'jyperk.csv'
ant_arr = []
spw_arr = []
val_arr = []
if os.path.isfile(file_script) == False:
filetgz = glob.glob("*auxproducts.tgz")
tar = tarfile.open(filetgz[0])
tar.extractall()
tar.close()
with open(file_script) as f:
for line in f:
if filename in line:
line_arr = line.split(',')
ant_arr.append(line_arr[1])
spw_arr.append(int(line_arr[2]))
val_arr.append(line_arr[4][0:line_arr[4].index('\n')])
jyperk = {k: {e:{'mean':{}} for e in np.unique(spw_arr)} for k in np.unique(ant_arr)}
for i in range(len(ant_arr)): jyperk[ant_arr[i]][spw_arr[i]]['mean']= float(val_arr[i])
return jyperk
else:
file_script = path_script+filename+'.scriptForSDCalibration.py'
vec_jyperk = ''
with open(file_script) as f: lines_f = f.readlines()
with open(file_script) as f:
for i, line in enumerate(f):
ll = i
if "jyperk = " in line:
ss = line.index("jyperk")
while len(lines_f[ll].split()) != 0:
if ll == i+1: ss2 = lines_f[ll].index("{")
if ll == i:
vec_jyperk = vec_jyperk+(lines_f[ll])[ss:len(lines_f[ll])]
else:
vec_jyperk = vec_jyperk+(lines_f[ll])[ss2:len(lines_f[ll])]
ll = ll+1
kw = {}
        exec(vec_jyperk, kw)  # populate kw with the 'jyperk' dict parsed from the script (works in Python 2 and 3)
jyperk = kw['jyperk']
return jyperk
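# The returned object is a nested dict keyed by antenna and spw, e.g.
# (illustrative values) jyperk['PM02'][17]['mean'] -> 43.6, the Jy/K factor
# used later to convert the calibrated spectra from Kelvin to Jansky.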
# Read source coordinates
def read_source_coordinates(filename,source):
coord_source = au.getRADecForSource(filename,source)
RA_h = (coord_source.split(' ')[0]).split(':')[0]
RA_m = (coord_source.split(' ')[0]).split(':')[1]
RA_s = (coord_source.split(' ')[0]).split(':')[2]
DEC_d = (coord_source.split(' ')[1]).split(':')[0]
DEC_m = (coord_source.split(' ')[1]).split(':')[1]
DEC_s = (coord_source.split(' ')[1]).split(':')[2]
coord = "J2000 "+str(RA_h)+"h"+str(RA_m)+"m"+str(RA_s[0:6])+" "+str(DEC_d)+"d"+str(DEC_m)+"m"+str(DEC_s)
return coord
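# Example of the returned string (illustrative coordinates):
#     'J2000 04h03m53.920 -43d24m41.41'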
# Get source name
def get_sourcename(filename):
mytb = au.createCasaTool(casaStuff.msmdtool)
mytb.open(filename)
source = mytb.fieldnames()[mytb.fieldsforintent('OBSERVE_TARGET#ON_SOURCE')[0]]
mytb.close()
return source
# Create string of spws to apply the Tsys
def str_spw_apply_tsys(spws_info):
#science spws
spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys = spws_info[0:4]
spws_all = spws_tsys+spws_scie
#spws_all = sorted(spws_all)
spws_all = sorted(list(set(spws_all)))
#spws_tsys_str = (str(spws_tsys))[1:len(str(spws_tsys))-1]
#spws_scie_str = (str(spws_scie))[1:len(str(spws_scie))-1]
#spws_all_str = (str(spws_all))[1:len(str(spws_all))-1]
spws_tsys_str = ','.join([str(t) for t in spws_tsys])
spws_scie_str = ','.join([str(t) for t in spws_scie])
spws_all_str = ','.join([str(t) for t in spws_all])
return spws_scie_str,spws_tsys_str,spws_all_str
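# Example (hypothetical spw ids): spws_scie=[17,19] and spws_tsys=[9,11] give
#     ('17,19', '9,11', '9,11,17,19')
# i.e. the science, Tsys and combined selection strings passed to sdcal/sdcal2.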
# Check date of observations to decide if the non-linearity correction should be applied or not.
def check_date_nonlinearity(filename):
date_obs = au.getObservationStart(filename)/24/60/60.
date_change = au.dateStringToMJD('2015/10/01 00:00:00')
if abs(date_obs-date_change) <= 1:
logger.info("Data obtained within 1 day of the change, be careful!" )
if date_obs >= date_change:
logger.info("Data obtained after 2015/10/01, non-linearity not applied")
return False
if date_obs < date_change:
logger.info("Data obtained before 2015/10/01, non-linearity applied")
return True
# Check if we are in the correct directory
def checkdir(currentdir,path_galaxy):
if path_galaxy in currentdir:
return True
else:
return False
def checktmp():
if os.path.isdir('../'+path_galaxy) == False:
logger.info("Temporal folder does not exists. Creating it and copying raw data")
os.system('mkdir -p ../'+path_galaxy)
os.system('cp -rf ../../../'+path_galaxy[4:-1]+'/calibration ../'+path_galaxy)
os.system('cp -rf ../../../'+path_galaxy[4:-1]+'/raw ../'+path_galaxy)
os.system('cp -rf ../../../'+path_galaxy[4:-1]+'/script ../'+path_galaxy)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Data reduction steps
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#
# Step 0
#*-*-*-*-*-*
def check_exists(filename):
filename_asdm = filename[0:filename.find('.ms')]+'.asdm.sdm'
logger.info("> Checking ALMA raw data existence: ")
logger.info(" "+os.path.abspath(os.path.join(path_raw, filename_asdm))+" "+str(os.path.exists(path_raw+filename_asdm)))
if os.path.exists(path_raw+filename_asdm) == True:
return True
else:
logger.info("** Original ALMA data "+filename_asdm +" does NOT exist: **")
logger.info(" Skipping file ")
return False
#*-*-*-*-*-*_*-*-*-*-*-*
# Step 1 Import data
#*-*-*-*-*-*-*-*-*-*-*
def import_and_split_ant(filename, precycle7=True, doallants=True, dosplitants=True, doplots=True):
"""Import and split antenna for single dish raw data.
    We will copy the raw "*.asdm.sdm" data from its original location to the working directory.
Args:
filename (str): The data name for output with suffix ".ms". Does not include the file path, which should be defined in the global variable `path_raw`.
        precycle7 (bool): Whether the data were taken pre-Cycle 7, i.e., Cycle 0-6.
        doallants (bool): Whether to make an MS with all antennae in it.
        dosplitants (bool): Whether to split the MS by antenna (always done for pre-CASA 5 versions).
        doplots (bool): Whether to produce diagnostic plots.
    Note:
        Whether we run under a pre-CASA 5 version (CASA 3.X.X-4.X.X) is taken from the module-level `precasa5` flag, not from an argument.
"""
# <TODO> Can we be more smart on defining the precycle7 variable?
logger.info("==================================================")
logger.info("= Step 1 - Import ASDM data and split by antenna =")
logger.info("==================================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_1_for_'+filename[0:-3]):
logger.info('Found file: done_step_1_for_'+filename[0:-3]+'. Will not re-do step 1.')
return
if not os.path.isdir('plots'):
os.makedirs('plots') # folder containing all plots
if not os.path.isdir('obs_lists'):
os.makedirs('obs_lists') # folder containing all observation lists (i.e., listobs, sdlist)
# 1.1 Import of the ASDM
logger.info("1.1 Importing from ASDM to MS")
# clear up previous failed runs if *.touch exists
check_data_dir_being_touched(filename, clear_failed_run=True)
if not os.path.exists(filename):
# mark the current running with a *.touch directory
os.mkdir(filename+'.touch')
# copy raw data to filename0, then run importasdm
filename0 = filename[0:filename.find('.ms')] # remove the suffix ".ms"
if not (os.path.exists(filename0) and os.path.isfile(filename0+'.copied.from.txt')):
cp_data_dir(path_raw+filename0+'.asdm.sdm', filename0, log_copied_from = True)
if precycle7:
bdfflags=False
else:
bdfflags=True
logger.info('Running CASA importasdm: '+filename0+' -> '+filename)
casaStuff.importasdm(filename0,
asis='Antenna Station Receiver Source CalAtmosphere CalWVR CorrelatorMode SBSummary',
bdfflags=bdfflags,
process_caldevice=False,
with_pointing_correction=True)
if precycle7 and precasa5:
# Transfer specific flags (BDF flags) from the ADSM to the MS file
logger.info(os.environ['CASAPATH'].split()[0]+'/bin/bdflags2MS -f "COR DELA INT MIS SIG SYN TFB WVR ZER" '+filename0+' '+filename)
os.system(os.environ['CASAPATH'].split()[0]+'/bin/bdflags2MS -f "COR DELA INT MIS SIG SYN TFB WVR ZER" '+filename0+' '+filename)
# Check for known issue, CSV-2555: Inconsistency in FIELD_ID, SOURCE_ID and Spw_ID in single dish data
es.fixForCSV2555(filename)
# 1.2 Listobs
logger.info("1.2 Creating listobs for MS file")
outname = filename+'.listobs.txt'
if os.path.exists('obs_lists/'+outname):
os.system('rm -rf obs_lists/'+outname)
casaStuff.listobs(vis = filename,
listfile = 'obs_lists/'+outname)
if doplots == True:
logger.info("Running au.getTPSampling, saving plots to "+'plots/'+filename+'.sampling.png')
au.getTPSampling(vis = filename,
showplot = True,
plotfile = 'plots/'+filename+'.sampling.png')
# 1.3 A priori flagging: e.g., mount is off source, calibration device is not in correct position, power levels are not optimized, WCA not loaded...
logger.info("1.3 Applying a priori flagging, check plots/"+filename+".flagcmd.png plot to see these flags.")
if doplots:
casaStuff.flagcmd(vis = filename,
inpmode = 'table',
useapplied = True,
action = 'plot',
plotfile = 'plots/'+filename+'.flagcmd.png')
casaStuff.flagcmd(vis = filename,
inpmode = 'table',
useapplied = True,
action = 'apply')
# mark the current running as finished by deleting the *.touch directory
os.rmdir(filename+'.touch')
else:
logger.info('Found imported data: '+filename+' - Steps 1.2 and 1.3 are skipped.')
    # If there are any 7m antennas, flag them
vec_ants = read_ants_names(filename)
ants_7m = [s for s in vec_ants if "CM" in s]
if len(ants_7m) > 0:
logger.info('Found 7m antennae, flagging those.')
str_ants = ', '.join(ants_7m)
casaStuff.flagdata(vis = filename,
mode = 'manual',
antenna = str_ants,
action = 'apply')
# if doallants, make an MS with all antennae in it
if doallants:
cp_data_dir(filename, filename+'.allant'+fsuffix)
# if precasa5, always dosplitants
if precasa5:
dosplitants = True
# if dosplitants, make an MS for each antenna, with a file name like filename+'.'+ant+fsuffix
if dosplitants:
# 1.4 Split by antenna
logger.info("1.4 Splitting the file by antennas")
vec_ants_t = read_ants_names(filename)
vec_ants = [s for s in vec_ants_t if any(xs in s for xs in ['PM','DV'])]
for ant in vec_ants:
rm_data_dir(filename+'.'+ant+fsuffix)
if precasa5:
casaStuff.sdsave(infile = filename,
splitant = True,
outfile = filename+fsuffix,
overwrite = True)
# note that output file names will be filename+'.'+ant+fsuffix
#1.5 sdlist
logger.info("1.5 Create sdlist for each splitted file.")
for ant in vec_ants:
if os.path.exists('obs_lists/'+filename+'.'+ant+fsuffix+'.sdlist'):
os.remove('obs_lists/'+filename+'.'+ant+fsuffix+'.sdlist')
casaStuff.sdlist(infile = filename+'.'+ant+fsuffix+'',
outfile = 'obs_lists/'+filename+'.'+ant+fsuffix+'.sdlist')
else:
for ant in vec_ants:
use_casa_split_antenna = True
if use_casa_split_antenna:
logger.info('Running split to make '+filename+'.'+ant+fsuffix+', datacolumn is '+getDataColumnForSplit(filename))
casaStuff.split(vis = filename,
outputvis = filename+'.'+ant+fsuffix,
antenna = '%s&&&'%(ant),
datacolumn = getDataColumnForSplit(filename))
#<Note># CASA split with antenna = '0&0' does not work, should use '0&&&' to get only autocorrelations,
# see https://casa.nrao.edu/docs/taskref/split-task.html
else:
#<TODO># these are not well tested
# this is an alternative way to split single antenna autocorr data
filename_in = filename
filename_out = filename+'.'+ant+fsuffix+'.tmp'
cp_data_dir(filename_in, filename_out)
#
other_ants = copy.copy(vec_ants)
other_ants.remove(ant)
str_other_ants = ';'.join(other_ants)
logger.info('Running flagdata to flag '+str_other_ants+' in '+filename_out)
casaStuff.flagdata(vis = filename_out,
mode = 'manual',
antenna = str_other_ants,
action = 'apply')
#
filename_in = filename+'.'+ant+fsuffix+'.tmp'
filename_out = filename+'.'+ant+fsuffix
rm_data_dir(filename_out)
logger.info('Running split to make '+filename_out+', datacolumn is '+getDataColumnForSplit(filename_in))
casaStuff.split(vis = filename_in,
outputvis = filename_out,
keepflags = False,
datacolumn = getDataColumnForSplit(filename_in))
#1.5 sdlist
logger.info("1.5 Create listobs for each splitted file.")
for ant in vec_ants:
if os.path.exists('obs_lists/'+filename+'.'+ant+fsuffix+'.listobs.txt'):
os.remove('obs_lists/'+filename+'.'+ant+fsuffix+'.listobs.txt')
casaStuff.listobs(vis = filename+'.'+ant+fsuffix+'',
listfile = 'obs_lists/'+filename+'.'+ant+fsuffix+'.listobs.txt')
with open('done_step_1_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 2 Generate Tsys and apply flagging
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
def gen_tsys_and_flag(filename, spws_info, pipeline, flag_dir='', flag_file='', doplots=False):
logger.info("========================================================")
logger.info(" Step 2 Generate Tsys and apply flagging")
logger.info("========================================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_2_for_'+filename[0:-3]):
logger.info('Found file: done_step_2_for_'+filename[0:-3]+'. Will not re-do step 2.')
with open(filename+'.spwmap.json', 'r') as fp:
spwmap = json.load(fp)
        spwmap = ast.literal_eval(json.dumps(spwmap)) # Removing unicode chars
return spwmap
#if checkdir(os.getcwd(),path_galaxy) == False:
# os.chdir('../'+path_galaxy+'calibration')
if not os.path.isdir('plots'):
os.makedirs('plots') # folder containing all plots
# 2.1 Generation of the Tsys cal table
logger.info(" 2.1 Generating Tsys calibration table")
rm_data_dir(filename+'.tsys')
logger.info('Running gencal to make '+filename+'.tsys')
casaStuff.gencal(vis = filename,
caltable = filename+'.tsys',
caltype = 'tsys')
# 2.2 Create png plots of CASA Tsys and bandpass solution
logger.info(" 2.2 Create plots of Tsys and bandpass solution")
if doplots:
if os.path.exists('plots/'+filename+'.tsys.plots.overlayTime/'+filename+'.tsys'):
os.system('rm -Rf plots/'+filename+'.tsys.plots.overlayTime/'+filename+'.tsys')
casaStuff.plotbandpass(caltable=filename+'.tsys',
overlay='time',
xaxis='freq', yaxis='amp',
subplot=22,
buildpdf=False,
interactive=False,
showatm=True,
pwv='auto',
chanrange='92.1875%',
showfdm=True,
field='',
figfile='plots/'+filename+'.tsys.plots.overlayTime/'+filename+'.tsys')
# Create png plots for Tsys per source with antennas
es.checkCalTable(filename+'.tsys', msName=filename, interactive=False)
if os.path.exists('plots/'+filename+'.tsys.plots'):
os.system('rm -rf plots/'+filename+'.tsys.plots')
os.system('mv '+filename+'.tsys.plots'+' '+'plots/')
# 2.3 Do initial flagging
logger.info("2.3 Initial flagging, reading flags in file file_flags.py. You can modify this file to add more flags")
extract_flagging(filename, pipeline, flag_dir=flag_dir, flag_file=flag_file) # Extract flags from original ALMA calibration script (sdflag entries)
if os.path.exists(path_script+'file_flags.py'):
        exec(open(path_script+'file_flags.py').read()) #<TODO><DZLIU># execfile() is Python-2-only; exec(open(...).read()) works in both Python 2 and 3
# 2.4 Create Tsys map
logger.info("2.4 Creating Tsysmaps" )
    # Read spws and frequencies for science and tsys
spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys = spws_info[0:4]
#from recipes.almahelpers import tsysspwmap
tsysmap = casaStuff.tsysspwmap(vis = filename, tsystable = filename+'.tsys', trim = False)
logger.info("Spectral windows for science are: %s, %s"%(spws_scie, freq_rep_scie))
logger.info("Spectral windows for tsys are : %s, %s"%(spws_tsys, freq_rep_tsys))
logger.info("Original map between science and tsys spws: (they should have the same frequency)")
for i in range(len(spws_scie)):
logger.info('%s, %s'%(spws_scie[i],tsysmap[spws_scie[i]]))
#tsysmap = get_tsysmap(tsysmap,spws_scie,spws_tsys,freq_rep_scie,freq_rep_tsys)
spwmap = {}
for i in spws_scie:
if not tsysmap[i] in spwmap.keys():
spwmap[tsysmap[i]] = []
spwmap[tsysmap[i]].append(i)
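    # Example of the resulting mapping (hypothetical spw ids): if tsysmap sends
    # science spws 17 and 19 to Tsys spws 9 and 11, then spwmap = {9: [17], 11: [19]}.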
with open(filename+'.spwmap.json', 'w') as fp:
json.dump(spwmap, fp, sort_keys=True, indent=4) # write spwmap to json file
with open('done_step_2_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
return spwmap
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 3 From counts to Kelvin
#-*-*-*-*-*-*-*-*-*-*
def counts2kelvin(filename, ant_list=None, spws_info=None, spwmap=None, doplots=False):
logger.info("==================================")
logger.info("= Step 3 - From counts to Kelvin =")
logger.info("==================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_3_for_'+filename[0:-3]):
logger.info('Found file: done_step_3_for_'+filename[0:-3]+'. Will not re-do step 3.')
return
if ant_list is None:
ant_list = []
if spwmap is None:
logger.error('Error! spwmap is not defined when calling counts2kelvin()!')
raise Exception('Error! spwmap is not defined when calling counts2kelvin()!')
if spws_info is None:
logger.error('Error! spws_info is not defined when calling counts2kelvin()!')
raise Exception('Error! spws_info is not defined when calling counts2kelvin()!')
if not os.path.isdir('plots'):
os.makedirs('plots') # folder containing all plots
logger.info("3.1 Converting data into Kelvin Ta* = Tsys * (ON-OFF)/OFF")
# Get string with needed spws to apply Tsys
spws_scie_str, spws_tsys_str, spws_all_str = str_spw_apply_tsys(spws_info)
    logger.debug('filename: '+str(filename))
    logger.debug('ant_list: '+str(ant_list))
    logger.debug('spws_scie_str: '+str(spws_scie_str))
    logger.debug('spws_tsys_str: '+str(spws_tsys_str))
    logger.debug('spws_all_str: '+str(spws_all_str))
    logger.debug('spwmap: '+str(spwmap))
    logger.debug('doplots: '+str(doplots))
fin = fsuffix
finout = fsuffix+'.2'
if len(ant_list) == 0:
ant_list = [None]
for ant in ant_list:
if ant is not None:
filename_in = filename+'.'+ant+fin
filename_out = filename+'.'+ant+finout
else:
filename_in = filename+'.allant'+fin
filename_out = filename+'.allant'+finout
rm_data_dir(filename_out)
if precasa5:
logger.info('Running sdcal2 to make '+filename_out)
casaStuff.sdcal2(infile = filename_in,
calmode = 'ps,tsys,apply',
spw = spws_all_str,
tsysspw = spws_tsys_str,
spwmap = spwmap,
outfile = filename_out,
overwrite = True)
if doplots == True:
es.SDcheckSpectra(filename_out, spwIds=spws_scie_str, interactive=False)
else:
cp_data_dir(filename_in, filename_out)
logger.info('Running sdcal to make '+filename_out)
casaStuff.sdcal(infile = filename_out,
calmode = 'ps,tsys,apply',
spw = spws_all_str,
spwmap = spwmap,
outfile = filename_out,
overwrite = True,
)
# -- https://casa.nrao.edu/casadocs/casa-5.4.1/single-dish-calibration/single-dish-data-calibration-and-reduction
# Note that we didn't specify the Tsys spectral windows in the call to sdcal.
# For ALMA single-dish data from Cycle 3 onward, this is okay since the Tsys
# and science data share the same spectral window.
# Alternatively, the mapping between the Tsys
# and science spectral windows can be explicitly set with spwmap and spw.
# In this case, we would use:
# sdcal(infile=vis, calmode='ps,tsys,apply', spwmap={17:[17], 19:[19], 21:[21],23:[23]}, spw='17,19,21,23')
if doplots == True:
es.SDcheckSpectra(filename_out, msName=filename_out, spwIds=spws_scie_str, interactive=False)
# must use new analysisUtils.py with getCasaVersion()
# this will create plot files in directory filename_out+'.plots'
# note that these plots are uncalibrated
apply_nl = check_date_nonlinearity(filename)
if apply_nl == True:
logger.info("3.2 Applying non-linearity correction factor if data were obtained before the 2015-10-01")
if precasa5:
casaStuff.sdscale(infile = filename_out,
outfile = filename_out,
factor = 1.25,
overwrite=True)
else:
#raise Exception('Data need pre-CASA-5 version for sdscale!')
            pass #<TODO># for debugging only; sdscale was removed in CASA 5+, so the non-linearity scaling still needs an implementation here (or re-enable the raise above)
# end for ant loop
with open('done_step_3_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
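# Worked example of the position-switching calibration in step 3.1 (illustrative
# numbers): with Tsys = 80 K, ON = 1.05e6 counts and OFF = 1.00e6 counts,
#     Ta* = Tsys*(ON-OFF)/OFF = 80*(1.05e6-1.00e6)/1.00e6 = 4 K.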
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 4 Extract the cube including the line
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
def extract_cube(filename, source, name_line, ant_list=None, freq_rest=None, spws_info=None, vel_source=None, vel_cube=None, doplots=False, overwrite=False):
logger.info("=========================================================")
logger.info("= Step 4 - Extracting cube including the requested line =")
logger.info("=========================================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_4_for_'+filename[0:-3]):
logger.info('Found file: done_step_4_for_'+filename[0:-3]+'. Will not re-do step 4.')
return
if ant_list is None:
ant_list = []
if freq_rest is None:
logger.error('Error! freq_rest is not defined when calling extract_cube()!')
raise Exception('Error! freq_rest is not defined when calling extract_cube()!')
if spws_info is None:
logger.error('Error! spws_info is not defined when calling extract_cube()!')
raise Exception('Error! spws_info is not defined when calling extract_cube()!')
if vel_source is None:
logger.error('Error! vel_source is not defined when calling extract_cube()!')
raise Exception('Error! vel_source is not defined when calling extract_cube()!')
if vel_cube is None:
logger.error('Error! vel_cube is not defined when calling extract_cube()!')
raise Exception('Error! vel_cube is not defined when calling extract_cube()!')
if not os.path.isdir('plots'):
os.makedirs('plots') # folder containing all plots
if not os.path.isdir('obs_lists'):
        os.makedirs('obs_lists') # folder containing all observation lists (i.e., listobs, sdlist)
# Defining extensions
fin = fsuffix+'.2'
finout = fsuffix+'.3'
if len(ant_list) == 0:
ant_list = [None]
for ant in ant_list:
if ant is not None:
filename_in = filename+'.'+ant+fin
filename_out = filename+'.'+ant+finout
else:
filename_in = filename+'.allant'+fin
filename_out = filename+'.allant'+finout
# Get the spw where the requested line is located
spw_line = get_spw_line(vel_source,freq_rest,spws_info)
logger.info("source: "+str(source))
logger.info("vel_source: "+str(vel_source))
logger.info("freq_rest: "+str(freq_rest))
logger.info("spw_line: "+str(spw_line))
# Plotting the line
if doplots:
plotfile = 'plots/'+filename_in+'.spw'+str(spw_line)+'.spec.png'
if os.path.exists(plotfile) and overwrite:
os.remove(plotfile)
if not os.path.exists(plotfile):
logger.info("4.1 Plotting each spw")
if precasa5:
logger.info('Running sdplot to make '+plotfile)
casaStuff.sdplot(infile=filename_in,
plottype='spectra', specunit='channel',
timeaverage=True, stack='p',
outfile=plotfile)
else:
logger.info('Running plotms to make '+plotfile)
casaStuff.plotms(vis=filename_in,
ydatacolumn=getDataColumnForPlotMS(filename_in),
intent='OBSERVE_TARGET#ON_SOURCE',
field=source, spw=str(spw_line),
averagedata=True, avgtime='86400', avgscan=True,
xaxis='vel', yaxis='amp', coloraxis='ant1', showlegend=True,
iteraxis='corr', xselfscale=True, xsharedaxis=True, gridrows=2,
highres=True, dpi=300, showmajorgrid=True, majorstyle='dot',
plotfile=plotfile, overwrite=True,
)
# Get the string of the channels to be extracted from the original cube
coords = read_source_coordinates(filename,source)
chan1_cube,chan2_cube = convert_vel2chan(filename,freq_rest,vel_cube,spw_line,vel_source,spws_info,coords)
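        # CASA spw/channel selection syntax is 'SPW:FIRST~LAST', e.g. a value
        # like '17:100~900' would keep channels 100-900 of spectral window 17
        # (the numbers here are illustrative, not taken from this dataset)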
spw_extr = str(spw_line)+":"+str(chan1_cube)+"~"+str(chan2_cube)
logger.info("4.2 Extracting a cube with the line")
rm_data_dir(filename_out)
if precasa5:
logger.info('Running sdsave to make '+filename_out)
casaStuff.sdsave(infile=filename_in,
field=source,
spw=spw_extr,
outfile=filename_out)
            listfile = 'obs_lists/'+filename_out+'.list'
if os.path.exists(listfile):
logger.info('Deleting '+listfile)
os.remove(listfile)
logger.info('Running sdlist to make '+listfile)
casaStuff.sdlist(infile=filename_out,
outfile=listfile)
else:
logger.info('Running split to make '+filename_out+', datacolumn is '+getDataColumnForSplit(filename_in))
casaStuff.split(vis=filename_in,
field=source,
spw=spw_extr,
outputvis=filename_out,
datacolumn=getDataColumnForSplit(filename_in))
            listfile = 'obs_lists/'+filename_out+'.listobs.txt'
if os.path.exists(listfile):
logger.info('Deleting '+listfile)
os.remove(listfile)
logger.info('Running listobs to make '+listfile)
casaStuff.listobs(vis=filename_out,
listfile=listfile)
if doplots == True:
logger.info("4.3 Plotting the line spectrum averaged in time")
if name_line != '':
name_line2 = re.sub(r'_([0-9]+kmsres)', r'_originalres', name_line)
else:
name_line2 = 'unknown'
plotfile = 'plots/'+filename_out+'.line.'+name_line2+'.spec.png'
if os.path.exists(plotfile):
os.remove(plotfile)
if precasa5:
logger.info('Running sdplot to make '+plotfile)
casaStuff.sdplot(infile=filename_out,
plottype='spectra', specunit='km/s',
restfreq=str(freq_rest)+'MHz',
timeaverage=True, stack='p',
polaverage=True,
                                 outfile=plotfile)
else:
logger.info('Running plotms to make '+plotfile)
casaStuff.plotms(vis=filename_out,
ydatacolumn=getDataColumnForPlotMS(filename_out),
intent='OBSERVE_TARGET#ON_SOURCE',
restfreq=str(freq_rest)+'MHz',
averagedata=True, avgtime='86400', avgscan=True,
xaxis='vel', yaxis='amp', coloraxis='ant1', showlegend=True,
iteraxis='corr', xselfscale=True, xsharedaxis=True, gridrows=2,
highres=True, dpi=300, showmajorgrid=True, majorstyle='dot',
plotfile=plotfile, overwrite=True,
)
# end for ant loop
with open('done_step_4_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 5 Baseline correction
#-*-*-*-*-*-*-*-*-*-*
def baseline(filename, source, ant_list=None, freq_rest=None, spws_info=None, vel_source=None, vel_line=None, bl_order=1, doplots=True):
logger.info("================================")
logger.info("= Step 5 - Baseline correction =")
logger.info("================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_5_for_'+filename[0:-3]):
logger.info('Found file: done_step_5_for_'+filename[0:-3]+'. Will not re-do step 5.')
return
if ant_list is None:
ant_list = []
if freq_rest is None:
logger.error('Error! freq_rest is not defined when calling baseline()!')
raise Exception('Error! freq_rest is not defined when calling baseline()!')
if spws_info is None:
logger.error('Error! spws_info is not defined when calling baseline()!')
raise Exception('Error! spws_info is not defined when calling baseline()!')
if vel_source is None:
logger.error('Error! vel_source is not defined when calling baseline()!')
raise Exception('Error! vel_source is not defined when calling baseline()!')
if vel_line is None:
logger.error('Error! vel_line is not defined when calling baseline()!')
raise Exception('Error! vel_line is not defined when calling baseline()!')
if not os.path.isdir('plots'):
os.makedirs('plots') # folder containing all plots
# Definition of extension
fin = fsuffix+'.3'
finout = fsuffix+'.4'
if len(ant_list) == 0:
ant_list = [None]
for ant in ant_list:
if ant is not None:
filename_in = filename+'.'+ant+fin
filename_out = filename+'.'+ant+finout
else:
filename_in = filename+'.allant'+fin
filename_out = filename+'.allant'+finout
# Extract the ID of the spw where the line is
spw_line = get_spw_line(vel_source,freq_rest,spws_info)
# Convert the velocity range in channels and get spw string for baseline fitting
coords = read_source_coordinates(filename,source)
spw_extr = str_spw4baseline(filename_in,freq_rest,vel_line,spw_line,coords)
# Subtracting the baseline
rm_data_dir(filename_out)
logger.info('Running sdbaseline to make '+filename_out+', spw = '+str(spw_extr)+', order = '+str(bl_order))
casaStuff.sdbaseline(infile = filename_in,
datacolumn = getDataColumnForSDBaseline(filename_in),
spw = spw_extr,
maskmode = 'list',
blfunc = 'poly',
order = bl_order,
outfile = filename_out,
overwrite = True)
if doplots:
        # Plotting the result from the baseline correction. Spectra averaged in time
plotfile = 'plots/'+filename_out+'_baseline_corrected.png'
if os.path.exists(plotfile):
os.remove(plotfile)
if precasa5:
logger.info('Running sdplot to make '+plotfile)
casaStuff.sdplot(infile=filename_out,
plottype='spectra',
specunit='km/s',
restfreq=str(freq_rest)+'MHz',
timeaverage=True,
stack='p',
outfile=plotfile,
polaverage=True)
else:
logger.info('Running plotms to make '+plotfile)
casaStuff.plotms(vis=filename_out,
ydatacolumn=getDataColumnForPlotMS(filename_out),
intent='OBSERVE_TARGET#ON_SOURCE',
restfreq=str(freq_rest)+'MHz',
averagedata=True, avgtime='86400', avgscan=True,
xaxis='vel', yaxis='amp', coloraxis='ant1', showlegend=True,
iteraxis='corr', xselfscale=True, xsharedaxis=True, gridrows=2,
highres=True, dpi=300, showmajorgrid=True, majorstyle='dot',
plotfile=plotfile, overwrite=True,
)
os.system('mv *blparam.txt obs_lists/')
# end for ant loop
with open('done_step_5_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 6 Concatenate antennas
#-*-*-*-*-*-*-*-*-*-*
def concat_ants(filename, ant_list=None, freq_rest=None, spws_info=None, vel_source=None, pipeline=True):
logger.info("========================================================")
logger.info("= Step 6 - Concatenate antennas and K to Jy conversion =")
logger.info("========================================================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_6_for_'+filename[0:-3]):
logger.info('Found file: done_step_6_for_'+filename[0:-3]+'. Will not re-do step 6.')
return
if ant_list is None:
ant_list = []
if freq_rest is None:
        logger.error('Error! freq_rest is not defined when calling concat_ants()!')
        raise Exception('Error! freq_rest is not defined when calling concat_ants()!')
    if spws_info is None:
        logger.error('Error! spws_info is not defined when calling concat_ants()!')
        raise Exception('Error! spws_info is not defined when calling concat_ants()!')
    if vel_source is None:
        logger.error('Error! vel_source is not defined when calling concat_ants()!')
        raise Exception('Error! vel_source is not defined when calling concat_ants()!')
# Defining extensions
fin = fsuffix+'.4'
finout = '.ms.5'
# check antenna list
#if len(ant_list) == 0:
# ant_list = [None]
    # prepare antenna list to concatenate
#lis_fils = [f for f in os.listdir(".") if (f.endswith(fin) and f.startswith(filename))]
#vec_As = [f[f.find(filename)+len(filename)+1:f.rfind(fin)] for f in lis_fils]
if len(ant_list) > 0:
lis_fils = []
for ant in ant_list:
filename_in = filename+'.'+ant+fin
filename_out = filename+'.'+ant+finout
rm_data_dir(filename_out)
if precasa5:
# Converting from ASAP to MS
logger.info("6.1 Converting from ASAP to MS")
logger.info('Running sdsave to make '+filename_out)
casaStuff.sdsave(infile = filename_in,
outfile = filename_out,
outform='MS2')
else:
cp_data_dir(filename_in, filename_out) # they are all *.ms, just copy it over
lis_fils.append(filename_out)
# Concatenation
logger.info("6.2 Concatenating antennas")
#lis_fils = [f for f in os.listdir(".") if f.endswith('.ms.5') and f.startswith(filename)]
rm_data_dir(filename+'.cal')
logger.info('Running concat to make '+filename+'.cal')
casaStuff.concat(vis = lis_fils, concatvis = filename+'.cal')
else:
filename_in = filename+'.allant'+fin
filename_out = filename+'.allant'+finout
cp_data_dir(filename_in, filename_out)
cp_data_dir(filename_out, filename+'.cal')
# Convert the Science Target Units from Kelvin to Jansky
logger.info("6.3 Convert the Science Target Units from Kelvin to Jansky")
spw_line = get_spw_line(vel_source, freq_rest, spws_info) # get the original spw ID
jyperk = extract_jyperk(filename, spw_line, pipeline)
cp_data_dir(filename+'.cal', filename+'.cal.jy')
logger.info('Running scaleAutocorr on '+filename+'.cal.jy')
for ant in jyperk.keys():
logger.info('ant: %s, spw_line: %s, jyperk[ant][spw_line][\'mean\']: %s'%(ant, spw_line, jyperk[ant][spw_line]['mean']))
if precasa5:
scaleAutocorr(vis=filename+'.cal.jy', scale=jyperk[ant][spw_line]['mean'], antenna=ant, spw=spw_line) # in asap spw number does not change after split?
else:
scaleAutocorr(vis=filename+'.cal.jy', scale=jyperk[ant][spw_line]['mean'], antenna=ant, spw=0) # spw is always 0
# Rename line spw to spw=0
logger.info("6.4 Renaming spw of line "+str(spw_line)+" to 0")
fin = '.cal.jy'
finout = '.cal.jy.tmp'
cp_data_dir(filename+fin, filename+finout)
fin = '.cal.jy.tmp'
finout = '.cal.jy'
rm_data_dir(filename+finout)
logger.info('Running split to make '+filename+finout+', datacolumn is '+getDataColumnForSplit(filename+fin))
if precasa5:
casaStuff.split(vis=filename+fin,
outputvis=filename+finout,
datacolumn='all')
else:
casaStuff.split(vis=filename+fin,
outputvis=filename+finout,
datacolumn=getDataColumnForSplit(filename+fin))
# listobs
if os.path.exists(filename+finout+'.listobs.txt'):
os.remove(filename+finout+'.listobs.txt')
casaStuff.listobs(vis=filename+finout, listfile=filename+finout+'.listobs.txt')
with open('done_step_6_for_'+filename[0:-3], 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 7 - Imaging
#-*-*-*-*-*-*-*-*-*-*
def imaging(source, name_line, phcenter, vel_source, source_vel_kms, vwidth_kms, chan_dv_kms, freq_rest_im,
joint_imaging_dir='', doplots=False):
logger.info("====================")
logger.info("= Step 7 - Imaging =")
logger.info("====================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_7'):
logger.info('Found file: done_step_7. Will not re-do step 7.')
return
#if checkdir(os.getcwd(),path_galaxy) == False:
# os.chdir('../'+path_galaxy+'calibration')
fwhmfactor = 1.13 # Factor to estimate the ALMA theoretical beam
diameter = 12 # Diameter of ALMA antennas in meters
# Search for files already calibrated
path = '.'
Msnames = [f for f in os.listdir(path) if f.endswith('.cal.jy')]
if doplots:
plotfile = True
else:
plotfile = ''
# If 2 SGs have to be imaged together, look for *cal.jy files for the second part of the galaxy
if joint_imaging_dir != '':
logger.info("Two Science goals are considerated to create the final image of the galaxy "+source)
path2 = joint_imaging_dir # ori_path+'/../'+path_galaxy2+'calibration/' # <TODO> for NGC4254b NGC4321b NGC3627b
logger.info('PATH to 2nd part of the galaxy '+path2)
Msnames2 = [path2+f for f in os.listdir(path2) if f.endswith('.cal.jy')]
Msnames = Msnames+Msnames2
logger.info('Msnames: %s'%(Msnames))
# Definition of parameters for imaging
xSampling, ySampling, maxsize = au.getTPSampling(Msnames[0], showplot=False, plotfile=plotfile) # plot will be saved as vis+'.obsid%d.sampling.png' % (obsid) in default
# Read frequency
#msmd.open(Msnames[0])
#freq = msmd.meanfreq(0)
#msmd.close()
mymsmd = au.createCasaTool(casaStuff.msmdtool)
mymsmd.open(Msnames[0])
freq = mymsmd.meanfreq(0)
mymsmd.close()
logger.info("Reading frequency in image: "+str(freq))
# Coordinate of phasecenter read from the data or used as input
if phcenter == False:
coord_phase = read_source_coordinates(Msnames[0],source)
logger.info("Coordinate of phasecenter, read from the data: ")
logger.info(str(coord_phase))
else:
logger.info("Coordinate of phasecenter entered by the user: ")
coord_phase = phcenter
logger.info(str(coord_phase))
# Source velocity for imaging, read from the data or used as input
if source_vel_kms == False:
source_vel_kms = vel_source
logger.info("Velocity of source used for imaging read from the data: ")
logger.info(str(source_vel_kms))
else:
logger.info("Velocity of source used for imaging entered by the user: ")
source_vel_kms = source_vel_kms
logger.info(str(source_vel_kms))
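    # Theoretical FWHM beam ~ fwhmfactor * lambda / D converted from radians
    # to arcsec (this assumes c_light is in km/s and freq in Hz); cell then
    # oversamples that beam by ~9x and imsize pads the map extent by a margin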
theorybeam = fwhmfactor*c_light*1e3/freq/diameter*180/pi*3600
cell = theorybeam/9.0
if 'factorim' in globals():
imsize = int(round(maxsize/cell)*factorim)
else:
imsize = int(round(maxsize/cell)*1.5)
start_vel = source_vel_kms-vwidth_kms/2
nchans_vel = int(round(vwidth_kms/chan_dv_kms))
if os.path.exists('ALMA_TP.'+source+'.'+name_line+'.image'):
shutil.rmtree('ALMA_TP.'+source+'.'+name_line+'.image')
logger.info("Start imaging")
logger.info("Imaging from velocity "+str(start_vel)+", using "+str(nchans_vel)+" channels.")
logger.info("Rest frequency is "+str(freq_rest_im)+" GHz.")
logger.info("Cell and image sizes are: "+str(cell)+"arcsec and "+str(imsize))
logger.info('Msnames: %s'%(Msnames))
casaStuff.sdimaging(infiles = Msnames,
mode = 'velocity',
nchan = nchans_vel,
width = str(chan_dv_kms)+'km/s',
start = str(start_vel)+'km/s',
veltype = "radio",
outframe = 'LSRK',
restfreq = str(freq_rest_im)+'GHz',
gridfunction = 'SF',
convsupport = 6,
phasecenter = coord_phase,
imsize = imsize,
cell = str(cell)+'arcsec',
overwrite = True,
outfile = 'ALMA_TP.'+source+'.'+name_line+'.image')
# Correct the brightness unit in the image header
casaStuff.imhead(imagename = 'ALMA_TP.'+source+'.'+name_line+'.image',
mode = 'put',
hdkey = 'bunit',
hdvalue = 'Jy/beam')
# Add Restoring Beam Header Information to the Science Image
minor, major, fwhmsfBeam, sfbeam = au.sfBeam(frequency=freq*1e-9,
pixelsize=cell,
convsupport=6,
img=None, #to use Gaussian theorybeam
stokes='both',
xSamplingArcsec=xSampling,
ySamplingArcsec=ySampling,
fwhmfactor=fwhmfactor,
diameter=diameter)
#ia.open('ALMA_TP.'+source+'.'+name_line+'.image')
#ia.setrestoringbeam(major = str(sfbeam)+'arcsec', minor = str(sfbeam)+'arcsec', pa = '0deg')
#ia.done()
myia = au.createCasaTool(casaStuff.iatool)
myia.open('ALMA_TP.'+source+'.'+name_line+'.image')
myia.setrestoringbeam(major = str(sfbeam)+'arcsec', minor = str(sfbeam)+'arcsec', pa = '0deg')
myia.close()
if doplots == True:
casaStuff.viewer('ALMA_TP.'+source+'.'+name_line+'.image')
with open('done_step_7', 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Step 8 - Export fits file
#-*-*-*-*-*-*-*-*-*-*-*-*-*
def export_fits(name_line, source, output_file):
logger.info("========================")
logger.info("= Step 8 - Export FITS =")
logger.info("========================")
logger.info("Current directory: "+os.getcwd())
if os.path.exists('done_step_8'):
logger.info('Found file: done_step_8. Will not re-do step 8.')
return
#if os.path.isdir(ori_path+'/'+path_dataproduct) == False:
# os.system('mkdir '+ori_path+'/'+path_dataproduct) # folder containing all plots
#if checkdir(os.getcwd(),path_galaxy) == False:
# os.chdir('../'+path_galaxy+'calibration')
#
imagename = 'ALMA_TP.'+source+'.'+name_line+'.image'
weightname = 'ALMA_TP.'+source+'.'+name_line+'.image.weight'
imagefile = imagename + '.fits'
weightfile = weightname + '.fits'
# Export to fits file
if os.path.exists(imagefile):
os.system('rm -Rf '+imagefile)
if os.path.exists(weightfile):
os.system('rm -Rf '+weightfile)
casaStuff.exportfits(imagename = imagename,
fitsimage = imagefile)
casaStuff.exportfits(imagename = weightname,
fitsimage = weightfile)
logger.info('> Exported FITS to "%s"'%(imagefile))
logger.info('> Exported FITS to "%s"'%(weightfile))
shutil.copy2(imagefile, output_file)
logger.info('> Copied FITS to "%s"'%(output_file))
with open('done_step_8', 'w') as outlogfile:
outlogfile.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ' ' + time.strftime('%Z'))
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
# Main body TP ALMA data reduction.
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
def run_ALMA_TP_tools(
path_galaxy = '',
flag_file = '',
doplots = True,
dosplitants = True,
bl_order = 1,
source = '',
freq_rest = np.nan,
vel_cube = '',
vel_line = '',
phase_center = '',
source_vel_kms = np.nan,
vwidth_kms = np.nan,
chan_dv_kms = np.nan,
freq_rest_im = np.nan,
name_line = '',
output_file = '',
do_step = [],
EBexclude = None,
):
if path_galaxy == '' or source == '' or np.isnan(freq_rest) or np.isnan(source_vel_kms) or np.isnan(vwidth_kms) \
or np.isnan(chan_dv_kms) or np.isnan(freq_rest_im) or name_line == '' or output_file == '':
        logger.error('Error! Invalid input arguments when calling run_ALMA_TP_tools.')
return
path_calibration = os.path.join(path_galaxy, 'calibration')
logger.info("==================================")
logger.info(" Starting TP ALMA data reduction ")
logger.info("==================================")
logger.info("> You are executing the ALMA-TP-pipeline script from the directory: ")
logger.info(" "+os.getcwd())
ori_path = os.getcwd() # Current directory
#checktmp() # check if the tmp folder exists. If not, do it and copy the data.
#print("> Changing directory to "+path_galaxy+'calibration'+"\n")
#os.chdir('../'+path_galaxy+'calibration') # Working on the calibration folder of the current galaxy
logger.info("> Changing directory to "+os.path.join(path_galaxy,'calibration'))
os.chdir(os.path.join(path_galaxy,'calibration')) # Working on the calibration folder of the current galaxy
pipeline = checkpipeline() # Pipeline reduced data (True or False)
# Defining Execution Blocks (EBS) names
EBsnames = [f for f in os.listdir(path_raw) if f.endswith('.asdm.sdm')]
#if 'EBexclude' in globals():
if EBexclude is not None:
if np.isscalar(EBexclude):
EBexclude = [EBexclude]
EBsnames = [s for s in EBsnames if s[0:-9] not in EBexclude]
if len(do_step) == 0:
do_step = [1,2,3,4,5,6,7,8]
# Do data reduction for each EB
for EBs in EBsnames:
#
if pipeline == False:
EBs = EBs.replace('.ms.scriptForSDCalibration.py', '.asdm.sdm')
filename = 'u'+re.search('u(.+?).asdm.sdm', EBs).group(1)+'.ms'
        file_exists = check_exists(filename) # Check whether the raw data exists
#
if file_exists == True:
if 1 in do_step:
import_and_split_ant(filename,
doplots=doplots,
dosplitants=dosplitants) # Import and split data per antenna
source = get_sourcename(filename) # read the source name directly from the ms
vec_ants_t = read_ants_names(filename) # Read vector with name of all antennas
vec_ants = [s for s in vec_ants_t if any(xs in s for xs in ['PM','DV'])] # Get only 12m antennas.
vel_source = read_vel_source(filename,source) # Read source velocity
spws_info = read_spw(filename,source) # Read information of spws (science and Tsys)
#
if 2 in do_step:
spwmap = gen_tsys_and_flag(filename, spws_info, pipeline,
flag_dir=os.path.join(ori_path, 'galaxy-specific-scripts', 'flags-folder'),
flag_file='',
doplots=doplots)
#
if not dosplitants:
if not precasa5:
vec_ants = None
#
if 3 in do_step:
counts2kelvin(filename, ant_list=vec_ants,
spws_info=spws_info, spwmap=spwmap, doplots=doplots)
#
if 4 in do_step:
extract_cube(filename, source, name_line, ant_list=vec_ants,
freq_rest=freq_rest, spws_info=spws_info, vel_source=vel_source, vel_cube=vel_cube, doplots=doplots)
#
if 5 in do_step:
baseline(filename, source, ant_list=vec_ants,
freq_rest=freq_rest, spws_info=spws_info, vel_source=vel_source, vel_line=vel_line, bl_order=bl_order,
doplots=doplots)
#
if 6 in do_step:
# concat ants and convert flux unit to Jy
concat_ants(filename, ant_list=vec_ants,
freq_rest=freq_rest, spws_info=spws_info, vel_source=vel_source, pipeline=pipeline)
#
#
vel_source = read_vel_source(filename, source)
#
if 7 in do_step:
imaging(source, name_line, phase_center, vel_source, source_vel_kms, vwidth_kms, chan_dv_kms, freq_rest_im, doplots=doplots)
#
if 8 in do_step:
export_fits(name_line, source, output_file)
#
logger.info("> Changing directory to "+ori_path+'')
os.chdir(ori_path)
| 43.013721 | 183 | 0.591265 |
7984bc9a71a72a18e834f1a4401b7236157d8406 | 7,178 | py | Python | ssshare/ss/parse.py | 1nfty/ShadowSocksShare | f6ccb71b8084b6ae3add774abfb039438bb08530 | [
"Apache-2.0"
] | 1 | 2020-05-13T17:50:02.000Z | 2020-05-13T17:50:02.000Z | ssshare/ss/parse.py | 1nfty/ShadowSocksShare | f6ccb71b8084b6ae3add774abfb039438bb08530 | [
"Apache-2.0"
] | null | null | null | ssshare/ss/parse.py | 1nfty/ShadowSocksShare | f6ccb71b8084b6ae3add774abfb039438bb08530 | [
"Apache-2.0"
] | 2 | 2020-02-07T06:59:33.000Z | 2020-09-11T07:34:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import zbar
import requests
from PIL import Image
from io import BytesIO
from numpy import array, uint8
import base64
import urllib
import json
import logging
scanner = zbar.Scanner()
def decode(string):
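    # urlsafe_b64decode needs input padded to a multiple of 4; over-padding
    # with surplus '=' is tolerated by the (non-strict) decoder, so the exact
    # pad length never has to be computed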
try:
return str(
base64.urlsafe_b64decode(
bytes(
string.strip('/') + (4 - len(string.strip('/')) % 4) * '=' + '====',
'utf-8')), 'utf-8')
except Exception as e:
print(e, string)
raise Exception(e, string)
def encode(decoded):
return base64.urlsafe_b64encode(
bytes(str(decoded), 'utf-8')).decode('utf-8').replace('=', '')
def parse(uri, default_title='untitled'):
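    # Two accepted layouts, as parsed below:
    #   ss://base64(method:password@host:port)#remarks
    #   ssr://base64(host:port:protocol:method:obfs:base64(password)/?params)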
    server = dict()
    # seed remarks with the default title so the '+=' tagging below cannot
    # raise KeyError on links that carry no explicit remark
    server['remarks'] = default_title
stripped = re.sub('ssr?://', '', uri)
if uri[2] == ':':
# ss
if '#' in uri:
stripped, remarks = stripped.split('#')[:2]
server['remarks'] = urllib.parse.unquote(remarks)
        else:
            # no fragment in the URI: keep the default title seeded above
            pass
decoded = decode(stripped)
data = decoded.split('@', maxsplit=1)
server['method'], server['password'] = data[0].split(':', maxsplit=1)
server['server'], server['server_port'] = data[1].rsplit(':', maxsplit=1)
elif uri[2] == 'r':
# ssr
decoded = decode(stripped)
data = decoded.split('/?')
[
server['server'],
server['server_port'],
server['ssr_protocol'],
server['method'],
server['obfs'],
password_enc,
] = data[0].rsplit(':', maxsplit=5)
server['password'] = decode(password_enc)
server['remarks'] += 'SSR'
if len(data) > 1:
appendix = data[1].split('&')
content = {i.split('=')[0]: i.split('=')[1] for i in appendix}
for key in content:
server[key] = decode(content[key])
if server['ssr_protocol'] != 'origin' and server['obfs'] != 'plain':
server['remarks'] += 'SSR'
return server
def scanNetQR(img_url, headers=None):
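    # Accepts either an http(s) URL or an inline 'data:image/...;base64,' URI;
    # the array is scaled to 0/255 and also scanned inverted, apparently to
    # handle binarized (0/1) images and QR codes with inverted polarity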
if img_url.startswith('http'):
img_bytes = requests.get(img_url, headers=headers).content
elif img_url.startswith('data:image'):
img_bytes = base64.decodebytes(bytes(img_url.split(',')[1], 'utf-8'))
img = array(Image.open(BytesIO(img_bytes)))
info = scanner.scan(img.astype(uint8) * 255) + scanner.scan((1 - img).astype(uint8) * 255)
if len(info) == 0:
raise ValueError('scanner fail to identify qr code')
return info[0].data.decode('utf-8')
def get_href(string, pattern='.*'):
    # Python's re module rejects variable-width look-behind (the '\s+' here),
    # so use a capture group instead of look-around assertions
    found = re.findall(r'<a\s+href="([^"]+)">%s</a>' % pattern, string)
    if found:
        return found[0]
def gen_uri(servers):
'''{
"server": server['server'],
"server_ipv6": "::",
"server_port": int(server['server_port']),
"local_address": "127.0.0.1",
"local_port": 1080,
"password": server['password'],
"timeout": 300,
"udp_timeout": 60,
"method": method,
"protocol": ssr_protocol,
"protocol_param": "",
"obfs": obfs,
"obfs_param": "",
"fast_open": False,
"workers": 1,
"group": "rea11y.best"
},'''
result_servers = list()
for server in servers:
if 'password' not in server:
server['password'] = ''
try:
for key in ['method', 'password', 'server', 'server_port']:
                assert key in server, '{key} not in server data'.format(key=key)
for k, v in (('ssr_protocol', 'origin'), ('obfs', 'plain')):
if k in server and server[k] == v:
server.pop(k)
is_ss = 'ssr_protocol' not in server and 'obfs' not in server
if is_ss:
# if not completed, it's ss
decoded = '{method}:{password}@{hostname}:{port}'.format(
method=server['method'],
password=server['password'],
hostname=server['server'],
port=server['server_port'],
)
ss_uri = 'ss://{}#{}'.format(
str(base64.urlsafe_b64encode(bytes(decoded, encoding='utf8')), encoding='utf-8'),
urllib.parse.quote(server['remarks'])
)
# ssr formatted account info
ssr_decoded = ':'.join([
server['server'],
server['server_port'],
'origin',
server['method'],
'plain',
encode(server['password']),
])
ssr_decoded += '/?remarks={remarks}&group={group}'.format(
remarks=encode(server['remarks']),
group=encode("rea11y"),
)
            ssr_uri = 'ssr://{encoded}'.format(
                encoded=encode(ssr_decoded)
            )
else:
decoded_head = ':'.join([str(i) for i in [
server['server'],
server['server_port'],
server.get('ssr_protocol', 'origin'),
server['method'],
server.get('obfs', 'plain'),
encode(server['password'])
]])
appendix = [(key, server[key]) for key in ['obfsparam', 'protoparam', 'remarks'] if key in server]
appendix.append(('group', 'rea11y'))
appendix_str = '&'.join(['{key}={val}'.format(
key=item[0],
val=encode(item[1])
) for item in appendix])
decoded = '/?'.join([decoded_head, appendix_str])
                ss_uri = 'ssr://{encoded}'.format(encoded=encode(decoded))
ssr_uri = ss_uri
server['uri'] = ss_uri
server['ssr_uri'] = ssr_uri
server['decoded_url'] = urllib.parse.unquote(ss_uri)
server_data_to_json = {
"server": server['server'],
"server_ipv6": "::",
"server_port": int(server['server_port']),
"local_address": "127.0.0.1",
"local_port": 1080,
"password": server['password'],
"group": "rea11y"
}
if 'ssr_protocol' in server:
server['protocol'] = server['ssr_protocol']
for key in ['obfs', 'method', 'protocol', 'obfsparam', 'protoparam', 'udpport', 'uot']:
if key in server:
server_data_to_json[key] = server.get(key)
server['json'] = json.dumps(
server_data_to_json,
ensure_ascii=False,
indent=2,
)
result_servers.append(server)
except (KeyError, EOFError, ValueError) as e:
logging.exception(e, stack_info=True)
return result_servers
| 35.014634 | 114 | 0.482586 |
c4d6a6042469330b4446c0f598d42fa012b67d22 | 1,776 | py | Python | liter/writer.py | ChenchaoZhao/TorchLiter | 4ca875f3d7c11d034f9ab69b48bbbccb3cd2c913 | [
"MIT"
] | 3 | 2021-06-26T16:05:39.000Z | 2021-07-21T18:22:12.000Z | liter/writer.py | ChenchaoZhao/TorchLiter | 4ca875f3d7c11d034f9ab69b48bbbccb3cd2c913 | [
"MIT"
] | 3 | 2021-03-11T01:32:35.000Z | 2021-03-22T17:28:29.000Z | liter/writer.py | ChenchaoZhao/TorchLiter | 4ca875f3d7c11d034f9ab69b48bbbccb3cd2c913 | [
"MIT"
] | null | null | null | import csv
import os
from . import REPR_INDENT
__all__ = ["CSVWriter"]
class CSVWriter:
"""CSV writer."""
def __init__(self, path, columns, delimiter=","):
self.path = path
self.path_exists = os.path.exists(path)
self.delimiter = delimiter
if self.path_exists:
with open(path, "r") as f:
header = next(csv.reader(f, delimiter=self.delimiter))
if set(header) == set(columns):
self.columns = header
self.write_header = False
else:
raise ValueError(
"Header in file is inconsistent with columns: "
f"header: {header}; columns {columns}"
)
else:
self.columns = columns
self.write_header = True
self.file = None
self.writer = None
def open(self):
mode = "a+" if self.path_exists else "w"
self.file = open(self.path, mode, buffering=1)
self.writer = csv.DictWriter(
f=self.file, fieldnames=self.columns, delimiter=self.delimiter
)
if self.write_header:
self.writer.writeheader()
def close(self):
self.file.close()
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write_row(self, row_dict):
self.writer.writerow(row_dict)
def __call__(self, row_dict):
self.write_row(row_dict)
def __repr__(self):
out = []
out.append(self.__class__.__name__)
out.append(" " * REPR_INDENT + f"filepath: {self.path}")
out.append(" " * REPR_INDENT + f"columns: {self.columns}")
return "\n".join(out)
| 26.909091 | 74 | 0.556869 |
d29ff363862bc8b7124f00dacbad38a2eb1530a2 | 215 | py | Python | survey/surveys/metadata/__init__.py | vahndi/quant-survey | 1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f | [
"MIT"
] | 2 | 2021-04-10T21:50:36.000Z | 2022-03-26T16:46:52.000Z | survey/surveys/metadata/__init__.py | vahndi/quant-survey | 1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f | [
"MIT"
] | 11 | 2020-08-30T18:47:14.000Z | 2021-09-09T15:57:19.000Z | survey/surveys/metadata/__init__.py | vahndi/quant-survey | 1a0fcf0c22e2c7306cba0218f82d24c97d28ee1f | [
"MIT"
] | null | null | null | from survey.surveys.metadata.attribute_metadata import AttributeMetadata
from survey.surveys.metadata.category_metadata import CategoryMetadata
from survey.surveys.metadata.question_metadata import QuestionMetadata
| 53.75 | 72 | 0.902326 |
99fe6bc5e46ee4011347db1a3e694bf30cff9156 | 6,214 | py | Python | napari/utils/notifications.py | ianhi/napari | c473058dae3b665b98dc9fed28c310b6d660d063 | [
"BSD-3-Clause"
] | null | null | null | napari/utils/notifications.py | ianhi/napari | c473058dae3b665b98dc9fed28c310b6d660d063 | [
"BSD-3-Clause"
] | null | null | null | napari/utils/notifications.py | ianhi/napari | c473058dae3b665b98dc9fed28c310b6d660d063 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import os
import sys
import warnings
from datetime import datetime
from enum import auto
from types import TracebackType
from typing import Callable, List, Optional, Sequence, Tuple, Type, Union
from .events import Event, EventEmitter
from .misc import StringEnum
class NotificationSeverity(StringEnum):
"""Severity levels for the notification dialog. Along with icons for each."""
ERROR = auto()
WARNING = auto()
INFO = auto()
DEBUG = auto()
def as_icon(self):
return {
self.ERROR: "ⓧ",
self.WARNING: "⚠️",
self.INFO: "ⓘ",
self.DEBUG: "🐛",
}[self]
ActionSequence = Sequence[Tuple[str, Callable[[], None]]]
class Notification(Event):
"""A Notifcation event. Usually created by :class:`NotificationManager`.
Parameters
----------
message : str
The main message/payload of the notification.
severity : str or NotificationSeverity, optional
The severity of the notification, by default
`NotificationSeverity.WARNING`.
actions : sequence of tuple, optional
Where each tuple is a `(str, callable)` 2-tuple where the first item
is a name for the action (which may, for example, be put on a button),
and the callable is a callback to perform when the action is triggered.
(for example, one might show a traceback dialog). by default ()
"""
def __init__(
self,
message: str,
severity: Union[
str, NotificationSeverity
] = NotificationSeverity.WARNING,
actions: ActionSequence = (),
**kwargs,
):
self.severity = NotificationSeverity(severity)
super().__init__(type=str(self.severity).lower(), **kwargs)
self.message = message
self.actions = actions
# let's store when the object was created;
self.date = datetime.now()
@classmethod
def from_exception(cls, exc: BaseException, **kwargs) -> 'Notification':
return ErrorNotification(exc, **kwargs)
@classmethod
def from_warning(cls, warning: Warning, **kwargs) -> 'Notification':
return WarningNotification(warning, **kwargs)
class ErrorNotification(Notification):
exception: BaseException
def __init__(self, exception: BaseException, *args, **kwargs):
msg = getattr(exception, 'message', str(exception))
actions = getattr(exception, 'actions', ())
super().__init__(msg, NotificationSeverity.ERROR, actions)
self.exception = exception
class WarningNotification(Notification):
warning: Warning
def __init__(self, warning: Warning, *args, **kwargs):
msg = getattr(warning, 'message', str(warning))
actions = getattr(warning, 'actions', ())
super().__init__(msg, NotificationSeverity.WARNING, actions)
self.warning = warning
class NotificationManager:
"""
A notification manager, to route all notifications through.
    In general only one instance is available through napari, since all
    notifications need to flow to a single location that is registered with
    the sys.excepthook and showwarning hooks.
    This can and should be used as a context manager; the context manager can
    be properly re-entered, installing/removing hooks and keeping them on a
    stack so they can be restored.
    While it might seem unnecessary to make it re-entrant (or to make the
    re-entrancy a no-op), consider that it could be used inside another
    context manager that modifies excepthook and showwarning.
    Currently the original excepthook and showwarning hooks are not called,
    but this could change in the future; that poses some questions about the
    re-entrancy of the hooks themselves.
"""
records: List[Notification]
_instance: Optional[NotificationManager] = None
def __init__(self) -> None:
self.records: List[Notification] = []
self.exit_on_error = os.getenv('NAPARI_EXIT_ON_ERROR') in ('1', 'True')
self.notification_ready = self.changed = EventEmitter(
source=self, event_class=Notification
)
self._originals_except_hooks = []
self._original_showwarnings_hooks = []
def __enter__(self):
self.install_hooks()
return self
def __exit__(self, *args, **kwargs):
self.restore_hooks()
def install_hooks(self):
"""
Install a sys.excepthook and a showwarning hook to display any message
in the UI, storing the previous hooks to be restored if necessary
"""
self._originals_except_hooks.append(sys.excepthook)
sys.excepthook = self.receive_error
self._original_showwarnings_hooks.append(warnings.showwarning)
warnings.showwarning = self.receive_warning
def restore_hooks(self):
"""
Remove hooks installed by `install_hooks` and restore previous hooks.
"""
sys.excepthook = self._originals_except_hooks.pop()
warnings.showwarning = self._original_showwarnings_hooks.pop()
def dispatch(self, notification: Notification):
self.records.append(notification)
self.notification_ready(notification)
def receive_error(
self,
exctype: Type[BaseException],
value: BaseException,
traceback: TracebackType,
):
if isinstance(value, KeyboardInterrupt):
sys.exit("Closed by KeyboardInterrupt")
if self.exit_on_error:
sys.__excepthook__(exctype, value, traceback)
sys.exit("Exit on error")
try:
self.dispatch(Notification.from_exception(value))
except Exception:
pass
def receive_warning(
self,
message: Warning,
category: Type[Warning],
filename: str,
lineno: int,
file=None,
line=None,
):
self.dispatch(Notification.from_warning(message))
def receive_info(self, message: str):
self.dispatch(Notification(message, 'INFO'))
notification_manager = NotificationManager()
def show_info(message: str):
notification_manager.receive_info(message)
| 31.383838 | 82 | 0.66318 |
5b1fe7efec32b3472a56ae919df82392910ae281 | 1,757 | py | Python | python/katana/example_utils.py | hagabb/katana | a52a688b90315a79aa95cf8d279fd7f949a3b94b | [
"BSD-3-Clause"
] | null | null | null | python/katana/example_utils.py | hagabb/katana | a52a688b90315a79aa95cf8d279fd7f949a3b94b | [
"BSD-3-Clause"
] | null | null | null | python/katana/example_utils.py | hagabb/katana | a52a688b90315a79aa95cf8d279fd7f949a3b94b | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
import tarfile
import urllib.request
from pathlib import Path
__all__ = ["get_input"]
def get_inputs_directory(*, invalidate=False):
inputs_dir = None
# Use the build paths if they exist.
paths_to_check = list(Path(__file__).parents) + list(Path.cwd().parents)
for path in paths_to_check:
# TODO(amp): If we can abstract the input version info a shared file, this should look for
# specifically that version.
ci_inputs_path = path / "inputs" / "current"
if ci_inputs_path.is_dir():
inputs_dir = ci_inputs_path
# Otherwise use a cache directory
if not inputs_dir:
inputs_dir = Path(os.environ["HOME"]) / ".cache" / "katana" / "inputs"
if inputs_dir.is_dir() and (inputs_dir / "propertygraphs" / "ldbc_003").is_dir():
if not invalidate:
return inputs_dir
try:
shutil.rmtree(inputs_dir)
except OSError:
inputs_dir.unlink()
inputs_dir.mkdir(parents=True, exist_ok=True)
fn, _headers = urllib.request.urlretrieve(
"https://katana-ci-public.s3.us-east-1.amazonaws.com/inputs/katana-inputs-v19.tar.gz"
)
try:
with tarfile.open(fn) as tar:
tar.extractall(inputs_dir)
finally:
os.unlink(fn)
return inputs_dir
def get_input(rel_path):
"""
Download the standard Galois inputs (with local caching on disk) and return a path to a file in that archive.
>>> from katana.property_graph import PropertyGraph
... graph = PropertyGraph(get_input("propertygraphs/ldbc_003"))
"""
path = get_inputs_directory() / rel_path
if path.exists():
return path
return get_inputs_directory(invalidate=True) / rel_path
| 33.150943 | 113 | 0.663062 |
73eda3e81c28303bd0a00d715d0829aedc934182 | 815 | py | Python | main_app/migrations/0007_auto_20211031_2021.py | mdhowey/seagull | 8aabaf1b63966d3f1f76806536e0c2d78102ed66 | [
"Apache-2.0"
] | null | null | null | main_app/migrations/0007_auto_20211031_2021.py | mdhowey/seagull | 8aabaf1b63966d3f1f76806536e0c2d78102ed66 | [
"Apache-2.0"
] | null | null | null | main_app/migrations/0007_auto_20211031_2021.py | mdhowey/seagull | 8aabaf1b63966d3f1f76806536e0c2d78102ed66 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-31 20:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0006_user'),
]
operations = [
migrations.DeleteModel(
name='User',
),
migrations.AlterModelOptions(
name='product',
options={'ordering': ['name']},
),
migrations.AddField(
model_name='product',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
| 26.290323 | 108 | 0.614724 |
74c24ef582bcc6c29b7330c653208928b3f3b926 | 3,195 | py | Python | examples/basic_operations/update_ad_group.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | examples/basic_operations/update_ad_group.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | null | null | null | examples/basic_operations/update_ad_group.py | andy0937/google-ads-python | cb5da7f4a75076828d1fc3524b08cc167670435a | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | #!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates an ad group.
To get ad groups, run get_ad_groups.py.
"""
import argparse
import sys
import google.ads.google_ads.client
from google.api_core import protobuf_helpers
def main(client, customer_id, ad_group_id, bid_micro_amount):
ad_group_service = client.get_service('AdGroupService', version='v3')
# Create ad group operation.
ad_group_operation = client.get_type('AdGroupOperation', version='v3')
ad_group = ad_group_operation.update
ad_group.resource_name = ad_group_service.ad_group_path(
customer_id, ad_group_id)
ad_group.status = client.get_type('AdGroupStatusEnum', version='v3').PAUSED
ad_group.cpc_bid_micros.value = bid_micro_amount
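    # field_mask(None, ad_group) diffs the message against an empty one, so
    # the mask lists exactly the fields set above (status, cpc_bid_micros)
    # and the API updates only those.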
fm = protobuf_helpers.field_mask(None, ad_group)
ad_group_operation.update_mask.CopyFrom(fm)
# Update the ad group.
try:
ad_group_response = ad_group_service.mutate_ad_groups(
customer_id, [ad_group_operation])
except google.ads.google_ads.errors.GoogleAdsException as ex:
print('Request with ID "%s" failed with status "%s" and includes the '
'following errors:' % (ex.request_id, ex.error.code().name))
for error in ex.failure.errors:
print('\tError with message "%s".' % error.message)
if error.location:
for field_path_element in error.location.field_path_elements:
print('\t\tOn field: %s' % field_path_element.field_name)
sys.exit(1)
print('Updated ad group %s.' % ad_group_response.results[0].resource_name)
if __name__ == '__main__':
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
.load_from_storage())
parser = argparse.ArgumentParser(
description=('Updates an ad group for specified customer and campaign '
'id with the given bid micro amount.'))
# The following argument(s) should be provided to run the example.
parser.add_argument('-c', '--customer_id', type=str,
required=True, help='The Google Ads customer ID.')
parser.add_argument('-a', '--ad_group_id', type=str,
required=True, help='The ad group ID.')
parser.add_argument('-b', '--bid_micro_amount', type=int,
required=True, help='The bid micro amount.')
args = parser.parse_args()
main(google_ads_client, args.customer_id, args.ad_group_id,
args.bid_micro_amount)
| 40.961538 | 79 | 0.692958 |
66a93c0272c96785a290b2cbd765075cec5167b7 | 2,101 | py | Python | roche/wsgi.py | beijingren/roche-website | c5618045f56349f7a3733e80dd75574e33c874f0 | [
"MIT"
] | null | null | null | roche/wsgi.py | beijingren/roche-website | c5618045f56349f7a3733e80dd75574e33c874f0 | [
"MIT"
] | null | null | null | roche/wsgi.py | beijingren/roche-website | c5618045f56349f7a3733e80dd75574e33c874f0 | [
"MIT"
] | null | null | null | """
WSGI config for roche project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "roche.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "roche.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
django_wsgi_application = get_wsgi_application()
def application(environ, start_response):
# Get Docker link vars
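    # Presumably these Docker link variables arrive in the per-request WSGI
    # environ (e.g. under mod_wsgi) rather than in os.environ, so they are
    # copied over before delegating to the Django application.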
os.environ['DOCKER_PASSWORD'] = environ['DOCKER_PASSWORD']
os.environ['DB_PORT_5432_TCP_ADDR'] = environ['DB_PORT_5432_TCP_ADDR']
os.environ['DB_PORT_5432_TCP_PORT'] = environ['DB_PORT_5432_TCP_PORT']
os.environ['XMLDB_PORT_8080_TCP_ADDR'] = environ['XMLDB_PORT_8080_TCP_ADDR']
os.environ['XMLDB_PORT_8080_TCP_PORT'] = environ['XMLDB_PORT_8080_TCP_PORT']
os.environ['SPARQL_PORT_3030_TCP_ADDR'] = environ['SPARQL_PORT_3030_TCP_ADDR']
os.environ['SPARQL_PORT_3030_TCP_PORT'] = environ['SPARQL_PORT_3030_TCP_PORT']
return django_wsgi_application(environ, start_response)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 45.673913 | 82 | 0.795812 |
4e4d824f8e7f51c1ac78db74f33ee7b46e53e121 | 4,328 | py | Python | quantum/plugins/nec/agent/nec_quantum_agent.py | r-mibu/neutron | 7aebe2468bdcc1befef7d09136fdedafcb0049ec | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nec/agent/nec_quantum_agent.py | r-mibu/neutron | 7aebe2468bdcc1befef7d09136fdedafcb0049ec | [
"Apache-2.0"
] | null | null | null | quantum/plugins/nec/agent/nec_quantum_agent.py | r-mibu/neutron | 7aebe2468bdcc1befef7d09136fdedafcb0049ec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation.
# Based on ryu/openvswitch agents.
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import logging
import sys
import time
import socket
from quantum.agent.linux import ovs_lib
from quantum.common import config as logging_config
from quantum.common import topics
from quantum.openstack.common import context
from quantum.openstack.common import rpc
from quantum.plugins.nec.common import config
logging.basicConfig()
LOG = logging.getLogger(__name__)
class NECQuantumAgent(object):
def __init__(self, integ_br, root_helper, polling_interval):
'''Constructor.
:param integ_br: name of the integration bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to check the bridge.
'''
self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)
self.polling_interval = polling_interval
self.host = socket.gethostname()
self.agent_id = 'nec-q-agent.%s' % self.host
self.datapath_id = "0x%s" % self.int_br.get_datapath_id()
# RPC network init
self.context = context.RequestContext('quantum', 'quantum',
is_admin=False)
self.conn = rpc.create_connection(new=True)
def update_ports(self, port_added=[], port_removed=[]):
"""RPC to update information of ports on Quantum Server"""
LOG.info("update ports: added=%s, removed=%s" %
(port_added, port_removed))
try:
rpc.call(self.context,
topics.PLUGIN,
{'method': 'update_ports',
'args': {'topic': topics.AGENT,
'agent_id': self.agent_id,
'datapath_id': self.datapath_id,
'port_added': port_added,
'port_removed': port_removed}})
        except Exception as e:
            LOG.warn("update_ports() failed: %s", e)
            return
def _vif_port_to_port_info(self, vif_port):
return dict(id=vif_port.vif_id, port_no=vif_port.ofport,
mac=vif_port.vif_mac)
def daemon_loop(self):
"""Main processing loop for NEC Plugin Agent."""
old_ports = []
while True:
new_ports = []
port_added = []
for vif_port in self.int_br.get_vif_ports():
port_id = vif_port.vif_id
new_ports.append(port_id)
if port_id not in old_ports:
port_info = self._vif_port_to_port_info(vif_port)
port_added.append(port_info)
port_removed = []
for port_id in old_ports:
if port_id not in new_ports:
port_removed.append(port_id)
if port_added or port_removed:
self.update_ports(port_added, port_removed)
else:
LOG.debug("No port changed.")
old_ports = new_ports
time.sleep(self.polling_interval)
def main():
config.CONF(args=sys.argv, project='quantum')
logging_config.setup_logging(config.CONF)
# Determine which agent type to use.
integ_br = config.OVS.integration_bridge
root_helper = config.AGENT.root_helper
polling_interval = config.AGENT.polling_interval
agent = NECQuantumAgent(integ_br, root_helper, polling_interval)
# Start everything.
agent.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
| 33.038168 | 78 | 0.627311 |
8e12602123ff921487dc6c5f974c2a44bef16725 | 36,078 | py | Python | rlkit/torch/sac/awac_trainer.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | rlkit/torch/sac/awac_trainer.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | rlkit/torch/sac/awac_trainer.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z | import pickle
from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from rlkit.torch.sac.policies import MakeDeterministic
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.torch.core import np_to_pytorch_batch
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.core import logger
from rlkit.core.logging import add_prefix
from rlkit.misc.ml_util import PiecewiseLinearSchedule, ConstantSchedule
import torch.nn.functional as F
from rlkit.torch.networks import LinearTransform
import time
class AWACTrainer(TorchTrainer):
def __init__(
self,
env,
policy,
qf1,
qf2,
target_qf1,
target_qf2,
buffer_policy=None,
discount=0.99,
reward_scale=1.0,
beta=1.0,
beta_schedule_kwargs=None,
policy_lr=1e-3,
qf_lr=1e-3,
policy_weight_decay=0,
q_weight_decay=0,
optimizer_class=optim.Adam,
soft_target_tau=1e-2,
target_update_period=1,
plotter=None,
render_eval_paths=False,
use_automatic_entropy_tuning=True,
target_entropy=None,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=0,
bc_batch_size=128,
alpha=1.0,
policy_update_period=1,
q_update_period=1,
weight_loss=True,
compute_bc=True,
use_awr_update=True,
use_reparam_update=False,
bc_weight=0.0,
rl_weight=1.0,
reparam_weight=1.0,
awr_weight=1.0,
post_pretrain_hyperparams=None,
post_bc_pretrain_hyperparams=None,
awr_use_mle_for_vf=False,
vf_K=1,
awr_sample_actions=False,
buffer_policy_sample_actions=False,
awr_min_q=False,
brac=False,
reward_transform_class=None,
reward_transform_kwargs=None,
terminal_transform_class=None,
terminal_transform_kwargs=None,
pretraining_logging_period=1000,
train_bc_on_rl_buffer=False,
use_automatic_beta_tuning=False,
beta_epsilon=1e-10,
normalize_over_batch=True,
normalize_over_state="advantage",
Z_K=10,
clip_score=None,
validation_qlearning=False,
mask_positive_advantage=False,
buffer_policy_reset_period=-1,
num_buffer_policy_train_steps_on_reset=100,
advantage_weighted_buffer_loss=True,
):
super().__init__()
self.env = env
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.target_qf1 = target_qf1
self.target_qf2 = target_qf2
self.buffer_policy = buffer_policy
self.soft_target_tau = soft_target_tau
self.target_update_period = target_update_period
self.use_awr_update = use_awr_update
self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
if self.use_automatic_entropy_tuning:
if target_entropy:
self.target_entropy = target_entropy
else:
self.target_entropy = -np.prod(self.env.action_space.shape).item() # heuristic value from Tuomas
self.log_alpha = ptu.zeros(1, requires_grad=True)
self.alpha_optimizer = optimizer_class(
[self.log_alpha],
lr=policy_lr,
)
self.awr_use_mle_for_vf = awr_use_mle_for_vf
self.vf_K = vf_K
self.awr_sample_actions = awr_sample_actions
self.awr_min_q = awr_min_q
self.plotter = plotter
self.render_eval_paths = render_eval_paths
self.qf_criterion = nn.MSELoss()
self.vf_criterion = nn.MSELoss()
self.optimizers = {}
self.policy_optimizer = optimizer_class(
self.policy.parameters(),
weight_decay=policy_weight_decay,
lr=policy_lr,
)
self.optimizers[self.policy] = self.policy_optimizer
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(),
weight_decay=q_weight_decay,
lr=qf_lr,
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(),
weight_decay=q_weight_decay,
lr=qf_lr,
)
if buffer_policy and train_bc_on_rl_buffer:
self.buffer_policy_optimizer = optimizer_class(
self.buffer_policy.parameters(),
weight_decay=policy_weight_decay,
lr=policy_lr,
)
self.optimizers[self.buffer_policy] = self.buffer_policy_optimizer
self.optimizer_class=optimizer_class
self.policy_weight_decay=policy_weight_decay
self.policy_lr = policy_lr
self.use_automatic_beta_tuning = use_automatic_beta_tuning and buffer_policy and train_bc_on_rl_buffer
self.beta_epsilon=beta_epsilon
if self.use_automatic_beta_tuning:
self.log_beta = ptu.zeros(1, requires_grad=True)
self.beta_optimizer = optimizer_class(
[self.log_beta],
lr=policy_lr,
)
else:
self.beta = beta
self.beta_schedule_kwargs = beta_schedule_kwargs
if beta_schedule_kwargs is None:
self.beta_schedule = ConstantSchedule(beta)
else:
schedule_class = beta_schedule_kwargs.pop("schedule_class", PiecewiseLinearSchedule)
self.beta_schedule = schedule_class(**beta_schedule_kwargs)
self.discount = discount
self.reward_scale = reward_scale
self.eval_statistics = OrderedDict()
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
self.bc_num_pretrain_steps = bc_num_pretrain_steps
self.q_num_pretrain1_steps = q_num_pretrain1_steps
self.q_num_pretrain2_steps = q_num_pretrain2_steps
self.bc_batch_size = bc_batch_size
self.rl_weight = rl_weight
self.bc_weight = bc_weight
self.eval_policy = MakeDeterministic(self.policy)
self.compute_bc = compute_bc
self.alpha = alpha
self.q_update_period = q_update_period
self.policy_update_period = policy_update_period
self.weight_loss = weight_loss
self.reparam_weight = reparam_weight
self.awr_weight = awr_weight
self.post_pretrain_hyperparams = post_pretrain_hyperparams
self.post_bc_pretrain_hyperparams = post_bc_pretrain_hyperparams
self.update_policy = True
self.pretraining_logging_period = pretraining_logging_period
self.normalize_over_batch = normalize_over_batch
self.normalize_over_state = normalize_over_state
self.Z_K = Z_K
self.reward_transform_class = reward_transform_class or LinearTransform
self.reward_transform_kwargs = reward_transform_kwargs or dict(m=1, b=0)
self.terminal_transform_class = terminal_transform_class or LinearTransform
self.terminal_transform_kwargs = terminal_transform_kwargs or dict(m=1, b=0)
self.reward_transform = self.reward_transform_class(**self.reward_transform_kwargs)
self.terminal_transform = self.terminal_transform_class(**self.terminal_transform_kwargs)
self.use_reparam_update = use_reparam_update
self.clip_score = clip_score
self.buffer_policy_sample_actions = buffer_policy_sample_actions
self.train_bc_on_rl_buffer = train_bc_on_rl_buffer and buffer_policy
self.validation_qlearning = validation_qlearning
self.brac = brac
self.mask_positive_advantage = mask_positive_advantage
self.buffer_policy_reset_period = buffer_policy_reset_period
        self.num_buffer_policy_train_steps_on_reset = num_buffer_policy_train_steps_on_reset
        self.advantage_weighted_buffer_loss = advantage_weighted_buffer_loss
def get_batch_from_buffer(self, replay_buffer, batch_size):
batch = replay_buffer.random_batch(batch_size)
batch = np_to_pytorch_batch(batch)
return batch
def run_bc_batch(self, replay_buffer, policy):
batch = self.get_batch_from_buffer(replay_buffer, self.bc_batch_size)
o = batch["observations"]
u = batch["actions"]
# g = batch["resampled_goals"]
# og = torch.cat((o, g), dim=1)
og = o
# pred_u, *_ = self.policy(og)
dist = policy(og)
pred_u, log_pi = dist.rsample_and_logprob()
stats = dist.get_diagnostics()
mse = (pred_u - u) ** 2
mse_loss = mse.mean()
        policy_logpp = dist.log_prob(u)
logp_loss = -policy_logpp.mean()
policy_loss = logp_loss
return policy_loss, logp_loss, mse_loss, stats
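    # Hedged worked example (added): for a unit Gaussian policy evaluated at
    # its own mean, log_prob = -0.5 * ln(2 * pi) ~= -0.9189 per action
    # dimension, so logp_loss (its negation) is ~0.9189 and mse_loss is 0;
    # policy_loss above is exactly logp_loss, while mse_loss is diagnostic only.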
    def pretrain_policy_with_bc(self, policy, train_buffer, test_buffer, steps, label="policy"):
logger.remove_tabular_output(
'progress.csv', relative_to_snapshot_dir=True,
)
logger.add_tabular_output(
'pretrain_%s.csv' % label, relative_to_snapshot_dir=True,
)
optimizer = self.optimizers[policy]
prev_time = time.time()
for i in range(steps):
train_policy_loss, train_logp_loss, train_mse_loss, train_stats = self.run_bc_batch(train_buffer, policy)
train_policy_loss = train_policy_loss * self.bc_weight
optimizer.zero_grad()
train_policy_loss.backward()
optimizer.step()
test_policy_loss, test_logp_loss, test_mse_loss, test_stats = self.run_bc_batch(test_buffer, policy)
test_policy_loss = test_policy_loss * self.bc_weight
            if i % self.pretraining_logging_period == 0:
stats = {
"pretrain_bc/batch": i,
"pretrain_bc/Train Logprob Loss": ptu.get_numpy(train_logp_loss),
"pretrain_bc/Test Logprob Loss": ptu.get_numpy(test_logp_loss),
"pretrain_bc/Train MSE": ptu.get_numpy(train_mse_loss),
"pretrain_bc/Test MSE": ptu.get_numpy(test_mse_loss),
"pretrain_bc/train_policy_loss": ptu.get_numpy(train_policy_loss),
"pretrain_bc/test_policy_loss": ptu.get_numpy(test_policy_loss),
"pretrain_bc/epoch_time":time.time()-prev_time,
}
logger.record_dict(stats)
logger.dump_tabular(with_prefix=True, with_timestamp=False)
                # Dump the policy being pretrained (the `policy` argument), not
                # necessarily self.policy, so buffer-policy pretraining saves the
                # right object under its own label.
                pickle.dump(policy, open(logger.get_snapshot_dir() + '/bc_%s.pkl' % label, "wb"))
prev_time = time.time()
logger.remove_tabular_output(
'pretrain_%s.csv' % label, relative_to_snapshot_dir=True,
)
logger.add_tabular_output(
'progress.csv', relative_to_snapshot_dir=True,
)
if self.post_bc_pretrain_hyperparams:
self.set_algorithm_weights(**self.post_bc_pretrain_hyperparams)
def pretrain_q_with_bc_data(self):
logger.remove_tabular_output(
'progress.csv', relative_to_snapshot_dir=True
)
logger.add_tabular_output(
'pretrain_q.csv', relative_to_snapshot_dir=True
)
self.update_policy = False
# first train only the Q function
for i in range(self.q_num_pretrain1_steps):
self.eval_statistics = dict()
train_data = self.replay_buffer.random_batch(self.bc_batch_size)
train_data = np_to_pytorch_batch(train_data)
obs = train_data['observations']
next_obs = train_data['next_observations']
# goals = train_data['resampled_goals']
train_data['observations'] = obs # torch.cat((obs, goals), dim=1)
train_data['next_observations'] = next_obs # torch.cat((next_obs, goals), dim=1)
self.train_from_torch(train_data, pretrain=True)
            if i % self.pretraining_logging_period == 0:
stats_with_prefix = add_prefix(self.eval_statistics, prefix="trainer/")
logger.record_dict(stats_with_prefix)
logger.dump_tabular(with_prefix=True, with_timestamp=False)
self.update_policy = True
# then train policy and Q function together
prev_time = time.time()
for i in range(self.q_num_pretrain2_steps):
self.eval_statistics = dict()
if i % self.pretraining_logging_period == 0:
                self._need_to_update_eval_statistics = True
train_data = self.replay_buffer.random_batch(self.bc_batch_size)
train_data = np_to_pytorch_batch(train_data)
obs = train_data['observations']
next_obs = train_data['next_observations']
# goals = train_data['resampled_goals']
train_data['observations'] = obs # torch.cat((obs, goals), dim=1)
train_data['next_observations'] = next_obs # torch.cat((next_obs, goals), dim=1)
self.train_from_torch(train_data, pretrain=True)
            if i % self.pretraining_logging_period == 0:
                self.eval_statistics["batch"] = i
                self.eval_statistics["epoch_time"] = time.time() - prev_time
stats_with_prefix = add_prefix(self.eval_statistics, prefix="trainer/")
logger.record_dict(stats_with_prefix)
logger.dump_tabular(with_prefix=True, with_timestamp=False)
prev_time = time.time()
logger.remove_tabular_output(
'pretrain_q.csv',
relative_to_snapshot_dir=True,
)
logger.add_tabular_output(
'progress.csv',
relative_to_snapshot_dir=True,
)
self._need_to_update_eval_statistics = True
self.eval_statistics = dict()
if self.post_pretrain_hyperparams:
self.set_algorithm_weights(**self.post_pretrain_hyperparams)
def set_algorithm_weights(
self,
**kwargs
):
for key in kwargs:
self.__dict__[key] = kwargs[key]
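    # Hedged usage sketch (hypothetical values, not from the original code):
    #   self.set_algorithm_weights(bc_weight=0.0, rl_weight=1.0)
    # simply overwrites the matching attributes through self.__dict__, which
    # is how post_pretrain_hyperparams / post_bc_pretrain_hyperparams are
    # applied above.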
def test_from_torch(self, batch):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
weights = batch.get('weights', None)
if self.reward_transform:
rewards = self.reward_transform(rewards)
if self.terminal_transform:
terminals = self.terminal_transform(terminals)
"""
Policy and Alpha Loss
"""
dist = self.policy(obs)
new_obs_actions, log_pi = dist.rsample_and_logprob()
policy_mle = dist.mle_estimate()
if self.use_automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
alpha = self.log_alpha.exp()
else:
alpha_loss = 0
alpha = self.alpha
q1_pred = self.qf1(obs, actions)
q2_pred = self.qf2(obs, actions)
# Make sure policy accounts for squashing functions like tanh correctly!
next_dist = self.policy(next_obs)
new_next_actions, new_log_pi = next_dist.rsample_and_logprob()
target_q_values = torch.min(
self.target_qf1(next_obs, new_next_actions),
self.target_qf2(next_obs, new_next_actions),
) - alpha * new_log_pi
q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
qf1_new_actions = self.qf1(obs, new_obs_actions)
qf2_new_actions = self.qf2(obs, new_obs_actions)
q_new_actions = torch.min(
qf1_new_actions,
qf2_new_actions,
)
policy_loss = (log_pi - q_new_actions).mean()
self.eval_statistics['validation/QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['validation/QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['validation/Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'validation/Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'validation/Q2 Predictions',
ptu.get_numpy(q2_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'validation/Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'validation/Log Pis',
ptu.get_numpy(log_pi),
))
policy_statistics = add_prefix(dist.get_diagnostics(), "validation/policy/")
self.eval_statistics.update(policy_statistics)
    def train_from_torch(self, batch, train=True, pretrain=False):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
weights = batch.get('weights', None)
if self.reward_transform:
rewards = self.reward_transform(rewards)
if self.terminal_transform:
terminals = self.terminal_transform(terminals)
"""
Policy and Alpha Loss
"""
dist = self.policy(obs)
new_obs_actions, log_pi = dist.rsample_and_logprob()
policy_mle = dist.mle_estimate()
if self.brac:
buf_dist = self.buffer_policy(obs)
buf_log_pi = buf_dist.log_prob(actions)
rewards = rewards + buf_log_pi
if self.use_automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
alpha = self.log_alpha.exp()
else:
alpha_loss = 0
alpha = self.alpha
"""
QF Loss
"""
q1_pred = self.qf1(obs, actions)
q2_pred = self.qf2(obs, actions)
# Make sure policy accounts for squashing functions like tanh correctly!
next_dist = self.policy(next_obs)
new_next_actions, new_log_pi = next_dist.rsample_and_logprob()
target_q_values = torch.min(
self.target_qf1(next_obs, new_next_actions),
self.target_qf2(next_obs, new_next_actions),
) - alpha * new_log_pi
q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
qf1_loss = self.qf_criterion(q1_pred, q_target.detach())
qf2_loss = self.qf_criterion(q2_pred, q_target.detach())
"""
Policy Loss
"""
qf1_new_actions = self.qf1(obs, new_obs_actions)
qf2_new_actions = self.qf2(obs, new_obs_actions)
q_new_actions = torch.min(
qf1_new_actions,
qf2_new_actions,
)
# Advantage-weighted regression
if self.awr_use_mle_for_vf:
v1_pi = self.qf1(obs, policy_mle)
v2_pi = self.qf2(obs, policy_mle)
v_pi = torch.min(v1_pi, v2_pi)
else:
if self.vf_K > 1:
vs = []
for i in range(self.vf_K):
u = dist.sample()
q1 = self.qf1(obs, u)
q2 = self.qf2(obs, u)
v = torch.min(q1, q2)
# v = q1
vs.append(v)
v_pi = torch.cat(vs, 1).mean(dim=1)
else:
# v_pi = self.qf1(obs, new_obs_actions)
v1_pi = self.qf1(obs, new_obs_actions)
v2_pi = self.qf2(obs, new_obs_actions)
v_pi = torch.min(v1_pi, v2_pi)
if self.awr_sample_actions:
u = new_obs_actions
if self.awr_min_q:
q_adv = q_new_actions
else:
q_adv = qf1_new_actions
elif self.buffer_policy_sample_actions:
buf_dist = self.buffer_policy(obs)
u, _ = buf_dist.rsample_and_logprob()
qf1_buffer_actions = self.qf1(obs, u)
qf2_buffer_actions = self.qf2(obs, u)
q_buffer_actions = torch.min(
qf1_buffer_actions,
qf2_buffer_actions,
)
if self.awr_min_q:
q_adv = q_buffer_actions
else:
q_adv = qf1_buffer_actions
else:
u = actions
if self.awr_min_q:
q_adv = torch.min(q1_pred, q2_pred)
else:
q_adv = q1_pred
policy_logpp = dist.log_prob(u)
if self.use_automatic_beta_tuning:
buffer_dist = self.buffer_policy(obs)
beta = self.log_beta.exp()
kldiv = torch.distributions.kl.kl_divergence(dist, buffer_dist)
            beta_loss = -1 * (beta * (kldiv - self.beta_epsilon).detach()).mean()
self.beta_optimizer.zero_grad()
beta_loss.backward()
self.beta_optimizer.step()
else:
beta = self.beta_schedule.get_value(self._n_train_steps_total)
if self.normalize_over_state == "advantage":
score = q_adv - v_pi
if self.mask_positive_advantage:
score = torch.sign(score)
elif self.normalize_over_state == "Z":
buffer_dist = self.buffer_policy(obs)
K = self.Z_K
buffer_obs = []
buffer_actions = []
log_bs = []
log_pis = []
for i in range(K):
u = buffer_dist.sample()
log_b = buffer_dist.log_prob(u)
log_pi = dist.log_prob(u)
buffer_obs.append(obs)
buffer_actions.append(u)
log_bs.append(log_b)
log_pis.append(log_pi)
buffer_obs = torch.cat(buffer_obs, 0)
buffer_actions = torch.cat(buffer_actions, 0)
            p_buffer = torch.exp(torch.cat(log_bs, 0).sum(dim=1))
            log_pi = torch.cat(log_pis, 0)
            log_pi = log_pi.sum(dim=1)
q1_b = self.qf1(buffer_obs, buffer_actions)
q2_b = self.qf2(buffer_obs, buffer_actions)
q_b = torch.min(q1_b, q2_b)
q_b = torch.reshape(q_b, (-1, K))
adv_b = q_b - v_pi
# if self._n_train_steps_total % 100 == 0:
# import ipdb; ipdb.set_trace()
# Z = torch.exp(adv_b / beta).mean(dim=1, keepdim=True)
# score = torch.exp((q_adv - v_pi) / beta) / Z
# score = score / sum(score)
logK = torch.log(ptu.tensor(float(K)))
logZ = torch.logsumexp(adv_b/beta - logK, dim=1, keepdim=True)
logS = (q_adv - v_pi)/beta - logZ
# logZ = torch.logsumexp(q_b/beta - logK, dim=1, keepdim=True)
# logS = q_adv/beta - logZ
score = F.softmax(logS, dim=0) # score / sum(score)
else:
            raise ValueError("Unsupported normalize_over_state: {}".format(self.normalize_over_state))
if self.clip_score is not None:
score = torch.clamp(score, max=self.clip_score)
if self.weight_loss and weights is None:
if self.normalize_over_batch == True:
weights = F.softmax(score / beta, dim=0)
elif self.normalize_over_batch == "whiten":
adv_mean = torch.mean(score)
adv_std = torch.std(score) + 1e-5
normalized_score = (score - adv_mean) / adv_std
weights = torch.exp(normalized_score / beta)
elif self.normalize_over_batch == "exp":
weights = torch.exp(score / beta)
elif self.normalize_over_batch == "step_fn":
weights = (score > 0).float()
elif self.normalize_over_batch == False:
weights = score
else:
                raise ValueError("Unsupported normalize_over_batch: {}".format(self.normalize_over_batch))
weights = weights[:, 0]
policy_loss = alpha * log_pi.mean()
if self.use_awr_update and self.weight_loss:
policy_loss = policy_loss + self.awr_weight * (-policy_logpp * len(weights)*weights.detach()).mean()
elif self.use_awr_update:
policy_loss = policy_loss + self.awr_weight * (-policy_logpp).mean()
if self.use_reparam_update:
policy_loss = policy_loss + self.reparam_weight * (-q_new_actions).mean()
policy_loss = self.rl_weight * policy_loss
if self.compute_bc:
train_policy_loss, train_logp_loss, train_mse_loss, _ = self.run_bc_batch(self.demo_train_buffer, self.policy)
policy_loss = policy_loss + self.bc_weight * train_policy_loss
        if not pretrain and self.buffer_policy_reset_period > 0 and self._n_train_steps_total % self.buffer_policy_reset_period == 0:
del self.buffer_policy_optimizer
self.buffer_policy_optimizer = self.optimizer_class(
self.buffer_policy.parameters(),
weight_decay=self.policy_weight_decay,
lr=self.policy_lr,
)
self.optimizers[self.buffer_policy] = self.buffer_policy_optimizer
for i in range(self.num_buffer_policy_train_steps_on_reset):
if self.train_bc_on_rl_buffer:
if self.advantage_weighted_buffer_loss:
buffer_dist = self.buffer_policy(obs)
buffer_u = actions
buffer_new_obs_actions, _ = buffer_dist.rsample_and_logprob()
buffer_policy_logpp = buffer_dist.log_prob(buffer_u)
buffer_policy_logpp = buffer_policy_logpp[:, None]
buffer_q1_pred = self.qf1(obs, buffer_u)
buffer_q2_pred = self.qf2(obs, buffer_u)
buffer_q_adv = torch.min(buffer_q1_pred, buffer_q2_pred)
buffer_v1_pi = self.qf1(obs, buffer_new_obs_actions)
buffer_v2_pi = self.qf2(obs, buffer_new_obs_actions)
buffer_v_pi = torch.min(buffer_v1_pi, buffer_v2_pi)
buffer_score = buffer_q_adv - buffer_v_pi
buffer_weights = F.softmax(buffer_score / beta, dim=0)
buffer_policy_loss = self.awr_weight * (-buffer_policy_logpp * len(buffer_weights)*buffer_weights.detach()).mean()
else:
buffer_policy_loss, buffer_train_logp_loss, buffer_train_mse_loss, _ = self.run_bc_batch(
self.replay_buffer.train_replay_buffer, self.buffer_policy)
self.buffer_policy_optimizer.zero_grad()
buffer_policy_loss.backward(retain_graph=True)
self.buffer_policy_optimizer.step()
if self.train_bc_on_rl_buffer:
if self.advantage_weighted_buffer_loss:
buffer_dist = self.buffer_policy(obs)
buffer_u = actions
buffer_new_obs_actions, _ = buffer_dist.rsample_and_logprob()
buffer_policy_logpp = buffer_dist.log_prob(buffer_u)
buffer_policy_logpp = buffer_policy_logpp[:, None]
buffer_q1_pred = self.qf1(obs, buffer_u)
buffer_q2_pred = self.qf2(obs, buffer_u)
buffer_q_adv = torch.min(buffer_q1_pred, buffer_q2_pred)
buffer_v1_pi = self.qf1(obs, buffer_new_obs_actions)
buffer_v2_pi = self.qf2(obs, buffer_new_obs_actions)
buffer_v_pi = torch.min(buffer_v1_pi, buffer_v2_pi)
buffer_score = buffer_q_adv - buffer_v_pi
buffer_weights = F.softmax(buffer_score / beta, dim=0)
buffer_policy_loss = self.awr_weight * (-buffer_policy_logpp * len(buffer_weights)*buffer_weights.detach()).mean()
else:
buffer_policy_loss, buffer_train_logp_loss, buffer_train_mse_loss, _ = self.run_bc_batch(
self.replay_buffer.train_replay_buffer, self.buffer_policy)
"""
Update networks
"""
if self._n_train_steps_total % self.q_update_period == 0:
self.qf1_optimizer.zero_grad()
qf1_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.zero_grad()
qf2_loss.backward()
self.qf2_optimizer.step()
if self._n_train_steps_total % self.policy_update_period == 0 and self.update_policy:
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
        if self.train_bc_on_rl_buffer and self._n_train_steps_total % self.policy_update_period == 0:
self.buffer_policy_optimizer.zero_grad()
buffer_policy_loss.backward()
self.buffer_policy_optimizer.step()
"""
Soft Updates
"""
if self._n_train_steps_total % self.target_update_period == 0:
ptu.soft_update_from_to(
self.qf1, self.target_qf1, self.soft_target_tau
)
ptu.soft_update_from_to(
self.qf2, self.target_qf2, self.soft_target_tau
)
"""
Save some statistics for eval
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
policy_loss = (log_pi - q_new_actions).mean()
self.eval_statistics['QF1 Loss'] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics['QF2 Loss'] = np.mean(ptu.get_numpy(qf2_loss))
self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
policy_loss
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q1 Predictions',
ptu.get_numpy(q1_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q2 Predictions',
ptu.get_numpy(q2_pred),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Q Targets',
ptu.get_numpy(q_target),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Log Pis',
ptu.get_numpy(log_pi),
))
self.eval_statistics.update(create_stats_ordered_dict(
'rewards',
ptu.get_numpy(rewards),
))
self.eval_statistics.update(create_stats_ordered_dict(
'terminals',
ptu.get_numpy(terminals),
))
policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
self.eval_statistics.update(policy_statistics)
self.eval_statistics.update(create_stats_ordered_dict(
'Advantage Weights',
ptu.get_numpy(weights),
))
self.eval_statistics.update(create_stats_ordered_dict(
'Advantage Score',
ptu.get_numpy(score),
))
if self.normalize_over_state == "Z":
self.eval_statistics.update(create_stats_ordered_dict(
'logZ',
ptu.get_numpy(logZ),
))
if self.use_automatic_entropy_tuning:
self.eval_statistics['Alpha'] = alpha.item()
self.eval_statistics['Alpha Loss'] = alpha_loss.item()
if self.compute_bc:
test_policy_loss, test_logp_loss, test_mse_loss, _ = self.run_bc_batch(self.demo_test_buffer, self.policy)
self.eval_statistics.update({
"bc/Train Logprob Loss": ptu.get_numpy(train_logp_loss),
"bc/Test Logprob Loss": ptu.get_numpy(test_logp_loss),
"bc/Train MSE": ptu.get_numpy(train_mse_loss),
"bc/Test MSE": ptu.get_numpy(test_mse_loss),
"bc/train_policy_loss": ptu.get_numpy(train_policy_loss),
"bc/test_policy_loss": ptu.get_numpy(test_policy_loss),
})
if self.train_bc_on_rl_buffer:
_, buffer_train_logp_loss, _, _ = self.run_bc_batch(
self.replay_buffer.train_replay_buffer,
self.buffer_policy)
_, buffer_test_logp_loss, _, _ = self.run_bc_batch(
self.replay_buffer.validation_replay_buffer,
self.buffer_policy)
buffer_dist = self.buffer_policy(obs)
kldiv = torch.distributions.kl.kl_divergence(dist, buffer_dist)
_, train_offline_logp_loss, _, _ = self.run_bc_batch(
self.demo_train_buffer,
self.buffer_policy)
_, test_offline_logp_loss, _, _ = self.run_bc_batch(
self.demo_test_buffer,
self.buffer_policy)
self.eval_statistics.update({
"buffer_policy/Train Online Logprob": -1 * ptu.get_numpy(buffer_train_logp_loss),
"buffer_policy/Test Online Logprob": -1 * ptu.get_numpy(buffer_test_logp_loss),
"buffer_policy/Train Offline Logprob": -1 * ptu.get_numpy(train_offline_logp_loss),
"buffer_policy/Test Offline Logprob": -1 * ptu.get_numpy(test_offline_logp_loss),
"buffer_policy/train_policy_loss": ptu.get_numpy(buffer_policy_loss),
# "buffer_policy/test_policy_loss": ptu.get_numpy(buffer_test_policy_loss),
"buffer_policy/kl_div": ptu.get_numpy(kldiv.mean()),
})
if self.use_automatic_beta_tuning:
self.eval_statistics.update({
"adaptive_beta/beta":ptu.get_numpy(beta.mean()),
"adaptive_beta/beta loss": ptu.get_numpy(beta_loss.mean()),
})
if self.validation_qlearning:
train_data = self.replay_buffer.validation_replay_buffer.random_batch(self.bc_batch_size)
train_data = np_to_pytorch_batch(train_data)
obs = train_data['observations']
next_obs = train_data['next_observations']
# goals = train_data['resampled_goals']
train_data['observations'] = obs # torch.cat((obs, goals), dim=1)
train_data['next_observations'] = next_obs # torch.cat((next_obs, goals), dim=1)
self.test_from_torch(train_data)
self._n_train_steps_total += 1
def get_diagnostics(self):
stats = super().get_diagnostics()
stats.update(self.eval_statistics)
return stats
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
@property
def networks(self):
nets = [
self.policy,
self.qf1,
self.qf2,
self.target_qf1,
self.target_qf2,
]
if self.buffer_policy:
nets.append(self.buffer_policy)
return nets
def get_snapshot(self):
return dict(
policy=self.policy,
qf1=self.qf1,
qf2=self.qf2,
            target_qf1=self.target_qf1,
            target_qf2=self.target_qf2,
buffer_policy=self.buffer_policy,
)
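# --- Hedged illustration (added; not part of the original trainer) ---
# A self-contained sketch of the advantage-weighted regression update used in
# train_from_torch above: score = Q(s, a) - V(s), weights = softmax(score / beta),
# loss = -(log pi(a|s) * N * weights.detach()).mean(). All tensors below are
# synthetic stand-ins, not values produced by the trainer.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    torch.manual_seed(0)
    n = 4
    q_adv = torch.randn(n, 1)                   # stand-in for min(Q1, Q2) on buffer actions
    v_pi = torch.randn(n, 1)                    # stand-in for the value estimate
    logp = torch.randn(n, requires_grad=True)   # stand-in for dist.log_prob(u)
    beta = 1.0
    score = q_adv - v_pi
    weights = F.softmax(score / beta, dim=0)[:, 0]
    awr_loss = (-logp * n * weights.detach()).mean()
    awr_loss.backward()                         # gradients flow only through the log-probs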
| 40.131257 | 138 | 0.60347 |
3483bcbc46cc2b1218621203547ee28b44ecabec | 446 | py | Python | Lectures/Lecture 1/task2.py | tochanenko/MetaProgramming | d37f21432483e39e135fd0dc4f8767836eea1609 | [
"MIT"
] | null | null | null | Lectures/Lecture 1/task2.py | tochanenko/MetaProgramming | d37f21432483e39e135fd0dc4f8767836eea1609 | [
"MIT"
] | null | null | null | Lectures/Lecture 1/task2.py | tochanenko/MetaProgramming | d37f21432483e39e135fd0dc4f8767836eea1609 | [
"MIT"
] | null | null | null | # Check if three dots can create a triangle
(x1, y1) = float(input("x1:")), float(input("y1:"))
(x2, y2) = float(input("x2:")), float(input("y2:"))
(x3, y3) = float(input("x3:")), float(input("y3:"))
print('A[', x1, ';', y1, '], B[', x2, ';', y2, '], C[', x3, ';', y3, ']')
# Twice the signed area of triangle ABC; zero exactly when the points are collinear
triangle_value = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
if triangle_value != 0:
print('Can create a triangle')
else:
print('Can\'t create a triangle')
| 29.733333 | 73 | 0.533632 |
641b9a0a0e80a4b5f93825da660fe5f6de3b152d | 17,876 | py | Python | deribit_api_python/openapi_client/models/types.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 5 | 2019-06-06T04:48:34.000Z | 2019-10-14T00:31:21.000Z | deribit_api_python/openapi_client/models/types.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 1 | 2019-10-15T08:55:21.000Z | 2019-10-15T08:55:21.000Z | deribit_api_python/openapi_client/models/types.py | jDally987/deribit-simple-gui | 60c91f8e11b541b0e59cbd23625639a9b9f0dd43 | [
"MIT"
] | 4 | 2019-07-27T16:50:14.000Z | 2019-11-13T21:03:50.000Z | # coding: utf-8
"""
Deribit API
#Overview Deribit provides three different interfaces to access the API: * [JSON-RPC over Websocket](#json-rpc) * [JSON-RPC over HTTP](#json-rpc) * [FIX](#fix-api) (Financial Information eXchange) With the API Console you can use and test the JSON-RPC API, both via HTTP and via Websocket. To visit the API console, go to __Account > API tab > API Console tab.__ ##Naming Deribit tradeable assets or instruments use the following system of naming: |Kind|Examples|Template|Comments| |----|--------|--------|--------| |Future|<code>BTC-25MAR16</code>, <code>BTC-5AUG16</code>|<code>BTC-DMMMYY</code>|<code>BTC</code> is currency, <code>DMMMYY</code> is expiration date, <code>D</code> stands for day of month (1 or 2 digits), <code>MMM</code> - month (3 first letters in English), <code>YY</code> stands for year.| |Perpetual|<code>BTC-PERPETUAL</code> ||Perpetual contract for currency <code>BTC</code>.| |Option|<code>BTC-25MAR16-420-C</code>, <code>BTC-5AUG16-580-P</code>|<code>BTC-DMMMYY-STRIKE-K</code>|<code>STRIKE</code> is option strike price in USD. Template <code>K</code> is option kind: <code>C</code> for call options or <code>P</code> for put options.| # JSON-RPC JSON-RPC is a light-weight remote procedure call (RPC) protocol. The [JSON-RPC specification](https://www.jsonrpc.org/specification) defines the data structures that are used for the messages that are exchanged between client and server, as well as the rules around their processing. JSON-RPC uses JSON (RFC 4627) as data format. JSON-RPC is transport agnostic: it does not specify which transport mechanism must be used. The Deribit API supports both Websocket (preferred) and HTTP (with limitations: subscriptions are not supported over HTTP). ## Request messages > An example of a request message: ```json { \"jsonrpc\": \"2.0\", \"id\": 8066, \"method\": \"public/ticker\", \"params\": { \"instrument\": \"BTC-24AUG18-6500-P\" } } ``` According to the JSON-RPC sepcification the requests must be JSON objects with the following fields. |Name|Type|Description| |----|----|-----------| |jsonrpc|string|The version of the JSON-RPC spec: \"2.0\"| |id|integer or string|An identifier of the request. If it is included, then the response will contain the same identifier| |method|string|The method to be invoked| |params|object|The parameters values for the method. The field names must match with the expected parameter names. The parameters that are expected are described in the documentation for the methods, below.| <aside class=\"warning\"> The JSON-RPC specification describes two features that are currently not supported by the API: <ul> <li>Specification of parameter values by position</li> <li>Batch requests</li> </ul> </aside> ## Response messages > An example of a response message: ```json { \"jsonrpc\": \"2.0\", \"id\": 5239, \"testnet\": false, \"result\": [ { \"currency\": \"BTC\", \"currencyLong\": \"Bitcoin\", \"minConfirmation\": 2, \"txFee\": 0.0006, \"isActive\": true, \"coinType\": \"BITCOIN\", \"baseAddress\": null } ], \"usIn\": 1535043730126248, \"usOut\": 1535043730126250, \"usDiff\": 2 } ``` The JSON-RPC API always responds with a JSON object with the following fields. |Name|Type|Description| |----|----|-----------| |id|integer|This is the same id that was sent in the request.| |result|any|If successful, the result of the API call. The format for the result is described with each method.| |error|error object|Only present if there was an error invoking the method. 
The error object is described below.| |testnet|boolean|Indicates whether the API in use is actually the test API. <code>false</code> for production server, <code>true</code> for test server.| |usIn|integer|The timestamp when the requests was received (microseconds since the Unix epoch)| |usOut|integer|The timestamp when the response was sent (microseconds since the Unix epoch)| |usDiff|integer|The number of microseconds that was spent handling the request| <aside class=\"notice\"> The fields <code>testnet</code>, <code>usIn</code>, <code>usOut</code> and <code>usDiff</code> are not part of the JSON-RPC standard. <p>In order not to clutter the examples they will generally be omitted from the example code.</p> </aside> > An example of a response with an error: ```json { \"jsonrpc\": \"2.0\", \"id\": 8163, \"error\": { \"code\": 11050, \"message\": \"bad_request\" }, \"testnet\": false, \"usIn\": 1535037392434763, \"usOut\": 1535037392448119, \"usDiff\": 13356 } ``` In case of an error the response message will contain the error field, with as value an object with the following with the following fields: |Name|Type|Description |----|----|-----------| |code|integer|A number that indicates the kind of error.| |message|string|A short description that indicates the kind of error.| |data|any|Additional data about the error. This field may be omitted.| ## Notifications > An example of a notification: ```json { \"jsonrpc\": \"2.0\", \"method\": \"subscription\", \"params\": { \"channel\": \"deribit_price_index.btc_usd\", \"data\": { \"timestamp\": 1535098298227, \"price\": 6521.17, \"index_name\": \"btc_usd\" } } } ``` API users can subscribe to certain types of notifications. This means that they will receive JSON-RPC notification-messages from the server when certain events occur, such as changes to the index price or changes to the order book for a certain instrument. The API methods [public/subscribe](#public-subscribe) and [private/subscribe](#private-subscribe) are used to set up a subscription. Since HTTP does not support the sending of messages from server to client, these methods are only availble when using the Websocket transport mechanism. At the moment of subscription a \"channel\" must be specified. The channel determines the type of events that will be received. See [Subscriptions](#subscriptions) for more details about the channels. In accordance with the JSON-RPC specification, the format of a notification is that of a request message without an <code>id</code> field. The value of the <code>method</code> field will always be <code>\"subscription\"</code>. The <code>params</code> field will always be an object with 2 members: <code>channel</code> and <code>data</code>. The value of the <code>channel</code> member is the name of the channel (a string). The value of the <code>data</code> member is an object that contains data that is specific for the channel. ## Authentication > An example of a JSON request with token: ```json { \"id\": 5647, \"method\": \"private/get_subaccounts\", \"params\": { \"access_token\": \"67SVutDoVZSzkUStHSuk51WntMNBJ5mh5DYZhwzpiqDF\" } } ``` The API consists of `public` and `private` methods. The public methods do not require authentication. The private methods use OAuth 2.0 authentication. This means that a valid OAuth access token must be included in the request, which can get achived by calling method [public/auth](#public-auth). 
When the token was assigned to the user, it should be passed along, with other request parameters, back to the server: |Connection type|Access token placement |----|-----------| |**Websocket**|Inside request JSON parameters, as an `access_token` field| |**HTTP (REST)**|Header `Authorization: bearer ```Token``` ` value| ### Additional authorization method - basic user credentials <span style=\"color:red\"><b> ! Not recommended - however, it could be useful for quick testing API</b></span></br> Every `private` method could be accessed by providing, inside HTTP `Authorization: Basic XXX` header, values with user `ClientId` and assigned `ClientSecret` (both values can be found on the API page on the Deribit website) encoded with `Base64`: <code>Authorization: Basic BASE64(`ClientId` + `:` + `ClientSecret`)</code> ### Additional authorization method - Deribit signature credentials The Derbit service provides dedicated authorization method, which harness user generated signature to increase security level for passing request data. Generated value is passed inside `Authorization` header, coded as: <code>Authorization: deri-hmac-sha256 id=```ClientId```,ts=```Timestamp```,sig=```Signature```,nonce=```Nonce```</code> where: |Deribit credential|Description |----|-----------| |*ClientId*|Can be found on the API page on the Deribit website| |*Timestamp*|Time when the request was generated - given as **miliseconds**. It's valid for **60 seconds** since generation, after that time any request with an old timestamp will be rejected.| |*Signature*|Value for signature calculated as described below | |*Nonce*|Single usage, user generated initialization vector for the server token| The signature is generated by the following formula: <code> Signature = HEX_STRING( HMAC-SHA256( ClientSecret, StringToSign ) );</code></br> <code> StringToSign = Timestamp + \"\\n\" + Nonce + \"\\n\" + RequestData;</code></br> <code> RequestData = UPPERCASE(HTTP_METHOD()) + \"\\n\" + URI() + \"\\n\" + RequestBody + \"\\n\";</code></br> e.g. (using shell with ```openssl``` tool): <code> ClientId=AAAAAAAAAAA</code></br> <code> ClientSecret=ABCD</code></br> <code> Timestamp=$( date +%s000 )</code></br> <code> Nonce=$( cat /dev/urandom | tr -dc 'a-z0-9' | head -c8 )</code></br> <code> URI=\"/api/v2/private/get_account_summary?currency=BTC\"</code></br> <code> HttpMethod=GET</code></br> <code> Body=\"\"</code></br></br> <code> Signature=$( echo -ne \"${Timestamp}\\n${Nonce}\\n${HttpMethod}\\n${URI}\\n${Body}\\n\" | openssl sha256 -r -hmac \"$ClientSecret\" | cut -f1 -d' ' )</code></br></br> <code> echo $Signature</code></br></br> <code> shell output> ea40d5e5e4fae235ab22b61da98121fbf4acdc06db03d632e23c66bcccb90d2c (**WARNING**: Exact value depends on current timestamp and client credentials</code></br></br> <code> curl -s -X ${HttpMethod} -H \"Authorization: deri-hmac-sha256 id=${ClientId},ts=${Timestamp},nonce=${Nonce},sig=${Signature}\" \"https://www.deribit.com${URI}\"</code></br></br> ### Additional authorization method - signature credentials (WebSocket API) When connecting through Websocket, user can request for authorization using ```client_credential``` method, which requires providing following parameters (as a part of JSON request): |JSON parameter|Description |----|-----------| |*grant_type*|Must be **client_signature**| |*client_id*|Can be found on the API page on the Deribit website| |*timestamp*|Time when the request was generated - given as **miliseconds**. 
It's valid for **60 seconds** since generation, after that time any request with an old timestamp will be rejected.| |*signature*|Value for signature calculated as described below | |*nonce*|Single usage, user generated initialization vector for the server token| |*data*|**Optional** field, which contains any user specific value| The signature is generated by the following formula: <code> StringToSign = Timestamp + \"\\n\" + Nonce + \"\\n\" + Data;</code></br> <code> Signature = HEX_STRING( HMAC-SHA256( ClientSecret, StringToSign ) );</code></br> e.g. (using shell with ```openssl``` tool): <code> ClientId=AAAAAAAAAAA</code></br> <code> ClientSecret=ABCD</code></br> <code> Timestamp=$( date +%s000 ) # e.g. 1554883365000 </code></br> <code> Nonce=$( cat /dev/urandom | tr -dc 'a-z0-9' | head -c8 ) # e.g. fdbmmz79 </code></br> <code> Data=\"\"</code></br></br> <code> Signature=$( echo -ne \"${Timestamp}\\n${Nonce}\\n${Data}\\n\" | openssl sha256 -r -hmac \"$ClientSecret\" | cut -f1 -d' ' )</code></br></br> <code> echo $Signature</code></br></br> <code> shell output> e20c9cd5639d41f8bbc88f4d699c4baf94a4f0ee320e9a116b72743c449eb994 (**WARNING**: Exact value depends on current timestamp and client credentials</code></br></br> You can also check the signature value using some online tools like, e.g: [https://codebeautify.org/hmac-generator](https://codebeautify.org/hmac-generator) (but don't forget about adding *newline* after each part of the hashed text and remember that you **should use** it only with your **test credentials**). Here's a sample JSON request created using the values from the example above: <code> { </br> \"jsonrpc\" : \"2.0\", </br> \"id\" : 9929, </br> \"method\" : \"public/auth\", </br> \"params\" : </br> { </br> \"grant_type\" : \"client_signature\", </br> \"client_id\" : \"AAAAAAAAAAA\", </br> \"timestamp\": \"1554883365000\", </br> \"nonce\": \"fdbmmz79\", </br> \"data\": \"\", </br> \"signature\" : \"e20c9cd5639d41f8bbc88f4d699c4baf94a4f0ee320e9a116b72743c449eb994\" </br> } </br> } </br> </code> ### Access scope When asking for `access token` user can provide the required access level (called `scope`) which defines what type of functionality he/she wants to use, and whether requests are only going to check for some data or also to update them. Scopes are required and checked for `private` methods, so if you plan to use only `public` information you can stay with values assigned by default. |Scope|Description |----|-----------| |*account:read*|Access to **account** methods - read only data| |*account:read_write*|Access to **account** methods - allows to manage account settings, add subaccounts, etc.| |*trade:read*|Access to **trade** methods - read only data| |*trade:read_write*|Access to **trade** methods - required to create and modify orders| |*wallet:read*|Access to **wallet** methods - read only data| |*wallet:read_write*|Access to **wallet** methods - allows to withdraw, generate new deposit address, etc.| |*wallet:none*, *account:none*, *trade:none*|Blocked access to specified functionality| <span style=\"color:red\">**NOTICE:**</span> Depending on choosing an authentication method (```grant type```) some scopes could be narrowed by the server. e.g. 
when ```grant_type = client_credentials``` and ```scope = wallet:read_write``` it's modified by the server as ```scope = wallet:read```\" ## JSON-RPC over websocket Websocket is the prefered transport mechanism for the JSON-RPC API, because it is faster and because it can support [subscriptions](#subscriptions) and [cancel on disconnect](#private-enable_cancel_on_disconnect). The code examples that can be found next to each of the methods show how websockets can be used from Python or Javascript/node.js. ## JSON-RPC over HTTP Besides websockets it is also possible to use the API via HTTP. The code examples for 'shell' show how this can be done using curl. Note that subscriptions and cancel on disconnect are not supported via HTTP. #Methods # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Types(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""Types - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Types):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
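# Hedged usage sketch (illustrative; not produced by the generator):
#   t = Types()
#   t.to_dict()    # -> {} because the model declares no attributes
#   t == Types()   # -> True; equality compares the instances' __dict__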
| 210.305882 | 15,697 | 0.672298 |
d216215c8ba8dfb6ef0df91140676c39cb765257 | 829 | py | Python | Google/Google kickstart round c 2021/Alien Generator (7pts, 12pts).py | Harry-kp/Cp | 94e687a3a5256913467f50d8f757b12640529513 | [
"MIT"
] | 2 | 2020-11-19T19:21:24.000Z | 2021-04-22T10:53:16.000Z | Google/Google kickstart round c 2021/Alien Generator (7pts, 12pts).py | Harry-kp/Cp | 94e687a3a5256913467f50d8f757b12640529513 | [
"MIT"
] | null | null | null | Google/Google kickstart round c 2021/Alien Generator (7pts, 12pts).py | Harry-kp/Cp | 94e687a3a5256913467f50d8f757b12640529513 | [
"MIT"
] | 1 | 2021-12-02T06:03:17.000Z | 2021-12-02T06:03:17.000Z | from sys import stdin, stdout
import math
# from collections import deque,Counter,OrderedDict
# from collections import defaultdict
# from itertools import permutations,combinations,combinations_with_replacement
# from operator import itemgetter
# import heapq
# from functools import reduce
def ii(): return int(stdin.readline())
def mi(): return map(int, stdin.readline().split())
def li(): return list(mi())
def si(): return stdin.readline()
def solve(n, g):
    # True when g is a sum of n consecutive positive integers, i.e. when
    # k = (2*g - n*n + n) / (2*n) is an integer (k >= 1 is guaranteed by
    # the n < max_val(g) bound used below).
    return (2 * g - n * n + n) % (2 * n) == 0
def max_val(g):
    # Exclusive upper bound on the run length n: the largest n with
    # n*(n+1)/2 <= g (root of n^2 + n - 2g = 0), plus one.
    tmp = math.sqrt(1 + 4 * 2 * g)
    tmp = tmp - 1
    tmp = int(tmp // 2) + 1
    return tmp
t = 1
t = ii()
for _ in range(t):
g = ii()
c = 0
max1 = max_val(g)
for i in range(1, max1):
if solve(i, g):
c += 1
print(f'Case #{_+1}: {c}')
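# --- Hedged cross-check (added for illustration; not part of the solution) ---
# Derivation behind solve(): g = k + (k+1) + ... + (k+n-1) = n*k + n*(n-1)/2,
# so 2*g = 2*n*k + n*n - n and k = (2*g - n*n + n) / (2*n).
def brute_force_count(g):
    # O(g^2) reference count of the same quantity, usable for small g only.
    count = 0
    for k in range(1, g + 1):
        total, term = 0, k
        while total < g:
            total += term
            term += 1
        if total == g:
            count += 1
    return count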
| 20.219512 | 79 | 0.623643 |
d31464a52c7fe59725c1061e229b39d8268c9480 | 798 | py | Python | model-optimizer/mo/ops/assign.py | monroid/openvino | 8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6 | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | model-optimizer/mo/ops/assign.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | model-optimizer/mo/ops/assign.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from mo.graph.graph import Graph, Node
from mo.ops.op import Op
class Assign(Op):
op = 'Assign'
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': self.op,
'op': self.op,
'version': 'opset6',
'infer': self.infer,
'in_ports_count': 1,
'out_ports_count': 1,
}, attrs)
def backend_attrs(self):
return ['variable_id']
@staticmethod
def infer(node: Node):
assert node.has_valid('variable_id'), \
"There is no required attribute variable_id in Assign op with name " + node.id
node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())
| 27.517241 | 90 | 0.593985 |
46b88232e43e77fe9cba827a796567e180d9f8ce | 3,302 | py | Python | tests/algorithms/test_mixup.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | tests/algorithms/test_mixup.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | tests/algorithms/test_mixup.py | ajaysaini725/composer | 00fbf95823cd50354b2410fbd88f06eaf0481662 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
import pytest
import torch
from composer.algorithms import MixUpHparams
from composer.algorithms.mixup.mixup import gen_interpolation_lambda, mixup_batch
from composer.core.types import Event
from composer.models.base import MosaicClassifier
from composer.trainer.trainer_hparams import TrainerHparams
from tests.utils.trainer_fit import train_model
# (N, C, d1, d2, n_classes)
@pytest.fixture(params=[(7, 11, 3, 5, 10)])
def fake_data(request):
# Generate some fake data
N, C, d1, d2, n_classes = request.param
torch.manual_seed(0)
x_fake = torch.randn(N, C, d1, d2)
y_fake = torch.randint(n_classes, size=(N,))
indices = torch.randperm(N)
return x_fake, y_fake, indices
def validate_mixup_batch(x, y, indices, x_mix, int_lamb):
# Create shuffled version of y_fake for reference checking
y_perm = y[indices]
# Explicitly check that the batches and labels have been mixed correctly.
for i in range(x.size(0)): # Grab N
j = indices[i]
# Check the input data
x_mix_test = (1 - int_lamb) * x[i] + int_lamb * x[j]
torch.testing.assert_allclose(x_mix_test, x_mix[i])
# Check the label
perm_label = y[j]
torch.testing.assert_allclose(perm_label, y_perm[i])
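# Hedged numeric illustration of the interpolation checked above (made-up
# values): with int_lamb = 0.25, x[i] = [1.0, 2.0] and x[j] = [3.0, 6.0],
# x_mix[i] = (1 - 0.25) * x[i] + 0.25 * x[j] = [1.5, 3.0].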
@pytest.mark.parametrize('alpha', [.2, 1])
class TestMixUp:
def test_mixup_batch(self, fake_data, alpha):
# Generate fake data
x_fake, y_fake, indices = fake_data
# Get interpolation lambda based on alpha hparam
interpolation_lambda = gen_interpolation_lambda(alpha)
# Apply mixup
x_mix, _, _ = mixup_batch(
x=x_fake,
y=y_fake,
interpolation_lambda=interpolation_lambda,
n_classes=x_fake.size(1), # Grab C
indices=indices)
# Validate results
validate_mixup_batch(x_fake, y_fake, indices, x_mix, interpolation_lambda)
def test_mixup_algorithm(self, fake_data, alpha, dummy_state, dummy_logger):
# Generate fake data
x_fake, y_fake, _ = fake_data
algorithm = MixUpHparams(alpha=alpha).initialize_object()
state = dummy_state
state.model = MosaicClassifier
state.model.num_classes = x_fake.size(1) # Grab C
state.batch = (x_fake, y_fake)
algorithm.apply(Event.INIT, state, dummy_logger)
# Apply algo, use test hooks to specify indices and override internally generated interpolation lambda for testability
algorithm.apply(Event.AFTER_DATALOADER, state, dummy_logger)
x, _ = state.batch
# Use algorithm generated indices and interpolation_lambda for validation
validate_mixup_batch(x_fake, y_fake, algorithm.indices, x, algorithm.interpolation_lambda)
@pytest.mark.xfail
def test_mixup_nclasses(dummy_state, dummy_logger):
algorithm = MixUpHparams(alpha=0.2).initialize_object()
state = dummy_state
state.model = MosaicClassifier
state.model.num_classes = None # This should flag AttributeError
algorithm.apply(Event.AFTER_DATALOADER, state, dummy_logger)
def test_mixup_trains(mosaic_trainer_hparams: TrainerHparams):
mosaic_trainer_hparams.algorithms = [MixUpHparams(alpha=0.2)]
train_model(mosaic_trainer_hparams)
| 35.12766 | 126 | 0.705633 |
6991eec13e7ace802d2f65b14f8452e42a70252f | 8,400 | py | Python | oscar/lib/python2.7/site-packages/django/utils/ipv6.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/utils/ipv6.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/utils/ipv6.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
import re
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address, or the same value
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
if '.' not in hextets[index]:
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
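# Hedged examples of clean_ipv6_address (inputs chosen for illustration):
#   clean_ipv6_address("2001:0db8:0000:0000:0000:0000:0000:0001")
#       -> "2001:db8::1"    (leading zeroes stripped, longest zero run folded)
#   clean_ipv6_address("::ffff:0a0a:0a0a", unpack_ipv4=True)
#       -> "10.10.10.10"    (IPv4-mapped address unpacked)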
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
symbols_re = re.compile(r'^[0-9a-fA-F:.]+$')
if not symbols_re.match(ip_str):
return False
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
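# Hedged example (illustrative): _explode_shorthand_ip_string("2001:db8::1")
# returns "2001:0db8:0000:0000:0000:0000:0000:0001" - the '::' is replaced by
# enough "0000" hextets to reach eight groups, and every hextet is left-padded
# to four lowercase characters.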
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
| 30.656934 | 78 | 0.584405 |
ffe94f9687c14ae6b50eafee34dbc1bbfd01e670 | 38,714 | py | Python | Revolution Rotation Calculator.py | dragonelf260/bars | 7debb2338b3df32426f3188fb4f92bb97d2d874c | [
"MIT"
] | 27 | 2017-11-11T11:14:02.000Z | 2021-06-09T10:57:28.000Z | Revolution Rotation Calculator.py | dragonelf260/bars | 7debb2338b3df32426f3188fb4f92bb97d2d874c | [
"MIT"
] | 25 | 2017-11-09T23:57:37.000Z | 2019-03-25T23:48:09.000Z | Revolution Rotation Calculator.py | dragonelf260/bars | 7debb2338b3df32426f3188fb4f92bb97d2d874c | [
"MIT"
] | 12 | 2017-11-07T23:47:47.000Z | 2020-09-21T19:45:16.000Z | #!/usr/bin/env python3
import itertools
import math
import sys
import time
from typing import List, Dict, Tuple
abilities: List[str] = ["ASPHYXIATE", "ASSAULT", "BACKHAND", "BARGE", "BERSERK", "BINDING SHOT", "BLOOD TENDRILS",
"BOMBARDMENT", "CHAIN", "CLEAVE", "COMBUST", "CONCENTRATED BLAST", "CORRUPTION BLAST",
"CORRUPTION SHOT", "DAZING SHOT", "DEADSHOT", "DEATH'S SWIFTNESS", "DEBILITATE", "DECIMATE",
"DEEP IMPACT", "DESTROY", "DETONATE", "DISMEMBER", "DRAGON BREATH", "FLURRY",
"FORCEFUL BACKHAND", "FRAGMENTATION SHOT", "FRENZY", "FURY", "HAVOC", "HURRICANE", "IMPACT",
"KICK", "MASSACRE", "METAMORPHOSIS", "NEEDLE STRIKE", "OMNIPOWER", "ONSLAUGHT", "OVERPOWER",
"PIERCING SHOT", "PULVERISE", "PUNISH", "QUAKE", "RAPID FIRE", "RICOCHET", "SACRIFICE", "SEVER",
"SHADOW TENDRILS", "SHATTER", "SLAUGHTER", "SLICE", "SMASH", "SMOKE TENDRILS", "SNAP SHOT",
"SNIPE", "SONIC WAVE", "STOMP", "STORM SHARDS", "SUNSHINE", "TIGHT BINDINGS", "TSUNAMI",
"TUSKA'S WRATH", "UNLOAD", "WILD MAGIC", "WRACK"]
# --- Defining how abilities work --- #
# Define cooldowns for cases where no abilities may be used
attack_speed_cooldowns: Dict[str, float] = {"FASTEST": 2.4, "FAST": 3.0, "AVERAGE": 3.6, "SLOW": 4.2, "SLOWEST": 7.2}
# Define ability damage for every ability
ability_damage: Dict[str, float] = {"ASPHYXIATE": 451.2, "ASSAULT": 525.6, "BACKHAND": 60, "BARGE": 75, "BERSERK": 0,
"BINDING SHOT": 60, "BLOOD TENDRILS": 324, "BOMBARDMENT": 131.4, "CHAIN": 60,
"CLEAVE": 112.8, "COMBUST": 241.2, "CONCENTRATED BLAST": 152.8,
"CORRUPTION BLAST": 200, "CORRUPTION SHOT": 200, "DAZING SHOT": 94.2,
"DEADSHOT": 426.13, "DEATH'S SWIFTNESS": 0, "DEBILITATE": 60, "DECIMATE": 112.8,
"DEEP IMPACT": 120, "DESTROY": 451.2, "DETONATE": 225, "DISMEMBER": 120.6,
"DRAGON BREATH": 112.8, "FLURRY": 204, "FORCEFUL BACKHAND": 120,
"FRAGMENTATION SHOT": 120.6, "FRENZY": 610, "FURY": 152.8, "HAVOC": 94.2,
"HURRICANE": 265, "IMPACT": 60, "KICK": 60, "MASSACRE": 426.13, "METAMORPHOSIS": 0,
"NEEDLE STRIKE": 94.2, "OMNIPOWER": 300, "ONSLAUGHT": 532, "OVERPOWER": 300,
"PIERCING SHOT": 56.4, "PULVERISE": 300, "PUNISH": 56.4, "QUAKE": 131.4,
"RAPID FIRE": 451.2, "RICOCHET": 60, "SACRIFICE": 60, "SEVER": 112.8,
"SHADOW TENDRILS": 283, "SHATTER": 0, "SLAUGHTER": 145, "SLICE": 75, "SMASH": 94.2,
"SMOKE TENDRILS": 345, "SNAP SHOT": 265, "SNIPE": 172, "SONIC WAVE": 94.2,
"STOMP": 120, "STORM SHARDS": 0, "SUNSHINE": 0, "TIGHT BINDINGS": 120,
"TSUNAMI": 250, "TUSKA'S WRATH": 5940, "UNLOAD": 610, "WILD MAGIC": 265,
"WRACK": 56.4}
# Define the cooldown of abilities (in seconds)
ability_cooldown: Dict[str, float] = {"ASPHYXIATE": 20.4, "ASSAULT": 30, "BACKHAND": 15, "BARGE": 20.4, "BERSERK": 60,
"BINDING SHOT": 15, "BLOOD TENDRILS": 45, "BOMBARDMENT": 30, "CHAIN": 10.2,
"CLEAVE": 7.2, "COMBUST": 15, "CONCENTRATED BLAST": 5.4, "CORRUPTION BLAST": 15,
"CORRUPTION SHOT": 15, "DAZING SHOT": 5.4, "DEADSHOT": 30,
"DEATH'S SWIFTNESS": 60, "DEBILITATE": 30, "DECIMATE": 7.2, "DEEP IMPACT": 15,
"DESTROY": 20.4, "DETONATE": 30, "DISMEMBER": 15, "DRAGON BREATH": 10.2,
"FLURRY": 20.4, "FORCEFUL BACKHAND": 15, "FRAGMENTATION SHOT": 15, "FRENZY": 60,
"FURY": 5.4, "HAVOC": 10.2, "HURRICANE": 20.4, "IMPACT": 15, "KICK": 15,
"MASSACRE": 60, "METAMORPHOSIS": 60, "NEEDLE STRIKE": 5.4, "OMNIPOWER": 30,
"ONSLAUGHT": 120, "OVERPOWER": 60, "PIERCING SHOT": 3, "PULVERISE": 60,
"PUNISH": 3, "QUAKE": 20.4, "RAPID FIRE": 20.4, "RICOCHET": 10.2, "SACRIFICE": 30,
"SEVER": 15, "SHADOW TENDRILS": 45, "SHATTER": 120, "SLAUGHTER": 30, "SLICE": 3,
"SMASH": 10.2, "SMOKE TENDRILS": 45, "SNAP SHOT": 20.4, "SNIPE": 10.2,
"SONIC WAVE": 5.4, "STOMP": 15, "STORM SHARDS": 30, "SUNSHINE": 60,
"TIGHT BINDINGS": 15, "TSUNAMI": 60, "TUSKA'S WRATH": 120, "UNLOAD": 60,
"WILD MAGIC": 20.4, "WRACK": 3}
# How long it takes to use each ability
ability_time: Dict[str, float] = {"ASPHYXIATE": 5.4, "ASSAULT": 5.4, "BACKHAND": 1.8, "BARGE": 1.8, "BERSERK": 1.8,
"BINDING SHOT": 1.8, "BLOOD TENDRILS": 1.8, "BOMBARDMENT": 1.8, "CHAIN": 1.8,
"CLEAVE": 1.8, "COMBUST": 1.8, "CONCENTRATED BLAST": 3.6, "CORRUPTION BLAST": 1.8,
"CORRUPTION SHOT": 1.8, "DAZING SHOT": 1.8, "DEADSHOT": 1.8, "DEATH'S SWIFTNESS": 1.8,
"DEBILITATE": 1.8, "DECIMATE": 1.8, "DEEP IMPACT": 1.8, "DESTROY": 4.2,
"DETONATE": 3.6, "DISMEMBER": 1.8, "DRAGON BREATH": 1.8, "FLURRY": 5.4,
"FORCEFUL BACKHAND": 1.8, "FRAGMENTATION SHOT": 1.8, "FRENZY": 4.2, "FURY": 3.6,
"HAVOC": 1.8, "HURRICANE": 1.8, "IMPACT": 1.8, "KICK": 1.8, "MASSACRE": 1.8,
"METAMORPHOSIS": 1.8, "NEEDLE STRIKE": 1.8, "OMNIPOWER": 1.8, "ONSLAUGHT": 4.8,
"OVERPOWER": 1.8, "PIERCING SHOT": 1.8, "PULVERISE": 1.8, "PUNISH": 1.8, "QUAKE": 1.8,
"RAPID FIRE": 5.4, "RICOCHET": 1.8, "SACRIFICE": 1.8, "SEVER": 1.8,
"SHADOW TENDRILS": 1.8, "SHATTER": 1.8, "SLAUGHTER": 1.8, "SLICE": 1.8, "SMASH": 1.8,
"SMOKE TENDRILS": 5.4, "SNAP SHOT": 1.8, "SNIPE": 3.6, "SONIC WAVE": 1.8,
"STOMP": 1.8, "STORM SHARDS": 1.8, "SUNSHINE": 1.8, "TIGHT BINDINGS": 1.8,
"TSUNAMI": 1.8, "TUSKA'S WRATH": 1.8, "UNLOAD": 4.2, "WILD MAGIC": 4.8, "WRACK": 1.8}
# Define the type of abilities (B = basic, T = threshold, U = ultimate)
ability_type: Dict[str, str] = {"ASPHYXIATE": "T", "ASSAULT": "T", "BACKHAND": "B", "BARGE": "B", "BERSERK": "U",
"BINDING SHOT": "B", "BLOOD TENDRILS": "T", "BOMBARDMENT": "T", "CHAIN": "B",
"CLEAVE": "B", "COMBUST": "B", "CONCENTRATED BLAST": "B", "CORRUPTION BLAST": "B",
"CORRUPTION SHOT": "B", "DAZING SHOT": "B", "DEADSHOT": "U", "DEATH'S SWIFTNESS": "U",
"DEBILITATE": "T", "DECIMATE": "B", "DEEP IMPACT": "T", "DESTROY": "T", "DETONATE": "T",
"DISMEMBER": "B", "DRAGON BREATH": "B", "FLURRY": "T", "FORCEFUL BACKHAND": "T",
"FRAGMENTATION SHOT": "B", "FRENZY": "U", "FURY": "B", "HAVOC": "B", "HURRICANE": "U",
"IMPACT": "B", "KICK": "B", "MASSACRE": "U", "METAMORPHOSIS": "U", "NEEDLE STRIKE": "B",
"OMNIPOWER": "U", "ONSLAUGHT": "U", "OVERPOWER": "U", "PIERCING SHOT": "B",
"PULVERISE": "U", "PUNISH": "B", "QUAKE": "T", "RAPID FIRE": "T", "RICOCHET": "B",
"SACRIFICE": "B", "SEVER": "B", "SHADOW TENDRILS": "T", "SHATTER": "T",
"SLAUGHTER": "T", "SLICE": "B", "SMASH": "B", "SMOKE TENDRILS": "T", "SNAP SHOT": "T",
"SNIPE": "B", "SONIC WAVE": "B", "STOMP": "T", "STORM SHARDS": "B", "SUNSHINE": "U",
"TIGHT BINDINGS": "T", "TSUNAMI": "U", "TUSKA'S WRATH": "B", "UNLOAD": "U",
"WILD MAGIC": "T", "WRACK": "B"}
# Define a flag to decide if you can use abilities (based on adrenaline)
ability_ready: Dict[str, bool] = {"ASPHYXIATE": False, "ASSAULT": False, "BACKHAND": True, "BARGE": True,
"BERSERK": False, "BINDING SHOT": True, "BLOOD TENDRILS": False, "BOMBARDMENT": False,
"CHAIN": True, "CLEAVE": True, "COMBUST": True, "CONCENTRATED BLAST": True,
"CORRUPTION BLAST": True, "CORRUPTION SHOT": True, "DAZING SHOT": True,
"DEADSHOT": False, "DEATH'S SWIFTNESS": False, "DEBILITATE": False, "DECIMATE": True,
"DEEP IMPACT": False, "DESTROY": False, "DETONATE": False, "DISMEMBER": True,
"DRAGON BREATH": True, "FLURRY": False, "FORCEFUL BACKHAND": False,
"FRAGMENTATION SHOT": True, "FRENZY": True, "FURY": True, "HAVOC": True,
"HURRICANE": False, "IMPACT": True, "KICK": True, "MASSACRE": False,
"METAMORPHOSIS": False, "NEEDLE STRIKE": True, "OMNIPOWER": False, "ONSLAUGHT": False,
"OVERPOWER": False, "PIERCING SHOT": True, "PULVERISE": False, "PUNISH": True,
"QUAKE": False, "RAPID FIRE": False, "RICOCHET": True, "SACRIFICE": True,
"SEVER": True, "SHADOW TENDRILS": False, "SHATTER": False, "SLAUGHTER": False,
"SLICE": True, "SMASH": True, "SMOKE TENDRILS": False, "SNAP SHOT": False,
"SNIPE": True, "SONIC WAVE": True, "STOMP": False, "STORM SHARDS": True,
"SUNSHINE": False, "TIGHT BINDINGS": False, "TSUNAMI": False, "TUSKA'S WRATH": True,
"UNLOAD": False, "WILD MAGIC": False, "WRACK": True}
# Define the time DOT abilities last (in seconds)
bleeds: Dict[str, float] = {"BLOOD TENDRILS": 4.8, "COMBUST": 6, "CORRUPTION BLAST": 6, "CORRUPTION SHOT": 6,
"DEADSHOT": 6, "DISMEMBER": 6, "FRAGMENTATION SHOT": 6, "MASSACRE": 6,
"SHADOW TENDRILS": 1.8, "SLAUGHTER": 6, "SMOKE TENDRILS": 5.4}
# Define damage multiplier of walking bleeds
walking_bleeds: Dict[str, float] = {"COMBUST": 1, "FRAGMENTATION SHOT": 1, "SLAUGHTER": 1.5}
# Define bleed abilities that have their first hit affected by damage modifying abilities
special_bleeds: List[str] = ["DEADSHOT", "MASSACRE", "SMOKE TENDRILS"]
# Define abilities that take longer than 1.8 seconds to use but will still have full impact from abilities in the
# crit_boost list
special_abilities: List[str] = ["DETONATE", "SNIPE"]
# How long stuns, DPS increases, etc. last (in seconds)
buff_time: Dict[str, float] = {"BARGE": 6.6, "BERSERK": 19.8, "BINDING SHOT": 9.6, "CONCENTRATED BLAST": 5.4,
"DEATH'S SWIFTNESS": 30, "DEEP IMPACT": 3.6, "FORCEFUL BACKHAND": 3.6, "FURY": 5.4,
"METAMORPHOSIS": 15, "NEEDLE STRIKE": 3.6, "RAPID FIRE": 6, "STOMP": 3.6, "SUNSHINE": 30,
"TIGHT BINDINGS": 9.6}
# Define the Multiplier for boosted damage
buff_effect: Dict[str, float] = {"BERSERK": 2, "CONCENTRATED BLAST": 1.1, "DEATH'S SWIFTNESS": 1.5, "FURY": 1.1,
"METAMORPHOSIS": 1.625, "NEEDLE STRIKE": 1.07, "PIERCING SHOT": 2, "PUNISH": 2,
"SLICE": 1.506, "SUNSHINE": 1.5, "WRACK": 2}
# Define crit-boosting abilities
crit_boost: List[str] = ["BERSERK", "CONCENTRATED BLAST", "DEATH'S SWIFTNESS", "FURY", "METAMORPHOSIS", "NEEDLE STRIKE",
"SUNSHINE"]
# Define the abilities that do extra damage when the target is stun or bound
punishing: List[str] = ["PIERCING SHOT", "PUNISH", "SLICE", "WRACK"]
# Define abilities that can stun or bind the target
debilitating: List[str] = ["BARGE", "BINDING SHOT", "DEEP IMPACT", "FORCEFUL BACKHAND", "RAPID FIRE", "STOMP",
"TIGHT BINDINGS"]
# Define abilities that bind the target
binds: List[str] = ["BARGE", "BINDING SHOT", "DEEP IMPACT", "TIGHT BINDINGS"]
# Define area of effect abilities
aoe: List[str] = ["BOMBARDMENT", "CHAIN", "CLEAVE", "CORRUPTION BLAST", "CORRUPTION SHOT", "DRAGON BREATH", "FLURRY",
"HURRICANE", "QUAKE", "RICOCHET", "TSUNAMI"]
start_adrenaline: int
gain: int
attack_speed: str
activate_bleeds: bool
my_abilities: List[str]
auto_adrenaline: int
cycle_duration: float
# Will return how much damage an ability bar will do over a given time
def ability_rotation(permutation: List[str]) -> float:
# Will check if an auto attack is needed to be used
def auto_available() -> bool:
for ability in track_cooldown:
if (ability_cooldown[ability] - track_cooldown[ability]) < attack_speed_cooldowns[attack_speed]:
return False
return True
    # Decreases cooldowns of abilities and buffs, and adjusts any active damage multipliers
def adjust_cooldowns(current_buff: float, adrenaline: int, cooldown_time: float) -> float:
for ability in track_cooldown:
track_cooldown[ability] += cooldown_time
track_cooldown[ability] = round(track_cooldown[ability], 1)
if track_cooldown[ability] >= ability_cooldown[ability]:
track_cooldown[ability] = 0
if ability in threshold_ability_list:
if adrenaline >= 50:
ability_ready[ability] = True
elif ability in ultimate_ability_list:
if adrenaline == 100:
ability_ready[ability] = True
else:
ability_ready[ability] = True
for ability in permutation:
if ability in track_cooldown and track_cooldown[ability] == 0:
del track_cooldown[ability]
for ability in track_buff:
track_buff[ability] += cooldown_time
track_buff[ability] = round(track_buff[ability], 1)
if track_buff[ability] >= buff_time[ability]:
track_buff[ability] = 0
for ability in permutation:
if ability in track_buff and track_buff[ability] == 0:
del track_buff[ability]
if (ability not in punishing) and (ability in buff_effect):
current_buff = current_buff / buff_effect[ability]
return current_buff
# Determines if enemy vulnerable to extra damage due to stuns or binds
def buff_available() -> bool:
for ability in debilitating:
if ability in track_buff:
return True
return False
def modify_time(cycle_duration: float, time_elapsed: float, ability: str) -> float:
if (ability in bleeds) and (ability != "SHADOW TENDRILS") and (
(cycle_duration - time_elapsed) < bleeds[ability]):
return (cycle_duration - time_elapsed) / bleeds[ability]
else:
if (ability not in special_abilities) and (ability_time[ability] > 1.8) and (
(cycle_duration - time_elapsed) < ability_time[ability]):
return (cycle_duration - time_elapsed) / ability_time[ability]
return 1
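    # Illustrative worked example (comment only; values assumed): a 6-second
    # bleed such as COMBUST cast with only 2 seconds left in the cycle gets
    # modify_time = 2 / 6, so only a third of its damage counts this rotation.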
# --- Defining Variables --- #
damage_dealt: float = 0
current_buff: float = 1
time_elapsed: float = 0
shards: float = 0
adrenaline = start_adrenaline
# --- Calculations begin here --- #
ability_path.append(f"AUTO D: {round(damage_dealt, 1)} T: {round(time_elapsed, 1)} A: {adrenaline}")
damage_dealt += 50
adrenaline += auto_adrenaline
if adrenaline >= 100:
adrenaline = 100
for tracked_ability in ultimate_ability_list:
ability_ready[tracked_ability] = True
for tracked_ability in threshold_ability_list:
ability_ready[tracked_ability] = True
elif adrenaline >= 50:
for tracked_ability in threshold_ability_list:
ability_ready[tracked_ability] = True
elif adrenaline < 0:
adrenaline = 0
time_elapsed += 0.6
time_elapsed = round(time_elapsed, 1)
while time_elapsed < cycle_duration:
for ability in permutation:
            # Checks if the ability can be used. TODO: Check if this is necessary
if time_elapsed < cycle_duration and ability_ready[ability] is True:
ability_ready[ability] = False
# --- Modifying adrenaline as required --- #
ability_path.append(f"{ability} D: {round(damage_dealt, 1)} T: {round(time_elapsed, 1)}"
f" A: {adrenaline}")
if ability in basic_ability_list:
adrenaline += 8
elif ability in threshold_ability_list:
adrenaline -= 15
else:
adrenaline = gain
if adrenaline > 100:
adrenaline = 100
# --- Adding shards if they are used, or using them if activated --- #
if ability == "STORM SHARDS":
if shards < 10:
shards += 1
elif ability == "SHATTER":
damage_dealt += round(shards * 85, 1)
shards = 0
# --- Calculating how much damage abilities should do --- #
more_binds: bool = False
altered_bleeds: bool = False
modified_damage: bool = False
damage_multiplier: float = 1 # Multiplier for damage due to damage boosting abilities
bleed_multiplier: float = 1 # Multiplier in case target is bound (and bind about to run out)
for tracked_ability in track_buff:
if tracked_ability in crit_boost:
if ((buff_time[tracked_ability] - track_buff[tracked_ability]) < ability_time[ability]) and (
(ability not in special_abilities) and (ability_time[ability] > 1.8)):
damage_multiplier *= (
(((buff_time[tracked_ability] - track_buff[tracked_ability]) / ability_time[ability]) *
(buff_effect[tracked_ability] - 1)) + 1)
else:
damage_multiplier *= buff_effect[tracked_ability]
elif (tracked_ability in binds) and (activate_bleeds is True) and (ability in walking_bleeds) and (
len(debilitating) > 0):
if (more_binds is False) and (
buff_time[tracked_ability] - track_buff[tracked_ability] < bleeds[ability]):
bleed_multiplier = walking_bleeds[ability] * (
1 + (buff_time[tracked_ability] - track_buff[tracked_ability]) / bleeds[ability])
else:
bleed_multiplier = 1
more_binds = True
altered_bleeds = True
if (activate_bleeds is True) and (ability in walking_bleeds) and (altered_bleeds is False):
bleed_multiplier = walking_bleeds[ability] * 2
time_multiplier = modify_time(cycle_duration, time_elapsed, ability)
if ability in bleeds:
if ability in special_bleeds:
if ability == "SMOKE TENDRILS":
damage_dealt += (ability_damage[ability] * damage_multiplier)
else:
ability_damage[ability] = ((112.8 * damage_multiplier) + 313.33)
modified_damage = True
damage_dealt += round(ability_damage[ability] * bleed_multiplier * time_multiplier, 1)
if modified_damage is True:
ability_damage[ability] = 426.13
elif (ability in punishing) and (buff_available() is True):
damage_dealt += round(ability_damage[ability] * buff_effect[ability] * damage_multiplier *
time_multiplier, 1)
else:
damage_dealt += round(ability_damage[ability] * damage_multiplier * time_multiplier, 1)
# --- Increasing rotation duration and managing cooldowns --- #
time_elapsed += ability_time[ability]
time_elapsed = round(time_elapsed, 1)
track_cooldown[ability] = float(0)
if ability in buff_time and ability not in punishing:
track_buff[ability] = 0
if ability in buff_effect:
current_buff = current_buff * buff_effect[ability]
# Will also manage cooldowns
current_buff = adjust_cooldowns(current_buff, adrenaline, ability_time[ability])
break
# --- Determines whether thresholds or ultimates may be used --- #
if time_elapsed < cycle_duration:
if adrenaline == 100:
for tracked_ability in (a for a in ultimate_ability_list if a not in track_cooldown):
ability_ready[tracked_ability] = True
for tracked_ability in (a for a in threshold_ability_list if a not in track_cooldown):
ability_ready[tracked_ability] = True
elif adrenaline >= 50:
for tracked_ability in (a for a in threshold_ability_list if a not in track_cooldown):
ability_ready[tracked_ability] = True
elif adrenaline < 50:
for tracked_ability in threshold_ability_list:
ability_ready[tracked_ability] = False
if adrenaline != 100:
for tracked_ability in ultimate_ability_list:
ability_ready[tracked_ability] = False
        # --- Determines if any abilities are available / whether auto attacks must be used --- #
if time_elapsed < cycle_duration:
ability_available = False
for _ in (a for a in permutation if ability_ready[a]):
ability_available = True
break
if ability_available is False:
if auto_available() is True:
if (time_elapsed + attack_speed_cooldowns[attack_speed]) <= cycle_duration:
time_elapsed += attack_speed_cooldowns[attack_speed]
else:
time_elapsed += (cycle_duration - time_elapsed)
break
ability_path.append(f"AUTO D: {round(damage_dealt, 1)} T: {round(time_elapsed, 1)} A: {adrenaline}")
if float(cycle_duration - time_elapsed) >= 0.6:
damage_dealt += round(50 * current_buff, 1)
else:
damage_dealt += round(float(50 * round(float((cycle_duration - time_elapsed) / 0.6), 1)) *
current_buff, 1)
adrenaline += auto_adrenaline
time_elapsed += 0.6
if adrenaline > 100:
adrenaline = 100
# Will also manage cooldowns
current_buff = adjust_cooldowns(current_buff, adrenaline, (attack_speed_cooldowns[attack_speed] +
0.6))
else:
time_elapsed += 0.6
current_buff = adjust_cooldowns(current_buff, adrenaline, 0.6)
time_elapsed = round(time_elapsed, 1)
return damage_dealt
def setup_config() -> None:
global start_adrenaline, gain, attack_speed, activate_bleeds, debilitating, my_abilities, auto_adrenaline, cycle_duration
def compare(lines) -> bool:
# configuration followed by line number
correct_data: Dict[str, int] = {"Adrenaline": 2, "Gain": 3, "AttackSpeed": 4, "Bleeds": 5, "Stuns": 6,
"Abilities": 7, "Style": 8, "Time": 9, "units": 13}
for setting in correct_data:
if setting != lines[correct_data[setting]]:
return False
return True
def validate(configurations) -> List[str]:
error_log: List[str] = []
empty_field: bool = False
for config in configurations:
if config == "":
empty_field = True
if empty_field is True:
error_log.append("One or more settings have been left empty.")
try:
setting: int = int(configurations[0])
if not (0 <= setting <= 100):
error_log.append("Adrenaline must be between 0 and 100 inclusive.")
except ValueError:
error_log.append("Adrenaline must be an integer.")
try:
setting: int = int(configurations[1])
if not (0 <= setting <= 100):
error_log.append("Gain must be a positive integer between 0 and 100 inclusive.")
except ValueError:
error_log.append("Gain must be an integer.")
if configurations[2].upper() not in ("SLOWEST", "SLOW", "AVERAGE", "FAST", "FASTEST"):
error_log.append("AttackSpeed must either be one of the following options: ('slowest, slow, average, fast,"
" fastest').")
setting: str = configurations[3]
if not ((setting.lower() == "false") or (setting.lower() == "true")):
error_log.append("Bleeds must be true or false.")
setting: str = configurations[4]
if not ((setting.lower() == "false") or (setting.lower() == "true")):
error_log.append("Stuns must be true or false.")
setting: str = configurations[5]
if setting[0] == "[" and setting[-1] == "]":
setting = setting[1:-1].split(",")
counter: Dict[str, int] = {}
if len(setting) > 0:
for ability in setting:
ability = ability.upper().strip()
if (ability not in abilities) and (ability not in counter):
error_log.append(f"{ability.strip()} is not a recognised ability, or is not included in this "
f"calculator.")
if ability in counter:
counter[ability] += 1
if counter[ability] == 2:
error_log.append(f"{(ability.strip())} is referenced 2 or more times within array. Ensure "
f"it is only referenced once.")
else:
counter[ability] = 1
else:
error_log.append("No abilities were added")
else:
error_log.append("Abilities must be surrounded by square brackets [], and separated by comma's (,).")
setting: str = configurations[6]
if setting[0] == "(" and setting[-1] == ")":
setting = setting[1:-1].split(",")
if setting[0].upper() not in ("MAGIC", "RANGED", "MELEE"):
error_log.append("First style option must be 'magic', 'ranged' or 'melee' (without quotes).")
if setting[1] not in ("1", "2"):
error_log.append("Second style option must be 1 or 2 (for 1 handed / 2 handed weapon)3")
else:
error_log.append("Style must start and end with round brackets (), with each option separated by a single "
"comma (,).")
try:
setting: float = float(configurations[7])
if not (setting > 0):
error_log.append("Time must be a number greater than zero.")
except ValueError:
error_log.append("Time must be a number.")
if configurations[8].upper() not in ("SECONDS", "TICKS"):
error_log.append("Units must be either 'seconds' or 'ticks' (without quotes).")
return error_log
def repair_config_file() -> None:
repair: str = input("Configurations.txt has been modified, perform repair? (Y/N).\n>> ").upper()
if (repair == "Y") or (repair == "YES"):
import os
correct_data: List[str] = ["# Rotation Parameters", "", "Adrenaline: ", "Gain: ", "AttackSpeed: ",
"Bleeds: ", "Stuns: ", "Abilities: [,,,]", "Style: (,)", "Time: ", "", "# Mode",
"", "units: seconds"]
if os.path.exists("Configurations.txt"):
os.remove("Configurations.txt")
with open("Configurations.txt", "w") as settings:
for line in correct_data:
                    settings.write(line + "\n")
input("Repair successful! fill out settings in Configurations.txt before running calculator again. "
"Press enter to exit.\n>> ")
sys.exit()
# --- Gets data for setup --- #
filedata: List[str] = []
configurations: List[str] = []
try:
with open("Configurations.txt", "r") as settings:
for line in settings:
filedata.append(line.split(":")[0])
if ":" in line:
configurations.append(line.split(":")[1].strip())
if compare(filedata) is False:
repair_config_file()
    except Exception:
        # A missing or malformed file should both trigger the repair flow
        repair_config_file()
error_log = validate(configurations)
if len(error_log) > 0:
print("Errors were found!!!\n")
for error in error_log:
print(error)
input("\nCould not complete setup, please change fields accordingly and run the calculator again. "
"Press enter to exit.\n>> ")
sys.exit()
start_adrenaline = int(configurations[0])
gain = int(configurations[1])
attack_speed = configurations[2].upper()
    activate_bleeds = configurations[3].lower() == "true"  # convert the validated string to a bool
    bound: str = configurations[4]
    if bound.lower() == "false":
        debilitating = []
my_abilities = []
for ability in configurations[5][1:-1].split(","):
my_abilities.append(ability.strip().upper())
# --- Different styles of combat tree give varying amounts of adrenaline from auto attacks --- #
    style: Tuple[str, str] = tuple(configurations[6][1:-1].split(","))
    if style[0].strip().upper() == "MAGIC":
auto_adrenaline = 2
else:
if style[1] != "2":
auto_adrenaline = 2
else:
auto_adrenaline = 3
cycle_duration = float(configurations[7])
    units: str = configurations[8]
    if units.lower() == "ticks":
        cycle_duration *= 0.6  # 1 game tick = 0.6 seconds
def main() -> None:
global basic_ability_list, threshold_ability_list, ultimate_ability_list, track_cooldown, track_buff, ability_path, ability_ready
# Converts raw seconds into Years, Weeks, etc...
def get_time(seconds: int) -> str:
years: int = int(seconds / 31449600)
seconds -= years * 31449600
weeks: int = int(seconds / 604800)
seconds -= weeks * 604800
days: int = int(seconds / 86400)
seconds -= days * 86400
hours: int = int(seconds / 3600)
seconds -= hours * 3600
minutes: int = int(seconds / 60)
seconds -= minutes * 60
eta: str = f"{years} years, {weeks} weeks, {days} days, {hours} hours, {minutes} minutes and {seconds} seconds."
return eta
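    # Sanity-check example (comment only): get_time(90061) returns
    # "0 years, 0 weeks, 1 days, 1 hours, 1 minutes and 1 seconds.",
    # since 90061 = 86400 + 3600 + 60 + 1.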
# Removes abilities from lists and dictionaries not being used to save runtime and memory
def remove() -> Dict[str, bool]:
for ability in abilities:
if not (ability in my_abilities):
del ability_damage[ability]
del ability_cooldown[ability]
del ability_type[ability]
del ability_time[ability]
del ability_ready[ability]
if ability in walking_bleeds:
del walking_bleeds[ability]
if ability in bleeds:
del bleeds[ability]
if ability in buff_time:
del buff_time[ability]
if ability in buff_effect:
del buff_effect[ability]
if ability in punishing:
punishing.remove(ability)
if ability in debilitating:
debilitating.remove(ability)
if ability in crit_boost:
crit_boost.remove(ability)
if ability in special_bleeds:
special_bleeds.remove(ability)
if ability in special_abilities:
special_abilities.remove(ability)
if ability in aoe:
aoe.remove(ability)
return dict(ability_ready)
setup_config()
# --- Dictionaries, lists and other data types laid out here --- #
print("Starting process ...")
copy_of_ready = remove()
basic_ability_list = [a for a in ability_type if ability_type[a] == "B"]
threshold_ability_list = [a for a in ability_type if ability_type[a] == "T"]
ultimate_ability_list = [a for a in ability_type if ability_type[a] == "U"]
track_cooldown = {}
track_buff = {}
ability_path = []
best_rotation: List[str] = []
worst_rotation: List[str] = []
# --- Calculations for estimation of time remaining --- #
permutation_count: int = math.factorial(len(my_abilities))
    time_remaining_calculation: float = permutation_count / 10000
runthrough: int = 0
# --- Tracking of highest and lowest damaging ability bars --- #
current_highest: float = 0
current_lowest: float = float("inf")
# Define the amount of targets affected by area of effect attacks
aoe_average_targets_hit: float = 2.5
# --- Gets rotation length --- #
while True:
try:
if len(aoe) > 0: # Only ask if AoE abilities are in my_abilities
aoe_average_targets_hit = float(input("How many targets on average will your AoE abilities hit? "))
if aoe_average_targets_hit < 1:
print("Area of effect abilities should hit at least 1 target per use.")
continue
break
        except ValueError:
            print("Invalid Input.")
if aoe_average_targets_hit > 1:
for ability in my_abilities:
if ability in aoe:
ability_damage[ability] = ability_damage[ability] * aoe_average_targets_hit
print("Startup Complete! Warning, the more the abilities, and the higher the cycle time, the more time it will take"
" to process. A better processor will improve this speed.")
choice: str = input("Start Calculations? (Y/N) ").upper()
if (choice != "Y") and (choice != "YES"):
sys.exit()
# --- Calculations start here --- #
    start: float = time.time()  # Record time since epoch (UTC) (in seconds)
try: # Will keep running until Control C (or other) is pressed to end process
for permutation in itertools.permutations(my_abilities):
damage_dealt: float = ability_rotation(permutation)
# --- Reset data ready for next ability bar to be tested
# and check if any better/worse bars have been found --- #
ability_ready = dict(copy_of_ready)
track_cooldown = {}
track_buff = {}
if round(damage_dealt, 1) > current_highest:
current_highest = round(damage_dealt, 1)
                best_rotation = list(ability_path)
best_bar: List[str] = list(permutation)
print(f"New best bar with damage {current_highest}: {best_bar}")
if round(damage_dealt, 1) < current_lowest:
current_lowest = round(damage_dealt, 1)
                worst_rotation = list(ability_path)
worst_bar = list(permutation)
ability_path = []
runthrough += 1
# --- Time Remaining estimation calculations every 10,000 bars analysed --- #
if runthrough == 10000:
end_estimation = int(time_remaining_calculation * (time.time() - start))
if runthrough % 10000 == 0:
print(f"\r===== {round(float(runthrough / permutation_count) * 100, 3)}"
f"% ===== Estimated time remaining: {get_time(int(end_estimation - (time.time() - start)))}"
f"; Best found: {current_highest}%" + (" " * 22), end="")
time_remaining_calculation -= 1
end_estimation = int(time_remaining_calculation * (time.time() - start))
start = time.time()
except KeyboardInterrupt:
print("\nProcess terminated!")
# --- Display results --- #
print(f"\n\nHighest ability damage: {current_highest}%")
print(f"Best ability bar found: {best_bar}")
print(f"{best_rotation}\n")
print(f"Lowest ability damage: {current_lowest}%")
print(f"Worst ability bar found: {worst_bar}")
print(worst_rotation)
input("\nPress enter to exit\n")
# Execute main() function
if __name__ == "__main__":
main()
| 58.304217 | 134 | 0.529628 |
bc7c508a95829ee47a8c3dec47d98edac852f921 | 394 | py | Python | apps/cmsauth/urls.py | ForsetiRe/seven_cms | e7ffaf6ca9de5f5237f3db0bbce3d11904cf8f93 | [
"Apache-2.0"
] | null | null | null | apps/cmsauth/urls.py | ForsetiRe/seven_cms | e7ffaf6ca9de5f5237f3db0bbce3d11904cf8f93 | [
"Apache-2.0"
] | null | null | null | apps/cmsauth/urls.py | ForsetiRe/seven_cms | e7ffaf6ca9de5f5237f3db0bbce3d11904cf8f93 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
app_name = 'cmsauth'
urlpatterns = [
path('login/', views.login_view, name='login'),
path('logout/', views.logout_view, name='logout'),
path('image_captcha/', views.image_captcha, name='image_captcha'),
path('sms_captcha/', views.sms_captcha, name='sms_captcha'),
path('register/', views.register_view, name='register'),
]
| 30.307692 | 70 | 0.695431 |
6dd9a9eb9525840a5b4c673c07ec8f73f4ace8e0 | 14,250 | py | Python | qboost.py | frtibble/qboost | b7c68f2f001161694d797b768b9aaaaa1bb3dd47 | [
"Apache-2.0"
] | null | null | null | qboost.py | frtibble/qboost | b7c68f2f001161694d797b768b9aaaaa1bb3dd47 | [
"Apache-2.0"
] | null | null | null | qboost.py | frtibble/qboost | b7c68f2f001161694d797b768b9aaaaa1bb3dd47 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http: // www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from azure.quantum.optimization import Problem, Term, ProblemType, SimulatedAnnealing
import dimod
from dwave.system import LeapHybridSampler
from tabulate import tabulate
class DecisionStumpClassifier:
"""Decision tree classifier that operates on a single feature with a single splitting rule.
The index of the feature used in the decision rule is stored
relative to the original data frame.
"""
def __init__(self, X, y, feature_index):
"""Initialize and fit the classifier.
Args:
X (array):
2D array of feature vectors. Note that the array
contains all features, while the weak classifier
itself uses only a single feature.
y (array):
1D array of class labels, as ints. Labels should be
+/- 1.
feature_index (int):
Index for the feature used by the weak classifier,
relative to the overall data frame.
"""
self.i = feature_index
self.clf = DecisionTreeClassifier(max_depth=1)
self.clf.fit(X[:, [feature_index]], y)
def predict(self, X):
"""Predict class.
Args:
X (array):
2D array of feature vectors. Note that the array
contains all features, while the weak classifier
itself will make a prediction based only a single
feature.
Returns:
Array of class labels.
"""
return self.clf.predict(X[:, [self.i]])
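# Illustrative usage sketch (comment only; the toy data below is an assumption,
# not from the original repo):
#   X_toy = np.array([[0., 5.], [1., 4.], [2., 3.], [3., 2.]])
#   y_toy = np.array([-1, -1, 1, 1])
#   stump = DecisionStumpClassifier(X_toy, y_toy, feature_index=0)
#   stump.predict(X_toy)  # a depth-1 split on feature 0 separates this toy set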
def _build_H(classifiers, X, output_scale):
"""Construct matrix of weak classifier predictions on given set of input vectors."""
H = np.array([clf.predict(X) for clf in classifiers], dtype=float).T
# Rescale H
H *= output_scale
return H
class EnsembleClassifier:
"""Ensemble of weak classifiers."""
def __init__(self, weak_classifiers, weights, weak_classifier_scaling, offset=1e-9):
"""Initialize ensemble from list of weak classifiers and weights.
Args:
weak_classifiers (list):
List of classifier instances.
weights (array):
Weights associated with the weak classifiers.
weak_classifier_scaling (float):
Scaling for weak classifier outputs.
offset (float):
Offset value for ensemble classifier. The default
value is a small positive number used to prevent
ambiguous 0 predictions when weak classifiers exactly
balance each other out.
"""
self.classifiers = weak_classifiers
self.w = weights
self.weak_clf_scale = weak_classifier_scaling
self.offset = offset
def predict(self, X):
"""Compute ensemble prediction.
Note that this function returns the numerical value of the
ensemble predictor, not the class label. The predicted class
is sign(predict()).
"""
H = _build_H(self.classifiers, X, self.weak_clf_scale)
# If we've already filtered out those with w=0 and we are only
# using binary weights, this is just a sum
preds = np.dot(H, self.w)
return preds - self.offset
def predict_class(self, X):
"""Compute ensemble prediction of class label."""
preds = self.predict(X)
# Add a small perturbation to any predictions that are exactly
# 0, because these will not count towards either class when
# passed through the sign function. Such zero predictions can
# happen when the weak classifiers exactly balance each other
# out.
preds[preds == 0] = 1e-9
return np.sign(preds)
def score(self, X, y):
"""Compute accuracy score on given data."""
if sum(self.w) == 0:
# Avoid difficulties that occur with handling this below
return 0.0
return accuracy_score(y, self.predict_class(X))
def squared_error(self, X, y):
"""Compute squared error between predicted and true labels.
Provided for testing purposes.
"""
p = self.predict(X)
return sum((p - y)**2)
def fit_offset(self, X):
"""Fit offset value based on class-balanced feature vectors.
Currently, this assumes that the feature vectors in X
correspond to an even split between both classes.
"""
self.offset = 0.0
# Todo: review whether it would be appropriate to subtract
# mean(y) here to account for unbalanced classes.
self.offset = np.mean(self.predict(X))
def get_selected_features(self):
"""Return list of features corresponding to the selected weak classifiers."""
return [clf.i for clf, w in zip(self.classifiers, self.w) if w > 0]
class AllStumpsClassifier(EnsembleClassifier):
"""Ensemble classifier with one decision stump for each feature."""
def __init__(self, X, y):
if not all(np.isin(y, [-1, 1])):
raise ValueError("Class labels should be +/- 1")
        num_features = np.size(X, 1)
        classifiers = [DecisionStumpClassifier(
            X, y, i) for i in range(num_features)]
        # Note: the weak classifier output scaling is arbitrary in
        # this case and does not affect the predictions.
        super().__init__(classifiers, np.ones(num_features), 1/num_features)
self.fit_offset(X)
def _build_bqm(H, y, lam):
"""Build BQM.
Args:
H (array):
2D array of weak classifier predictions. Each row is a
sample point, each column is a classifier.
y (array):
Outputs
lam (float):
Coefficient that controls strength of regularization term
(larger values encourage decreased model complexity).
"""
n_samples = np.size(H, 0)
n_classifiers = np.size(H, 1)
# samples_factor is a factor that appears in front of the squared
# loss term in the objective. In theory, it does not affect the
# problem solution, but it does affect the relative weighting of
# the loss and regularization terms, which is otherwise absorbed
# into the lambda parameter.
# Using an average seems to be more intuitive, otherwise, lambda
# is sample-size dependent.
samples_factor = 1.0 / n_samples
bqm = dimod.BQM('BINARY')
bqm.offset = samples_factor * n_samples
for i in range(n_classifiers):
# Note: the last term with h_i^2 is part of the first term in
# Eq. (12) of Neven et al. (2008), where i=j.
bqm.add_variable(i, lam - 2.0 * samples_factor *
np.dot(H[:, i], y) + samples_factor * np.dot(H[:, i], H[:, i]))
for i in range(n_classifiers):
for j in range(i+1, n_classifiers):
# Relative to Eq. (12) from Neven et al. (2008), the
# factor of 2 appears here because each term appears twice
# in a sum over all i,j.
bqm.add_interaction(
i, j, 2.0 * samples_factor * np.dot(H[:, i], H[:, j]))
return bqm
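# Illustrative sketch (comment only; toy values assumed): for a single sample
# agreeing with two weak classifiers, H = [[1., 1.]], y = [1.], lam = 0.1,
# samples_factor is 1, so each variable gets linear bias
# lam - 2*dot(H[:, i], y) + dot(H[:, i], H[:, i]) = 0.1 - 2 + 1 = -0.9,
# the pair gets quadratic coupling 2*dot(H[:, 0], H[:, 1]) = 2, and offset = 1.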
def from_bqm(bqm):
terms = []
# Create a dictionary of variable names to indices
index_mappings = dict()
for variable_name in bqm.variables:
index_mappings[variable_name] = len(index_mappings)
# Add constant value
terms += [Term(c=bqm.offset, indices=[])]
# Add linear terms
for variable in bqm.linear:
index = index_mappings[variable]
value = bqm.linear[variable]
terms += [Term(c=value, indices=[index])]
# Add quadratic terms
for (var1, var2) in bqm.quadratic:
id1 = index_mappings[var1]
id2 = index_mappings[var2]
value = bqm.quadratic[(var1, var2)]
terms += [Term(c=value, indices=[id1, id2])]
if bqm.vartype == "SPIN":
return Problem(name="bqm", terms=terms, problem_type=ProblemType.ising)
else:
return Problem(name="bqm", terms=terms, problem_type=ProblemType.pubo)
def update_variables(config, bqm):
# Create a dictionary of variable names to indices
bqm_mappings = dict()
for variable_name in bqm.variables:
bqm_mappings[str(len(bqm_mappings))] = variable_name
sample = dict()
for c in config:
sample[bqm_mappings[c]] = config[c]
return sample
def _minimize_squared_loss_binary_azure(H, y, lam, workspace):
"""Minimize squared loss using binary weight variables."""
bqm = _build_bqm(H, y, lam)
# Convert the BQM and create an Azure Quantum Problem
problem = from_bqm(bqm)
# Select an Azure Quantum solver
solver = SimulatedAnnealing(workspace, timeout=100)
# Submit the problem
result = solver.optimize(problem)
config = result["configuration"]
sample = update_variables(config, bqm)
weights = np.array(list(sample.values()))
energy = result["cost"]
return weights, energy
def _minimize_squared_loss_binary(H, y, lam):
"""Minimize squared loss using binary weight variables."""
bqm = _build_bqm(H, y, lam)
sampler = LeapHybridSampler()
results = sampler.sample(bqm, label='Example - QBoost')
weights = np.array(list(results.first.sample.values()))
energy = results.first.energy
return weights, energy
class QBoostClassifier(EnsembleClassifier):
"""Construct an ensemble classifier using quadratic loss minimization.
"""
def __init__(self, workspace, X, y, lam, weak_clf_scale=None, drop_unused=True):
"""Initialize and fit QBoost classifier.
X should already include all candidate features (e.g., interactions).
Args:
X (array):
2D array of feature vectors.
y (array):
1D array of class labels (+/- 1).
lam (float):
regularization parameter.
weak_clf_scale (float or None):
scale factor to apply to weak classifier outputs. If
None, scale by 1/num_classifiers.
drop_unused (bool):
if True, only retain the nonzero weighted classifiers.
"""
if not all(np.isin(y, [-1, 1])):
raise ValueError("Class labels should be +/- 1")
num_features = np.size(X, 1)
if weak_clf_scale is None:
weak_clf_scale = 1 / num_features
wclf_candidates = [DecisionStumpClassifier(
X, y, i) for i in range(num_features)]
H = _build_H(wclf_candidates, X, weak_clf_scale)
# For reference, store individual weak classifier scores.
# Note: we don't check equality h==y here because H might be rescaled.
self.weak_scores = np.array([np.mean(np.sign(h) * y > 0) for h in H.T])
weights, self.energy = _minimize_squared_loss_binary_azure(H, y, lam, workspace)
# Store only the selected classifiers
if drop_unused:
weak_classifiers = [wclf for wclf, w in zip(
wclf_candidates, weights) if w > 0]
weights = weights[weights > 0]
else:
weak_classifiers = wclf_candidates
super().__init__(weak_classifiers, weights, weak_clf_scale)
self.fit_offset(X)
# Save candidates so we can provide a baseline accuracy report.
self._wclf_candidates = wclf_candidates
def report_baseline(self, X, y):
"""Report accuracy of weak classifiers.
This provides context for interpreting the performance of the boosted
classifier.
"""
scores = np.array([accuracy_score(y, clf.predict(X))
for clf in self._wclf_candidates])
data = [[len(scores), scores.min(), scores.mean(), scores.max(), scores.std()]]
headers = ['count', 'min', 'mean', 'max', 'std']
print('Accuracy of weak classifiers (score on test set):')
print(tabulate(data, headers=headers, floatfmt='.3f'))
def qboost_lambda_sweep(workspace, X, y, lambda_vals, val_fraction=0.4, verbose=False, **kwargs):
"""Run QBoost using a series of lambda values and check accuracy against a validation set.
    Args:
        workspace:
            Azure Quantum workspace, forwarded to QBoostClassifier.
        X (array):
2D array of feature vectors.
y (array):
1D array of class labels (+/- 1).
lambda_vals (array):
Array of values for regularization parameter, lambda.
val_fraction (float):
Fraction of given data to set aside for validation.
verbose (bool):
Print out diagnostic information to screen.
kwargs:
Passed to QBoost.__init__.
Returns:
QBoostClassifier:
QBoost instance with best validation score.
lambda:
Lambda value corresponding to the best validation score.
"""
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=val_fraction)
best_score = -1
best_lambda = None
best_clf = None
if verbose:
print('{:7} {} {}:'.format('lambda', 'n_features', 'score'))
for lam in lambda_vals:
        qb = QBoostClassifier(workspace, X_train, y_train, lam, **kwargs)
score = qb.score(X_val, y_val)
if verbose:
print('{:<7.4f} {:<10} {:<6.3f}'.format(
lam, len(qb.get_selected_features()), score))
if score > best_score:
best_score = score
best_clf = qb
best_lambda = lam
    return best_clf, best_lambda
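# Illustrative usage sketch (comment only): `workspace` must be an
# azure.quantum Workspace, and X/y follow the conventions above (+/- 1 labels).
#   lambdas = np.linspace(0.0, 0.05, 10)
#   clf, best_lam = qboost_lambda_sweep(workspace, X, y, lambdas, verbose=True)
#   clf.report_baseline(X_test, y_test)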
| 34.009547 | 95 | 0.625263 |
3fe9e5f8ca33ac6eace3b0eec44a09ed4d22bd89 | 1,604 | py | Python | 2015/python/src/day3.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | 1 | 2018-12-06T06:15:59.000Z | 2018-12-06T06:15:59.000Z | 2015/python/src/day3.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | null | null | null | 2015/python/src/day3.py | johncoleman83/aoc-solutions | 0c8c09a9fc94af0722e028a24e4795bba8f952ce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Advent of Code 2015: Day #
"""
import os
from shared.readdayinput import readdayinput
def change_coords(h, coords):
"""
updates coords of home based on directions from elf
"""
if h == '^':
coords[1] += 1
elif h == '>':
coords[0] += 1
elif h == 'v':
coords[1] -= 1
elif h == '<':
coords[0] -= 1
return coords
def first_half(dayinput):
"""
first half solver:
"""
houses = { (0,0): 1 }
coords = [0, 0]
for h in dayinput:
coords = change_coords(h, coords)
home = (coords[0], coords[1])
if houses.get(home, None):
houses[home] += 1
else:
houses[home] = 1
return len(houses)
def second_half(dayinput):
"""
second half solver:
"""
houses = { (0,0): 1 }
santa_coords = [0, 0]
robo_coords = [0, 0]
santa = True
for h in dayinput:
if santa:
santa_coords = change_coords(h, santa_coords)
coords = santa_coords
else:
robo_coords = change_coords(h, robo_coords)
coords = robo_coords
home = (coords[0], coords[1])
if houses.get(home, None) is not None:
houses[home] += 1
else:
houses[home] = 1
santa = not santa
return len(houses)
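# Puzzle examples (comment only): first_half(">") == 2, first_half("^>v<") == 4,
# and with Robo-Santa taking alternate moves, second_half("^v") == 3.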
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
| 21.675676 | 57 | 0.527431 |
fafafb08033f018d9b03660841c03fde43f398ed | 2,049 | py | Python | canoser/map_t.py | yuan-xy/canoser-python | 9edf61b7230781a8b9e450a037608e5a375475a6 | [
"MIT"
] | 12 | 2019-09-21T12:36:41.000Z | 2020-10-17T19:24:15.000Z | canoser/map_t.py | yuan-xy/canoser-python | 9edf61b7230781a8b9e450a037608e5a375475a6 | [
"MIT"
] | 2 | 2019-09-26T09:13:34.000Z | 2020-03-31T03:28:42.000Z | canoser/map_t.py | yuan-xy/canoser-python | 9edf61b7230781a8b9e450a037608e5a375475a6 | [
"MIT"
] | 1 | 2019-09-24T00:03:36.000Z | 2019-09-24T00:03:36.000Z | from canoser.base import Base
from canoser.int_type import Uint32
class MapT(Base):
def __init__(self, ktype, vtype):
self.ktype = ktype
self.vtype = vtype
def encode(self, kvs):
output = b""
output += Uint32.serialize_uint32_as_uleb128(len(kvs))
odict = {}
for k, v in kvs.items():
odict[self.ktype.encode(k)] = self.vtype.encode(v)
for name in sorted(odict.keys()):
output += name
output += odict[name]
return output
def decode(self, cursor):
kvs = {}
size = Uint32.parse_uint32_from_uleb128(cursor)
for _ in range(size):
k = self.ktype.decode(cursor)
v = self.vtype.decode(cursor)
if isinstance(k, list) and isinstance(k[0], int):
# python doesn't support list as key in dict, so we change list to bytes
kvs[bytes(k)] = v
else:
kvs[k] = v
        # TODO: check the key order of kvs, because LCS serialization imposes a canonical map order.
return kvs
def check_value(self, kvs):
if not isinstance(kvs, dict):
raise TypeError(f"{kvs} is not a dict.")
for k, v in kvs.items():
if isinstance(self.ktype, list) or \
(hasattr(self.ktype, 'delegate_type') and isinstance(self.ktype.delegate_type, list)):
from canoser.types import BytesT
BytesT().check_value(k)
else:
self.ktype.check_value(k)
self.vtype.check_value(v)
def __eq__(self, other):
if not isinstance(other, MapT):
return False
return self.ktype == other.ktype and self.vtype == other.vtype
def to_json_serializable(cls, obj):
amap = {}
for k, v in obj.items():
kk = cls.ktype.to_json_serializable(k)
vv = cls.vtype.to_json_serializable(v)
amap[kk] = vv
return amap
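# Illustrative usage sketch (comment only; assumes canoser's Uint32 exposes
# encode/decode like the other canoser types):
#   m = MapT(Uint32, Uint32)
#   data = m.encode({1: 2, 0: 7})  # ULEB128 length prefix, then entries sorted
#                                  # by their encoded key bytes
#   m.decode(some_cursor)          # inverse, given a canoser Cursor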
| 34.15 | 107 | 0.540752 |
88261be9c5bf31bd6b6daf414e77b8f525133dbe | 6,965 | py | Python | main_demo_GenLaneNet_ext.py | xiaoMrzhang/Pytorch_Generalized_3D_Lane_Detection | 8fc3efe08698fa29a823e36139d62b3155c35997 | [
"Apache-2.0"
] | 186 | 2020-04-14T18:51:50.000Z | 2022-03-31T09:33:45.000Z | main_demo_GenLaneNet_ext.py | koughua/Pytorch_Generalized_3D_Lane_Detection | 5bb190ed9f0dc211cd6265ab30c151d3699d3ed4 | [
"Apache-2.0"
] | 15 | 2020-04-27T08:49:22.000Z | 2022-03-13T14:21:37.000Z | main_demo_GenLaneNet_ext.py | koughua/Pytorch_Generalized_3D_Lane_Detection | 5bb190ed9f0dc211cd6265ab30c151d3699d3ed4 | [
"Apache-2.0"
] | 57 | 2020-04-14T18:52:38.000Z | 2022-03-24T02:46:14.000Z | """
A demo for Gen-LaneNet with new anchor extension. It predicts 3D lanes from a single image.
Author: Yuliang Guo (33yuliangguo@gmail.com)
Date: March, 2020
"""
import numpy as np
import torch
import torch.optim
import glob
from tqdm import tqdm
from dataloader.Load_Data_3DLane_ext import *
from networks import GeoNet3D_ext, erfnet
from tools.utils import *
from tools.visualize_pred import lane_visualizer
def unormalize_lane_anchor(anchor, num_y_steps, anchor_dim, x_off_std, z_std, num_types=3):
for i in range(num_types):
anchor[:, i*anchor_dim:i*anchor_dim + num_y_steps] = \
np.multiply(anchor[:, i*anchor_dim: i*anchor_dim + num_y_steps], x_off_std)
anchor[:, i*anchor_dim + num_y_steps: i*anchor_dim + 2*num_y_steps] = \
np.multiply(anchor[:, i*anchor_dim + num_y_steps: i*anchor_dim + 2*num_y_steps], z_std)
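# Layout note (comment only, inferred from the loops above): within each
# anchor_dim block, entries [0:num_y_steps] are x-offsets and
# [num_y_steps:2*num_y_steps] are z-values; the rest (visibility plus the
# anchor probability, since anchor_dim = 3*num_y_steps + 1) stay untouched.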
def load_my_state_dict(model, state_dict): # custom function to load model when not all dict elements
own_state = model.state_dict()
ckpt_name = []
cnt = 0
for name, param in state_dict.items():
if name[7:] not in list(own_state.keys()) or 'output_conv' in name:
ckpt_name.append(name)
# continue
own_state[name[7:]].copy_(param)
cnt += 1
print('#reused param: {}'.format(cnt))
return model
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = define_args()
args = parser.parse_args()
# manual settings
image_file = './example/0000101.jpg'
cam_file = './example/0000101_cam.json'
args.mod = 'Gen_LaneNet_ext' # model name
pretrained_feat_model = 'pretrained/erfnet_model_sim3d.tar'
trained_geo_model = 'pretrained/gen_lanenet_geo_model.tar'
anchor_std_file = 'pretrained/geo_anchor_std.json'
# load configuration for the model
sim3d_config(args)
args.y_ref = 5
args.batch_size = 1
anchor_y_steps = args.anchor_y_steps
num_y_steps = len(anchor_y_steps)
anchor_dim = 3 * num_y_steps + 1
x_min = args.top_view_region[0, 0]
x_max = args.top_view_region[1, 0]
    anchor_x_steps = np.linspace(x_min, x_max, int(args.ipm_w / 8), endpoint=True)  # np.int is deprecated; use int
# Check GPU availability
if not args.no_cuda and not torch.cuda.is_available():
raise Exception("No gpu available for usage")
torch.backends.cudnn.benchmark = args.cudnn
# Define network
model_seg = erfnet.ERFNet(2) # 2-class model
model_geo = GeoNet3D_ext.Net(args)
define_init_weights(model_geo, args.weight_init)
if not args.no_cuda:
# Load model on gpu before passing params to optimizer
model_seg = model_seg.cuda()
model_geo = model_geo.cuda()
# load segmentation model
checkpoint = torch.load(pretrained_feat_model)
model_seg = load_my_state_dict(model_seg, checkpoint['state_dict'])
model_seg.eval() # do not back propagate to model1
# load geometry model
if os.path.isfile(trained_geo_model):
print("=> loading checkpoint '{}'".format(trained_geo_model))
checkpoint = torch.load(trained_geo_model)
model_geo.load_state_dict(checkpoint['state_dict'])
model_geo.eval()
else:
print("=> no checkpoint found at '{}'".format(trained_geo_model))
# load anchor std saved from training
with open(anchor_std_file) as f:
anchor_std = json.load(f)
x_off_std = np.array(anchor_std['x_off_std'])
z_std = np.array(anchor_std['z_std'])
# load image
with open(image_file, 'rb') as f:
image = (Image.open(f).convert('RGB'))
# image preprocess
w, h = image.size
image = F.crop(image, args.crop_y, 0, args.org_h - args.crop_y, w)
image = F.resize(image, size=(args.resize_h, args.resize_w), interpolation=Image.BILINEAR)
image = transforms.ToTensor()(image).float()
image = transforms.Normalize(args.vgg_mean, args.vgg_std)(image)
image.unsqueeze_(0)
image = torch.cat(list(torch.split(image, 1, dim=0)) * args.batch_size)
if not args.no_cuda:
image = image.cuda()
# image = image.contiguous()
# image = torch.autograd.Variable(image)
# update camera setting os the model
with open(cam_file) as f:
cam_params = json.load(f)
gt_pitch = torch.tensor([cam_params['cameraPitch']], dtype=torch.float32)
gt_hcam = torch.tensor([cam_params['cameraHeight']], dtype=torch.float32)
model_geo.update_projection(args, gt_hcam, gt_pitch)
with torch.no_grad():
# deploy model
try:
output_seg = model_seg(image, no_lane_exist=True)
# output1 = F.softmax(output1, dim=1)
output_seg = output_seg.softmax(dim=1)
output_seg = output_seg / torch.max(torch.max(output_seg, dim=2, keepdim=True)[0], dim=3, keepdim=True)[0]
output_seg = output_seg[:, 1:, :, :]
output_geo, pred_hcam, pred_pitch = model_geo(output_seg)
        except RuntimeError as e:
            print(e)
            raise  # output_geo would be undefined below, so do not continue
output_geo = output_geo[0].data.cpu().numpy()
# unormalize lane outputs
unormalize_lane_anchor(output_geo, num_y_steps, anchor_dim, x_off_std, z_std, num_types=3)
# compute 3D lanes from network output, geometric transformation is involved
lanelines_pred, centerlines_pred, lanelines_prob, centerlines_prob = \
compute_3d_lanes_all_prob(output_geo, anchor_dim, anchor_x_steps, anchor_y_steps, cam_params['cameraHeight'])
# visualize predicted lanes
# args.top_view_region = np.array([[-10, 80], [10, 80], [-10, 3], [10, 3]])
vs = lane_visualizer(args)
vs.dataset_dir = './'
fig = plt.figure()
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233, projection='3d')
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236, projection='3d')
# draw lanes
vs.visualize_lanes(lanelines_pred, image_file, cam_params['cameraHeight'], cam_params['cameraPitch'], ax1, ax2, ax3)
vs.visualize_lanes(centerlines_pred, image_file, cam_params['cameraHeight'], cam_params['cameraPitch'], ax4, ax5, ax6)
ax1.set_xticks([])
ax1.set_yticks([])
ax2.set_xticks([])
ax2.set_yticks([])
bottom, top = ax3.get_zlim()
left, right = ax3.get_xlim()
ax3.set_zlim(min(bottom, -0.1), max(top, 0.1))
ax3.set_xlim(left, right)
ax3.set_ylim(0, 80)
ax3.locator_params(nbins=5, axis='x')
ax3.locator_params(nbins=5, axis='z')
ax3.tick_params(pad=18)
ax4.set_xticks([])
ax4.set_yticks([])
ax5.set_xticks([])
ax5.set_yticks([])
bottom, top = ax6.get_zlim()
left, right = ax6.get_xlim()
ax6.set_zlim(min(bottom, -0.1), max(top, 0.1))
ax6.set_xlim(left, right)
ax6.set_ylim(0, 80)
ax6.locator_params(nbins=5, axis='x')
ax6.locator_params(nbins=5, axis='z')
ax6.tick_params(pad=18)
fig.subplots_adjust(wspace=0, hspace=0.01)
fig.savefig('test.png')
plt.close(fig)
| 35.902062 | 122 | 0.674659 |
ba9ecb008a28cf01b15ce849b29692af96efa7ee | 4,045 | py | Python | uncle_archie.py | dcppc/uncle-archie | 10fb2f7181042016267ebd202ce973f237759f89 | [
"Apache-2.0"
] | 1 | 2018-09-14T17:52:08.000Z | 2018-09-14T17:52:08.000Z | uncle_archie.py | dcppc/uncle-archie | 10fb2f7181042016267ebd202ce973f237759f89 | [
"Apache-2.0"
] | 36 | 2018-08-18T13:43:59.000Z | 2018-12-07T18:50:51.000Z | uncle_archie.py | dcppc/uncle-archie | 10fb2f7181042016267ebd202ce973f237759f89 | [
"Apache-2.0"
] | 2 | 2020-11-04T06:18:49.000Z | 2020-11-06T11:01:17.000Z | import os
import logging
import subprocess
from tempfile import mkstemp
from ipaddress import ip_address, ip_network
import hmac
from hashlib import sha1
from os import access, remove, fdopen
import requests
import json
from flask import Flask, request, abort
from process_payload import process_payload
"""
Uncle Archie - Flask Server
This is a webhook flask server that serves as a frontend
for Uncle Archie, our home-brewed continuous integration
server.
"""
app = Flask(__name__)
count = 0
subprocess.call(['mkdir','-p','/tmp/archie'])
logging.basicConfig(filename='/tmp/archie/uncle_archie.log',
filemode='a',
level=logging.INFO)
@app.route('/webhook', methods=['GET', 'POST'])
def index():
"""
Main WSGI application entry.
"""
print("Uncle Archie got a visitor!")
path = os.path.dirname(os.path.abspath(__file__))
# -----
# Implement a nice hello world landing page
if request.method != 'POST':
# We really need to make a Jinja template instead.
return('<h2>Hello World! This is Uncle Archie, your local home-brewed CI server.</h2>')
# -----
# Load config
try:
pth = os.path.join(path, 'config.json')
with open(pth, 'r') as cfg:
config = json.loads(cfg.read())
except FileNotFoundError:
logging.error("ERROR: No config file found at %s"%(pth))
abort(501)
# -----
# Implement ping/pong
event = request.headers.get('X-GitHub-Event', 'ping')
if event == 'ping':
return json.dumps({'msg': 'pong'})
# Gather data
try:
payload = request.get_json()
except Exception:
logging.warning('Request parsing failed')
abort(400)
# Enforce secret
secret = config.get('enforce_secret', '')
if secret:
# Only SHA1 is supported
header_signature = request.headers.get('X-Hub-Signature')
if header_signature is None:
abort(403)
sha_name, signature = header_signature.split('=')
if sha_name != 'sha1':
abort(501)
# HMAC requires the key to be bytes, but data is string
mac = hmac.new(str.encode(secret), msg=request.data, digestmod='sha1')
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
logging.error(' XXXXXXXX A webhook with an invalid secret was received.')
abort(403)
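    # Sender-side sketch (comment only; mirrors GitHub's scheme): the client
    # sends X-Hub-Signature computed as
    #   'sha1=' + hmac.new(secret.encode(), payload_bytes, sha1).hexdigest()
    # and the compare_digest check above rejects anything else.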
# -----
# Determining the branch is tricky, as it only appears for certain event
# types an at different levels
branch = ''
try:
# Case 1: a ref_type indicates the type of ref.
# This true for create and delete events.
if 'ref_type' in payload:
if payload['ref_type'] == 'branch':
branch = payload['ref']
# Case 2: a pull_request object is involved. This is pull_request and
# pull_request_review_comment events.
elif 'pull_request' in payload:
# This is the TARGET branch for the pull-request, not the source
# branch
branch = payload['pull_request']['base']['ref']
elif event in ['push']:
# Push events provide a full Git ref in 'ref' and not a 'ref_type'.
branch = payload['ref'].split('/', 2)[2]
except KeyError:
# If the payload structure isn't what we expect,
# we'll live without the branch name
pass
# All current events have a repository, but some legacy events do not,
# so let's be safe
name = payload['repository']['name'] if 'repository' in payload else None
meta = {
'name': name,
'branch': branch,
'event': event
}
##############################
# Here, we pass off the hook info
# to user-defined python functions
process_payload(payload,meta,config)
# And done.
##############################
# Clean up
return json.dumps({'status':'done'})
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5005)
| 25.764331 | 95 | 0.603708 |
255b8af4b59ebeab2861f0bc16a5d8fc5e080948 | 4,596 | py | Python | test/functional/wallet_importprunedfunds.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/wallet_importprunedfunds.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/wallet_importprunedfunds.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Coinbit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from test_framework.test_framework import CoinbitTestFramework
from test_framework.util import *
class ImportPrunedFundsTest(CoinbitTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(101)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
#Check only one address
address_info = self.nodes[0].validateaddress(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),101)
#Address Test - before import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
#Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
#Import with no affiliated address
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance("", 0, True)
assert_equal(balance1, Decimal(0))
#Import with affiliated address with no rescan
self.nodes[1].importaddress(address2, "add2", False)
self.nodes[1].importprunedfunds(rawtxn2, proof2)
balance2 = self.nodes[1].getbalance("add2", 0, True)
assert_equal(balance2, Decimal('0.05'))
#Import with private key with no rescan
self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False)
self.nodes[1].importprunedfunds(rawtxn3, proof3)
balance3 = self.nodes[1].getbalance("add3", 0, False)
assert_equal(balance3, Decimal('0.025'))
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.075'))
#Addresses Test - after import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
#Remove transactions
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
balance1 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance1, Decimal('0.075'))
self.nodes[1].removeprunedfunds(txnid2)
balance2 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance2, Decimal('0.025'))
self.nodes[1].removeprunedfunds(txnid3)
balance3 = self.nodes[1].getbalance("*", 0, True)
assert_equal(balance3, Decimal('0.0'))
if __name__ == '__main__':
ImportPrunedFundsTest().main()
| 39.965217 | 117 | 0.659487 |
0cde3fa4787b04deb601b6464d3e8e1f37eb7cec | 495 | py | Python | testing_suite/test_dataloader.py | ReciprocalSpace/caracnl | 23891414ae4e808b5eeee6c2c72f7a7459324344 | [
"MIT"
] | null | null | null | testing_suite/test_dataloader.py | ReciprocalSpace/caracnl | 23891414ae4e808b5eeee6c2c72f7a7459324344 | [
"MIT"
] | null | null | null | testing_suite/test_dataloader.py | ReciprocalSpace/caracnl | 23891414ae4e808b5eeee6c2c72f7a7459324344 | [
"MIT"
] | null | null | null | import caracnl
import numpy as np
class TestingSuite:
def test_read_file_without_amp(self):
directory = ("C:\\Users\\utric\\Documents\\BioMaps\\4. Analyse VNA\\DATA\\"
"2018-III-Temperature_vs_puissance-80K\\Span=1MHz")
omega, S11, P_VNA = caracnl.dataloader.get_s11_data_from_file(directory)
caracnl.display_s11(P_VNA,omega, S11)
if __name__ == "__main__":
testing_suite = TestingSuite()
testing_suite.test_read_file_without_amp()
| 27.5 | 83 | 0.69697 |
f106312755e73172212aa071678ae7dbc4e340f9 | 196 | py | Python | homeworks/alexander_sidorov/lesson09/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | homeworks/alexander_sidorov/lesson09/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | 8 | 2019-11-15T18:15:56.000Z | 2020-02-03T18:05:05.000Z | homeworks/alexander_sidorov/lesson09/level02.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | [
"Apache-2.0"
] | null | null | null | def rotate_left(lst, revs):
result = lst[:]
if len(result) < 2:
return result
for _ in range(revs):
result.append(result[0])
result.pop(0)
return result
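# Example (illustrative): rotate_left([1, 2, 3, 4], 1) -> [2, 3, 4, 1]
# and rotate_left([1, 2, 3, 4], 2) -> [3, 4, 1, 2].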
| 16.333333 | 32 | 0.561224 |
cbbaa749e033daab74f4ed8916ce2e76e72ae723 | 23,639 | py | Python | librw/krw.py | DBGilles/retrowrite | 0842649de1d2ef767fcf973b3147e904cd7611a9 | [
"MIT"
] | 478 | 2019-06-19T09:33:50.000Z | 2022-03-25T09:34:24.000Z | librw/krw.py | DBGilles/retrowrite | 0842649de1d2ef767fcf973b3147e904cd7611a9 | [
"MIT"
] | 30 | 2019-07-12T09:38:43.000Z | 2022-03-28T04:53:31.000Z | librw/krw.py | DBGilles/retrowrite | 0842649de1d2ef767fcf973b3147e904cd7611a9 | [
"MIT"
] | 62 | 2019-06-25T16:41:04.000Z | 2022-02-22T15:47:35.000Z | import argparse
import json
from collections import defaultdict
from capstone import CS_OP_IMM, CS_GRP_JUMP, CS_GRP_CALL, CS_OP_MEM
from capstone.x86_const import X86_REG_RIP
from elftools.elf.descriptions import describe_reloc_type
from elftools.elf.enums import ENUM_RELOC_TYPE_x64
from elftools.elf.constants import SH_FLAGS
from .kcontainer import Address
class Rewriter():
GCC_FUNCTIONS = [
"_start",
"__libc_start_main",
"__libc_csu_fini",
"__libc_csu_init",
"__lib_csu_fini",
"_init",
"__libc_init_first",
"_fini",
"_rtld_fini",
"_exit",
"__get_pc_think_bx",
"__do_global_dtors_aux",
"__gmon_start",
"frame_dummy",
"__do_global_ctors_aux",
"__register_frame_info",
"deregister_tm_clones",
"register_tm_clones",
"__do_global_dtors_aux",
"__frame_dummy_init_array_entry",
"__init_array_start",
"__do_global_dtors_aux_fini_array_entry",
"__init_array_end",
"__stack_chk_fail",
"__cxa_atexit",
"__cxa_finalize",
]
def __init__(self, container, outfile):
self.container = container
self.outfile = outfile
# Load data sections
for sec, section in self.container.sections.items():
section.load()
# Disassemble all functions
for function in container.iter_functions():
if function.name in Rewriter.GCC_FUNCTIONS:
continue
# print('Disassembling %s' % function.name)
function.disasm()
def symbolize(self):
symb = Symbolizer()
symb.symbolize_data_sections(self.container, None)
symb.symbolize_code_sections(self.container, None)
def dump_cf_info(self, f):
# need: list of successors of each instruction
# registers read by each instruction
# registers written to by each instruction
# schema:
# file contains a dict of functions keyed by function name
# function contains entry point and list of instructions
# instruction contains the address of the instruction, the list of the
# indices of the successors, the list of registers written, and
# the list of registers read
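        # Illustrative output shape (all field values below are made up):
        # {"my_func": {"address": {...},
        #              "instructions": [{"address": {...}, "successors": [1],
        #                                "regs_written": ["rax"],
        #                                "regs_read": ["rdi"]}, ...],
        #              "bbstarts": ["..."]}}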
cf_info = defaultdict(dict)
for function in self.container.iter_functions():
instructions_info = []
bbstarts = [str(bbs) for bbs in function.bbstarts]
for instruction_idx, instruction in enumerate(function.cache):
instructions_info.append({
'address': instruction.address.as_dict(),
'successors': function.next_of(instruction_idx),
'regs_written': instruction.reg_writes(),
'regs_read': instruction.reg_reads(),
})
cf_info[function.name] = {
'address': function.address.as_dict(),
'instructions': instructions_info,
'bbstarts': bbstarts,
}
json.dump(cf_info, f)
def dump(self):
results = list()
# Emit rewritten functions
for function in self.container.iter_functions():
if function.name in Rewriter.GCC_FUNCTIONS:
continue
results.append('.section %s,"ax",@progbits' % function.address.section.name)
results.append(".align 16")
results.append("%s" % function)
# Emit rewritten data sections
for sec, section in sorted(
self.container.sections.items(), key=lambda x: x[1].base):
results.append("%s" % (section))
# Write the final output
with open(self.outfile, 'w') as outfd:
outfd.write("\n".join(results + ['']))
class Symbolizer():
RELOCATION_SIZES = {
ENUM_RELOC_TYPE_x64['R_X86_64_64']: 8,
ENUM_RELOC_TYPE_x64['R_X86_64_GOT32']: 4,
ENUM_RELOC_TYPE_x64['R_X86_64_32']: 4,
ENUM_RELOC_TYPE_x64['R_X86_64_32S']: 4,
ENUM_RELOC_TYPE_x64['R_X86_64_16']: 2,
ENUM_RELOC_TYPE_x64['R_X86_64_8']: 2,
ENUM_RELOC_TYPE_x64['R_X86_64_PC64']: 8,
ENUM_RELOC_TYPE_x64['R_X86_64_PC32']: 4,
ENUM_RELOC_TYPE_x64['R_X86_64_PLT32']: 4,
ENUM_RELOC_TYPE_x64['R_X86_64_PC16']: 2,
ENUM_RELOC_TYPE_x64['R_X86_64_PC8']: 1,
ENUM_RELOC_TYPE_x64['R_X86_64_JUMP_SLOT']: 8,
}
def __init__(self):
self.bases = set()
self.pot_sw_bases = defaultdict(set)
self.symbolized_imm = set()
self.symbolized_mem = set()
def apply_mem_op_symbolization(self, instruction, target, relocation=None):
op_mem, _ = instruction.get_mem_access_op()
assert op_mem
if op_mem.segment != 0:
# Segment offsets don't use this form, instead they are like %gs:offset
instruction.op_str = instruction.op_str.replace(
':{}'.format(op_mem.disp),
':{}'.format(target))
elif op_mem.base == 0 and op_mem.index == 0:
# Absolute call ds:offset, or in at&t callq *addr. Yes this is a thing in the kernel
# if instruction.mnemonic == 'movq' and instruction.op_str == '$0, 0(, %rax, 8)':
if relocation:
replacement = '*({} - {})'.format(target,
Symbolizer.RELOCATION_SIZES[relocation['type']])
else:
replacement = '*{}'.format(target)
instruction.op_str = instruction.op_str.replace(
'*{}'.format(op_mem.disp),
replacement
)
else:
instruction.op_str = instruction.op_str.replace('{}('.format(op_mem.disp), str(target) + '(')
self.symbolized_mem.add(instruction.address)
def apply_code_relocation(self, instruction, relocation, container):
# DEBUG
# if instruction.address.section.name == '.text' and instruction.address.offset == 0xbff21:
# import pdb; pdb.set_trace()
if relocation['symbol_address'] is None:
# This relocation refers to an imported symbol
if (relocation['type'] in [
ENUM_RELOC_TYPE_x64['R_X86_64_64'],
ENUM_RELOC_TYPE_x64['R_X86_64_GOT32'],
ENUM_RELOC_TYPE_x64['R_X86_64_32'],
ENUM_RELOC_TYPE_x64['R_X86_64_32S'],
ENUM_RELOC_TYPE_x64['R_X86_64_16'],
ENUM_RELOC_TYPE_x64['R_X86_64_8'],
]):
add = relocation['addend']
elif (relocation['type'] in [
ENUM_RELOC_TYPE_x64['R_X86_64_PC64'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC32'],
ENUM_RELOC_TYPE_x64['R_X86_64_PLT32'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC16'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC8'],
]):
add = relocation['addend'] + \
instruction.address.offset + instruction.sz - relocation['address'].offset
else:
assert False, 'Unknown relocation type'
relocation_target = '{} + {}'.format(relocation['name'], add)
else:
if (relocation['type'] in [
ENUM_RELOC_TYPE_x64['R_X86_64_64'],
ENUM_RELOC_TYPE_x64['R_X86_64_GOT32'],
ENUM_RELOC_TYPE_x64['R_X86_64_32'],
ENUM_RELOC_TYPE_x64['R_X86_64_32S'],
ENUM_RELOC_TYPE_x64['R_X86_64_16'],
ENUM_RELOC_TYPE_x64['R_X86_64_8'],
]):
section_offset = relocation['symbol_address'].offset + relocation['addend']
elif (relocation['type'] in [
ENUM_RELOC_TYPE_x64['R_X86_64_PC64'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC32'],
ENUM_RELOC_TYPE_x64['R_X86_64_PLT32'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC16'],
ENUM_RELOC_TYPE_x64['R_X86_64_PC8'],
]):
section_offset = relocation['symbol_address'].offset + relocation['addend'] + \
instruction.address.offset + instruction.sz - relocation['address'].offset
else:
assert False, 'Unknown relocation type'
symbol_section_name = relocation['symbol_address'].section.name
# The target symbol is in this binary
if symbol_section_name in container.sections:
# The relocation points to a data section, it can point to the middle of something
# e.g. if the compiler uses it in a loop termination condition
closest_non_ignored_offset = container.sections[symbol_section_name].get_closest_non_ignored_offset(section_offset)
relocation_target = '.LC{}{:x} + {}'.format(symbol_section_name,
closest_non_ignored_offset, section_offset - closest_non_ignored_offset)
else:
# The relocation points to code, it should never point to the
# middle of an instruction. If it does, we have a problem
relocation_target = '.LC{}{:x}'.format(symbol_section_name,
section_offset)
rel_offset_inside_instruction = relocation['address'].offset - instruction.address.offset
op_imm, op_imm_idx = instruction.get_imm_op()
op_mem, op_mem_idx = instruction.get_mem_access_op()
is_jmp = CS_GRP_JUMP in instruction.cs.groups
is_call = CS_GRP_CALL in instruction.cs.groups
# We cannot just replace the value of the field with the target (e.g.
# '0' -> .LC.text.0) because what happens if we have movq $0, 0(%rdi)?
# both would be replaced which is wrong
if op_imm is not None and rel_offset_inside_instruction == instruction.cs.imm_offset:
# Relocation writes to immediate
if is_jmp or is_call:
# Direct branch targets are not prefixed with $
instruction.op_str = relocation_target
else:
instruction.op_str = instruction.op_str.replace('${}'.format(op_imm), '$' + relocation_target)
self.symbolized_imm.add(instruction.address)
elif op_mem is not None and rel_offset_inside_instruction == instruction.cs.disp_offset:
# Relocation writes to displacement
self.apply_mem_op_symbolization(instruction, relocation_target, relocation)
else:
assert False, "Relocation doesn't write to disp or imm"
# symbolize_code_sections symbolizes all code and data references located in
# the code sections.
    # There are 3 categories of references that need to be symbolized:
# 1 - anything that uses relocations. In x86_64 PIE usermode binaries
# these are used for imports (got entries) and init_array. In kernel
# modules these are used for anything that references a different
# section or a symbol in the main kernel binary or another module.
#
# 2 - Direct calls and jumps. These all use an offset relative to the
# next instruction, and don't use RIP-relative addressing
#
# 3 - RIP-relative data references. There can be no direct data references
    # because the executable is position-independent. Indirect jumps and
# calls can also have data references
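    # A minimal sketch (addresses and label spellings are illustrative) of
    # the rewrite each category produces:
    #   callq  0x1f0              ->  callq  .LC<sec><off>             (cat. 2)
    #   leaq   0x200(%rip), %rdi  ->  leaq   .LC<sec><off>(%rip), %rdi (cat. 3)
    # where the .LC labels are emitted at the matching offsets when the
    # rewritten assembly is dumped.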
def symbolize_code_sections(self, container, context):
# Symbolize relocations
for section in container.loader.elffile.iter_sections():
# Only look for functions in sections that contain code
if (section['sh_flags'] & SH_FLAGS.SHF_EXECINSTR) == 0:
continue
for rel in container.code_relocations[section.name]:
target_address = rel['address']
fn = container.function_of_address(target_address)
if not fn or fn.name in Rewriter.GCC_FUNCTIONS:
# Relocation doesn't point into a function
continue
inst = fn.instruction_of_address(target_address)
if not inst:
# Relocation doesn't point to an instruction
continue
self.apply_code_relocation(inst, rel, container)
# Symbolize direct branches
self.symbolize_direct_branches(container, context)
# Symbolize memory accesses
self.symbolize_mem_accesses(container, context)
# Symbolize direct branches
def symbolize_direct_branches(self, container, context=None):
for function in container.iter_functions():
offset_to_idx = dict()
for inst_idx, instruction in enumerate(function.cache):
offset_to_idx[instruction.address.offset] = inst_idx
for inst_idx, instruction in enumerate(function.cache):
is_jmp = CS_GRP_JUMP in instruction.cs.groups
is_call = CS_GRP_CALL in instruction.cs.groups
# Ignore everything except jumps and calls
if not (is_jmp or is_call):
if instruction.mnemonic.startswith('ret'):
# This instruction is a return, treat specially
function.nexts[inst_idx].append('ret')
instruction.cf_leaves_fn = True
else:
# This instruction doesn't alter control flow, it only
# has one successor (the next instruction)
function.nexts[inst_idx].append(inst_idx + 1)
continue
instruction.cf_leaves_fn = False
imm_op = instruction.get_imm_op()[0]
# Indirect jump/call
if imm_op is None:
if is_call:
# Indirect call
function.nexts[inst_idx].append('call')
function.nexts[inst_idx].append(inst_idx + 1)
instruction.cf_leaves_fn = True
else:
# Indirect jump
function.nexts[inst_idx].append('undef')
continue
# Ignore targets that were already symbolized with relocations
if instruction.address in self.symbolized_imm:
if is_jmp:
# Direct jump to an external symbol
if not instruction.mnemonic.startswith('jmp'):
# Conditional direct jump
function.nexts[inst_idx].append(inst_idx + 1)
function.nexts[inst_idx].append('undef')
instruction.cf_leaves_fn = True
else:
# Direct call to an external symbol
function.nexts[inst_idx].append('call')
function.nexts[inst_idx].append(inst_idx + 1)
instruction.cf_leaves_fn = True
continue
# Capstone should have already computed the right address
# (in terms of offset from the start of the section)
target = container.adjust_address(Address(instruction.address.section, imm_op))
if is_jmp:
# Direct jump
if not instruction.mnemonic.startswith('jmp'):
# Conditional direct jump, this could also fall through to the next instruction
function.bbstarts.add(Address(instruction.address.section, instruction.address.offset + instruction.sz))
function.nexts[inst_idx].append(inst_idx + 1)
if (target.section.name == function.address.section.name and
function.address.offset <= target.offset < function.address.offset + function.sz):
# The jump stays inside the same function
function.bbstarts.add(target)
function.nexts[inst_idx].append(offset_to_idx[target.offset])
else:
# Direct jump that leaves the function (typically a tail call)
function.nexts[inst_idx].append('undef')
instruction.cf_leaves_fn = True
else:
# Direct call
function.nexts[inst_idx].append('call')
function.nexts[inst_idx].append(inst_idx + 1)
instruction.cf_leaves_fn = True
instruction.op_str = '.LC%s' % str(target)
# Symbolize memory accesses
def symbolize_mem_accesses(self, container, context):
for function in container.iter_functions():
for instruction in function.cache:
mem_access, _ = instruction.get_mem_access_op()
# Ignore instructions that don't access memory
if not mem_access:
continue
# Ignore non-RIP relative references
if mem_access.base != X86_REG_RIP:
continue
if instruction.address in self.symbolized_mem:
continue
target = container.adjust_address(
Address(instruction.address.section,
instruction.address.offset + instruction.sz +
mem_access.disp
)
)
self.apply_mem_op_symbolization(instruction, target)
def label_for_address(self, container, address):
fn = container.function_of_address(address)
# Assume that relocations that point into data sections never point into
# the middle of something
if not fn:
return '.LC%s' % str(address)
insn = fn.instruction_of_address(address)
# Check if the relocation refers to the start of an instruction
if insn.address.offset == address.offset:
return '.LC%s' % str(address)
# The relocation refers to the middle of an instruction
return '.LC%s + %d' % (str(insn.address), address.offset - insn.address.offset)
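        # e.g. (illustrative) a relocation pointing one byte past the start
        # of an instruction labeled .LC<section><offset> is rendered as
        # '.LC<section><offset> + 1'.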
def apply_data_relocation(self, container, section, relocation):
reloc_type = relocation['type']
if reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_COPY"]:
# NOP
return
relocation_size = Symbolizer.RELOCATION_SIZES[relocation['type']]
relocation_target = None
if relocation['symbol_address'] is None:
# This relocation refers to an imported symbol
relocation_target = '{} + {}'.format(relocation['name'], relocation['addend'])
# PC32 and PLT32 are more or less the same in the kernel, but not in user space
if reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_PC32"] or reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_PLT32"]:
if not relocation_target:
value = relocation['symbol_address'].offset + relocation['addend']
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
relocation_target += ' - .'
elif reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_PC64"]:
if not relocation_target:
value = relocation['symbol_address'].offset + relocation['addend']
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
relocation_target += ' - .'
elif reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_32S"]:
if not relocation_target:
value = relocation['symbol_address'].offset + relocation['addend']
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
elif reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_64"]:
if not relocation_target:
value = relocation['symbol_address'].offset + relocation['addend']
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
elif reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_RELATIVE"]:
if not relocation_target:
value = relocation['addend']
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
elif reloc_type == ENUM_RELOC_TYPE_x64["R_X86_64_JUMP_SLOT"]:
if not relocation_target:
value = relocation['symbol_address'].offset
relocation_target = self.label_for_address(container, Address(relocation['symbol_address'].section, value))
else:
print("[*] Unhandled relocation {}".format(
describe_reloc_type(reloc_type, container.loader.elffile)))
if relocation_size:
section.replace(relocation['address'].offset, relocation_size, relocation_target)
def symbolize_data_sections(self, container, context=None):
# Section specific relocation
for secname, section in container.sections.items():
for relocation in section.relocations:
self.apply_data_relocation(container, section, relocation)
def is_data_section(sname, sval, container):
# A data section should be present in memory (SHF_ALLOC), and its size should
# be greater than 0. There are some code sections in kernel modules that
# only contain short trampolines and don't have any function relocations
# in them. The easiest way to deal with them for now is to treat them as
# data sections but this is a bit of a hack because they could contain
# references that need to be symbolized
return (
(sval['flags'] & SH_FLAGS.SHF_ALLOC) != 0 and (
(sval['flags'] & SH_FLAGS.SHF_EXECINSTR) == 0 or sname not in container.code_section_names
) and sval['sz'] > 0
)
def is_readonly_data_section(section):
return (
(section['sh_flags'] & SH_FLAGS.SHF_ALLOC) != 0 and
(section['sh_flags'] & SH_FLAGS.SHF_EXECINSTR) == 0 and
(section['sh_flags'] & SH_FLAGS.SHF_WRITE) == 0
)
if __name__ == "__main__":
from .kloader import Loader
from .analysis import kregister
argp = argparse.ArgumentParser()
argp.add_argument("bin", type=str, help="Input binary to load")
argp.add_argument("outfile", type=str, help="Symbolized ASM output")
argp.add_argument("--ignore-no-pie", dest="ignore_no_pie", action='store_true', help="Ignore position-independent-executable check (use with caution)")
argp.set_defaults(ignore_no_pie=False)
args = argp.parse_args()
loader = Loader(args.bin)
    if not loader.is_pie() and not args.ignore_no_pie:
print("RetroWrite requires a position-independent module.")
print("It looks like %s is not position independent" % args.bin)
        sys.exit(1)
flist = loader.flist_from_symtab()
loader.load_functions(flist)
slist = loader.slist_from_symtab()
loader.load_data_sections(slist, is_data_section)
reloc_list = loader.reloc_list_from_symtab()
loader.load_relocations(reloc_list)
global_list = loader.global_data_list_from_symtab()
loader.load_globals_from_glist(global_list)
loader.container.attach_loader(loader)
rw = Rewriter(loader.container, args.outfile)
rw.symbolize()
rw.dump()
| 43.53407 | 155 | 0.607217 |
75fbed5085379c215101dedd2c9745512d237f4d | 5,520 | py | Python | highway_env/envs/merge_env2.py | lxpr/highway-env | 58e7b457d1e488a967b667276e344f7fdffe9f7c | [
"MIT"
] | null | null | null | highway_env/envs/merge_env2.py | lxpr/highway-env | 58e7b457d1e488a967b667276e344f7fdffe9f7c | [
"MIT"
] | null | null | null | highway_env/envs/merge_env2.py | lxpr/highway-env | 58e7b457d1e488a967b667276e344f7fdffe9f7c | [
"MIT"
] | null | null | null | import numpy as np
from gym.envs.registration import register
from highway_env import utils
from highway_env.envs.common.abstract import AbstractEnv
from highway_env.road.lane import LineType, StraightLane, SineLane
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.controller import ControlledVehicle
from highway_env.vehicle.objects import Obstacle
class MergeEnv2(AbstractEnv):
"""
A highway merge negotiation environment.
The ego-vehicle is driving on a highway and approached a merge, with some vehicles incoming on the access ramp.
It is rewarded for maintaining a high speed and avoiding collisions, but also making room for merging
vehicles.
"""
COLLISION_REWARD: float = -1
RIGHT_LANE_REWARD: float = 0.1
HIGH_SPEED_REWARD: float = 0.2
MERGING_SPEED_REWARD: float = -0.5
LANE_CHANGE_REWARD: float = -0.05
def _reward(self, action: int) -> float:
"""
The vehicle is rewarded for driving with high speed on lanes to the right and avoiding collisions
But an additional altruistic penalty is also suffered if any vehicle on the merging lane has a low speed.
:param action: the action performed
:return: the reward of the state-action transition
"""
action_reward = {0: self.LANE_CHANGE_REWARD,
1: 0,
2: self.LANE_CHANGE_REWARD,
3: 0,
4: 0}
reward = self.COLLISION_REWARD * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index[2] / 1 \
+ self.HIGH_SPEED_REWARD * self.vehicle.speed_index / (self.vehicle.SPEED_COUNT - 1)
# Altruistic penalty
for vehicle in self.road.vehicles:
if vehicle.lane_index == ("b", "c", 2) and isinstance(vehicle, ControlledVehicle):
reward += self.MERGING_SPEED_REWARD * \
(vehicle.target_speed - vehicle.speed) / vehicle.target_speed
return utils.lmap(action_reward[action] + reward,
[self.COLLISION_REWARD + self.MERGING_SPEED_REWARD,
self.HIGH_SPEED_REWARD + self.RIGHT_LANE_REWARD],
[0, 1])
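        # Worked example (illustrative): with action 1 (idle), no crash,
        # lane index 1 and top speed, the raw reward is
        # 0.1 * 1/1 + 0.2 * 1 = 0.3; lmap then rescales it from
        # [-1.5, 0.3] onto [0, 1], giving 1.0.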
def _is_terminal(self) -> bool:
"""The episode is over when a collision occurs or when the access ramp has been passed."""
return self.vehicle.crashed or self.vehicle.position[0] > 370
def _reset(self) -> None:
self._make_road()
self._make_vehicles()
def _make_road(self) -> None:
"""
Make a road composed of a straight highway and a merging lane.
:return: the road
"""
net = RoadNetwork()
# Highway lanes
ends = [150, 80, 80, 150] # Before, converging, merge, after
c, s, n = LineType.CONTINUOUS_LINE, LineType.STRIPED, LineType.NONE
y = [0, StraightLane.DEFAULT_WIDTH]
line_type = [[c, s], [n, c]]
line_type_merge = [[c, s], [n, s]]
for i in range(2):
net.add_lane("a", "b", StraightLane([0, y[i]], [sum(ends[:2]), y[i]], line_types=line_type[i]))
net.add_lane("b", "c", StraightLane([sum(ends[:2]), y[i]], [sum(ends[:3]), y[i]], line_types=line_type_merge[i]))
net.add_lane("c", "d", StraightLane([sum(ends[:3]), y[i]], [sum(ends), y[i]], line_types=line_type[i]))
# Merging lane
amplitude = 3.25
ljk = StraightLane([0, 6.5 + 4 + 4], [ends[0], 6.5 + 4 + 4], line_types=[c, c], forbidden=True)
lkb = SineLane(ljk.position(ends[0], -amplitude), ljk.position(sum(ends[:2]), -amplitude),
amplitude, 2 * np.pi / (2*ends[1]), np.pi / 2, line_types=[c, c], forbidden=True)
lbc = StraightLane(lkb.position(ends[1], 0), lkb.position(ends[1], 0) + [ends[2], 0],
line_types=[n, c], forbidden=True)
net.add_lane("j", "k", ljk)
net.add_lane("k", "b", lkb)
net.add_lane("b", "c", lbc)
road = Road(network=net, np_random=self.np_random, record_history=self.config["show_trajectories"])
road.objects.append(Obstacle(road, lbc.position(ends[2], 0)))
self.road = road
def _make_vehicles(self) -> None:
"""
Populate a road with several vehicles on the highway and on the merging lane, as well as an ego-vehicle.
:return: the ego-vehicle
"""
road = self.road
ego_vehicle = self.action_type.vehicle_class(road,
road.network.get_lane(("a", "b", 1)).position(30, 0),
speed=30)
road.vehicles.append(ego_vehicle)
other_vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(90, 0), speed=29))
road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 1)).position(70, 0), speed=31))
road.vehicles.append(other_vehicles_type(road, road.network.get_lane(("a", "b", 0)).position(5, 0), speed=31.5))
merging_v = other_vehicles_type(road, road.network.get_lane(("j", "k", 0)).position(110, 0), speed=20)
merging_v.target_speed = 30
road.vehicles.append(merging_v)
self.vehicle = ego_vehicle
register(
id='merge-v1',
entry_point='highway_env.envs:MergeEnv2',
)
| 44.16 | 125 | 0.608696 |
aab7cc7ab2ef365b063652bc51558b577b40f21d | 3,506 | py | Python | inference/inference_api_test/python_api_test/tests/gpu/test_v1_faster_rcnn_r50_gpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | [
"Apache-2.0"
] | 14 | 2020-03-04T07:52:07.000Z | 2022-02-14T01:39:14.000Z | inference/inference_api_test/python_api_test/tests/gpu/test_v1_faster_rcnn_r50_gpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | [
"Apache-2.0"
] | 19 | 2020-03-04T03:52:10.000Z | 2021-12-23T07:02:07.000Z | inference/inference_api_test/python_api_test/tests/gpu/test_v1_faster_rcnn_r50_gpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | [
"Apache-2.0"
] | 26 | 2020-03-04T05:39:09.000Z | 2022-02-14T01:43:28.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import pytest
import numpy as np
import image_preprocess
from PIL import Image
from paddle.inference import Config
from paddle.inference import create_predictor
from test_src import test_gpu_model_jetson
def inference_faster_rcnn_r50(img, model_path, params_path):
"""
inference_faster_rcnn_r50
Args:
img: numpy img
model_path: model path
params_path: params path
Returns:
results : paddle inference output data
"""
batch_size = 1
config = Config(model_path, params_path)
config.enable_use_gpu(0)
config.switch_ir_optim(True)
config.switch_use_feed_fetch_ops(False)
config.switch_specify_input_names(True)
config.enable_memory_optim()
predictor = create_predictor(config)
input_names = predictor.get_input_names()
# input_handle0 = predictor.get_input_handle(input_names[0])
# input_handle1 = predictor.get_input_handle(input_names[1])
# input_handle2 = predictor.get_input_handle(input_names[2])
im_size = 608
data = image_preprocess.preprocess(img, im_size)
scale_factor = np.array([im_size * 1. / img.shape[0], im_size *
1. / img.shape[1]]).reshape((1, 2)).astype(np.float32)
im_shape = np.array([im_size, im_size]).reshape((1, 2)).astype(np.float32)
data_input = [im_shape, data, scale_factor]
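    # NOTE: this positional pairing assumes the exported model declares its
    # inputs in the order (im_shape, image, scale_factor); if the export
    # order differs, match each tensor to input_names by name instead.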
for i, name in enumerate(input_names):
input_tensor = predictor.get_input_handle(name)
input_tensor.reshape(data_input[i].shape)
input_tensor.copy_from_cpu(data_input[i].copy())
# do the inference
predictor.run()
results = []
# get out data from output tensor
output_names = predictor.get_output_names()
for i, name in enumerate(output_names):
output_tensor = predictor.get_output_handle(name)
output_data = output_tensor.copy_to_cpu()
results.append(output_data)
return results
@pytest.mark.p0
def test_faster_rcnn_r50():
"""
test_faster_rcnn_r50
Args:
None
Returns:
None
"""
diff_standard = 1e-3
model_name = "faster_rcnn_r50"
test_model = test_gpu_model_jetson(model_name=model_name)
model_path, params_path = test_model.test_comb_model_path(
"cv_detect_model")
img_name = 'kite.jpg'
image_path = test_model.test_readdata(
path="cv_detect_model", data_name=img_name)
img = cv2.imread(image_path)
with_lr_data = inference_faster_rcnn_r50(img, model_path, params_path)
npy_result = test_model.npy_result_path("cv_detect_model")
test_model.test_diff(npy_result, with_lr_data[0], diff_standard)
# det image with box
# np.save("faster_rcnn_r50.npy",with_lr_data[0])
# image_preprocess.draw_bbox(image_path, with_lr_data[0], save_name="faster_rcnn_r50.jpg")
| 34.372549 | 94 | 0.723046 |
5d9b38daeecbbe83ae0ca9d76e2e835f7706c136 | 800 | py | Python | Hyechan/questionTest/questionTest.py | adomaatobrah/simplewebpage | 41b3d78d03d15d7aaac5dda1a592c7e850bb70d2 | [
"MIT"
] | null | null | null | Hyechan/questionTest/questionTest.py | adomaatobrah/simplewebpage | 41b3d78d03d15d7aaac5dda1a592c7e850bb70d2 | [
"MIT"
] | null | null | null | Hyechan/questionTest/questionTest.py | adomaatobrah/simplewebpage | 41b3d78d03d15d7aaac5dda1a592c7e850bb70d2 | [
"MIT"
] | null | null | null | import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # move the model once; input ids are moved per step below
prompt_text = """
It was a dark and stormy night.
Dark black crows fluttered from dead tree branch to dead tree branch.
Note: The weather is stormy.
The breeze was nice, the sun was shining.
A green turtle lazily swam across the surf.
Note:
"""
for i in range(0, 10):
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt")
prediction_scores, past = model.forward(encoded_prompt)
next_word = tokenizer.decode(prediction_scores[0, -1].topk(1).indices[0].item())
prompt_text += next_word
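# A minimal alternative sketch (assuming a transformers version whose
# GPT2LMHeadModel provides generate()): emit all 10 greedy tokens in one
# call instead of re-encoding the prompt every iteration.
# output = model.generate(encoded_prompt, max_length=encoded_prompt.shape[1] + 10)
# prompt_text = tokenizer.decode(output[0])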
print(prompt_text) | 33.333333 | 97 | 0.76625 |
e18cc3213e1e0f0c47ca443e18c368589ac1ea00 | 6,118 | py | Python | web/addons/sales_team/sales_team.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/sales_team/sales_team.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/sales_team/sales_team.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.osv):
_name = "crm.case.section"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Teams"
_order = "complete_name"
_period_number = 5
def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
                                                  'tooltip': (str) bar_column_tooltip,
}
]
"""
month_begin = date.today().replace(day=1)
section_result = [{
'value': 0,
'tooltip': (month_begin + relativedelta.relativedelta(months=-i)).strftime('%B %Y'),
} for i in range(self._period_number - 1, -1, -1)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
return section_result
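        # Illustrative return value for _period_number = 5 (numbers made up):
        # [{'value': 0, 'tooltip': 'January 2015'}, ...,
        #  {'value': 3, 'tooltip': <groupby value>}]
        # slices that had a matching group carry the groupby value as tooltip.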
_columns = {
'name': fields.char('Sales Team', size=64, required=True, translate=True),
'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
'code': fields.char('Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to "\
"true, it will allow you to hide the sales team without removing it."),
'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team override the salesman with the team leader."),
'user_id': fields.many2one('res.users', 'Team Leader'),
'member_ids': fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
'note': fields.text('Description'),
'working_hours': fields.float('Working Hours', digits=(16, 2)),
'color': fields.integer('Color Index'),
}
_defaults = {
'active': 1,
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
]
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
"""Overrides orm name_get method"""
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'default_section_id': fields.many2one('crm.case.section', 'Default Sales Team'),
}
def __init__(self, pool, cr):
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['default_section_id'])
return init_res
| 46.70229 | 167 | 0.610657 |
67523af83efd83fad257b2f6eecb09cf423f2929 | 2,391 | py | Python | scripts/xml_to_csv.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | 1 | 2021-01-06T16:34:48.000Z | 2021-01-06T16:34:48.000Z | scripts/xml_to_csv.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | null | null | null | scripts/xml_to_csv.py | joshualemmon/db-error-detection-gan | 27d49e0eb0b54b0b3a03efac8803355b2c0455bd | [
"MIT"
] | null | null | null | from xml.etree import ElementTree
import os
import csv
import argparse
import numpy as np
def get_xml_vals(infile):
tree = ElementTree.parse(infile)
root = tree.getroot()
item = ""
attr = []
# Get attribute names
for r in root:
item = r.tag
for c in r:
attr.append(c.tag)
attr = set(attr)
# Get attribute values for each item
vals = []
for p in list(root):
v = []
for a in attr:
v.append(p.find(a).text)
vals.append(v)
return vals
def add_attributes(xml_data):
data = []
# Load possible interests and countries
interests = [i.rstrip('\n') for i in open('interests.txt', 'r').readlines()]
countries = [c.rstrip('\n') for c in open('countries.txt', 'r').readlines()]
# Generate random valid values for each tuple
for x in xml_data:
x.append(gen_age())
x.append(gen_salary())
x.append(gen_height())
x.append(gen_interest(interests))
x.append(gen_country(countries))
x.append(0)
data.append(x)
return data
def gen_age(mean=30, std=15, max_age=120, min_age=18):
age = int(np.random.normal(mean, std, 1))
if age < min_age:
return min_age
elif age > max_age:
return max_age
else:
return age
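# gen_salary and gen_height below follow the same pattern as gen_age:
# draw from a normal distribution, then clamp into a plausible range
# (e.g. an N(30, 15) age draw of 130 would be clamped to max_age=120).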
def gen_salary(mean=50000, std=10000, max_sal=100000, min_sal=20000):
sal = int(np.random.normal(mean, std, 1))
if sal < min_sal:
return min_sal
elif sal > max_sal:
return max_sal
else:
return sal
def gen_height(mean=168, std=10, max_height=200, min_height=155):
height = int(np.random.normal(mean, std, 1))
if height < min_height:
return min_height
elif height > max_height:
return max_height
else:
return height
def gen_interest(ints):
return ints[np.random.randint(low=0, high=len(ints))]
def gen_country(countries):
return countries[np.random.randint(low=0, high=len(countries))]
def main(args):
infile = args.infile
out = args.out
# Read XML data
xml_data = get_xml_vals(infile)
full_data = add_attributes(xml_data)
with open(out, 'w') as f:
writer = csv.writer(f, delimiter=',')
        # l_name, email, f_name, age, salary, height, interest, country, is_dirty
writer.writerow(['l_name', 'email', 'f_name', 'age', 'salary', 'height', 'interest', 'country', 'is_dirty'])
writer.writerows(full_data)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('--infile', '-i', type=str, required=True)
ap.add_argument('--out', '-o', type=str, required=True)
main(ap.parse_args()) | 24.151515 | 110 | 0.695107 |
e299fa18961340bbae280f85855a14966c552029 | 178,053 | py | Python | cinder/volume/drivers/hpe/hpe_3par_common.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/hpe/hpe_3par_common.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/hpe/hpe_3par_common.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | # (c) Copyright 2012-2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for HPE 3PAR Storage array
The 3PAR drivers require 3.1.3 firmware on the 3PAR array.
You will need to install the python hpe3parclient module.
sudo pip install python-3parclient
The drivers use both the REST service and the SSH
command line to operate correctly. Since the
SSH credentials and the REST credentials can be different,
we need settings for both.
The drivers require the san_ip, san_login and
san_password settings for SSH connections into the 3PAR
array. They also require the hpe3par_api_url,
hpe3par_username and hpe3par_password settings as
credentials for the REST service on the 3PAR array.
"""
import ast
import json
import math
import pprint
import re
import six
import uuid
from oslo_serialization import base64
from oslo_utils import importutils
hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient:
from hpe3parclient import client
from hpe3parclient import exceptions as hpeexceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import configuration
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
import taskflow.engines
from taskflow.patterns import linear_flow
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '4.2.0'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
COMPRESSION_API_VERSION = 30301215
SRSTATLD_API_VERSION = 30201200
REMOTE_COPY_API_VERSION = 30202290
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
default='',
help="3PAR WSAPI Server Url like "
"https://<3par ip>:8080/api/v1",
deprecated_name='hp3par_api_url'),
cfg.StrOpt('hpe3par_username',
default='',
help="3PAR username with the 'edit' role",
deprecated_name='hp3par_username'),
cfg.StrOpt('hpe3par_password',
default='',
help="3PAR password for the user specified in hpe3par_username",
secret=True,
deprecated_name='hp3par_password'),
cfg.ListOpt('hpe3par_cpg',
default=["OpenStack"],
help="List of the CPG(s) to use for volume creation",
deprecated_name='hp3par_cpg'),
cfg.StrOpt('hpe3par_cpg_snap',
default="",
help="The CPG to use for Snapshots for volumes. "
"If empty the userCPG will be used.",
deprecated_name='hp3par_cpg_snap'),
cfg.StrOpt('hpe3par_snapshot_retention',
default="",
help="The time in hours to retain a snapshot. "
"You can't delete it before this expires.",
deprecated_name='hp3par_snapshot_retention'),
cfg.StrOpt('hpe3par_snapshot_expiration',
default="",
help="The time in hours when a snapshot expires "
" and is deleted. This must be larger than expiration",
deprecated_name='hp3par_snapshot_expiration'),
cfg.BoolOpt('hpe3par_debug',
default=False,
help="Enable HTTP debugging to 3PAR",
deprecated_name='hp3par_debug'),
cfg.ListOpt('hpe3par_iscsi_ips',
default=[],
help="List of target iSCSI addresses to use.",
deprecated_name='hp3par_iscsi_ips'),
cfg.BoolOpt('hpe3par_iscsi_chap_enabled',
default=False,
help="Enable CHAP authentication for iSCSI connections.",
deprecated_name='hp3par_iscsi_chap_enabled'),
]
CONF = cfg.CONF
CONF.register_opts(hpe3par_opts, group=configuration.SHARED_CONF_GROUP)
# Input/output (total read/write) operations per second.
THROUGHPUT = 'throughput'
# Data processed (total read/write) per unit time: kilobytes per second.
BANDWIDTH = 'bandwidth'
# Response time (total read/write): microseconds.
LATENCY = 'latency'
# IO size (total read/write): kilobytes.
IO_SIZE = 'io_size'
# Queue length for processing IO requests
QUEUE_LENGTH = 'queue_length'
# Average busy percentage
AVG_BUSY_PERC = 'avg_busy_perc'
class HPE3PARCommon(object):
"""Class that contains common code for the 3PAR drivers.
Version history:
.. code-block:: none
1.2.0 - Updated hp3parclient API use to 2.0.x
1.2.1 - Check that the VVS exists
1.2.2 - log prior to raising exceptions
1.2.3 - Methods to update key/value pair bug #1258033
1.2.4 - Remove deprecated config option hp3par_domain
1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249
1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515
This update now requires 3.1.2 MU3 firmware
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.1 - Updated to use qos_specs, added new qos settings and personas
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Allow deleting missing snapshots bug #1283233
2.0.4 - Allow volumes created from snapshots to be larger bug #1279478
2.0.5 - Fix extend volume units bug #1284368
2.0.6 - use loopingcall.wait instead of time.sleep
2.0.7 - Allow extend volume based on snapshot bug #1285906
2.0.8 - Fix detach issue for multiple hosts bug #1288927
2.0.9 - Remove unused 3PAR driver method bug #1310807
2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542
2.0.11 - Remove hp3parclient requirement from unit tests #1315195
2.0.12 - Volume detach hangs when host is in a host set bug #1317134
2.0.13 - Added support for managing/unmanaging of volumes
2.0.14 - Modified manage volume to use standard 'source-name' element.
2.0.15 - Added support for volume retype
2.0.16 - Add a better log during delete_volume time. Bug #1349636
2.0.17 - Added iSCSI CHAP support
This update now requires 3.1.3 MU1 firmware
and hp3parclient 3.1.0
2.0.18 - HP 3PAR manage_existing with volume-type support
2.0.19 - Update default persona from Generic to Generic-ALUA
2.0.20 - Configurable SSH missing key policy and known hosts file
2.0.21 - Remove bogus invalid snapCPG=None exception
2.0.22 - HP 3PAR drivers should not claim to have 'infinite' space
2.0.23 - Increase the hostname size from 23 to 31 Bug #1371242
2.0.24 - Add pools (hp3par_cpg now accepts a list of CPGs)
2.0.25 - Migrate without losing type settings bug #1356608
2.0.26 - Don't ignore extra-specs snap_cpg when missing cpg #1368972
2.0.27 - Fixing manage source-id error bug #1357075
2.0.28 - Removing locks bug #1381190
2.0.29 - Report a limitless cpg's stats better bug #1398651
2.0.30 - Update the minimum hp3parclient version bug #1402115
2.0.31 - Removed usage of host name cache #1398914
2.0.32 - Update LOG usage to fix translations. bug #1384312
2.0.33 - Fix host persona to match WSAPI mapping bug #1403997
2.0.34 - Fix log messages to match guidelines. bug #1411370
2.0.35 - Fix default snapCPG for manage_existing bug #1393609
2.0.36 - Added support for dedup provisioning
2.0.37 - Added support for enabling Flash Cache
2.0.38 - Add stats for hp3par goodness_function and filter_function
2.0.39 - Added support for updated detach_volume attachment.
2.0.40 - Make the 3PAR drivers honor the pool in create bug #1432876
2.0.41 - Only log versions at startup. bug #1447697
2.0.42 - Fix type for snapshot config settings. bug #1461640
2.0.43 - Report the capability of supporting multiattach
2.0.44 - Update help strings to reduce the 3PAR user role requirements
2.0.45 - Python 3 fixes
2.0.46 - Improved VLUN creation and deletion logic. #1469816
2.0.47 - Changed initialize_connection to use getHostVLUNs. #1475064
2.0.48 - Adding changes to support 3PAR iSCSI multipath.
2.0.49 - Added client CPG stats to driver volume stats. bug #1482741
2.0.50 - Add over subscription support
2.0.51 - Adds consistency group support
2.0.52 - Added update_migrated_volume. bug #1492023
2.0.53 - Fix volume size conversion. bug #1513158
3.0.0 - Rebranded HP to HPE.
3.0.1 - Fixed find_existing_vluns bug #1515033
3.0.2 - Python 3 support
3.0.3 - Remove db access for consistency groups
3.0.4 - Adds v2 managed replication support
3.0.5 - Adds v2 unmanaged replication support
3.0.6 - Adding manage/unmanage snapshot support
3.0.7 - Enable standard capabilities based on 3PAR licenses
3.0.8 - Optimize array ID retrieval
3.0.9 - Bump minimum API version for volume replication
3.0.10 - Added additional volumes checks to the manage snapshot API
3.0.11 - Fix the image cache capability bug #1491088
3.0.12 - Remove client version checks for replication
3.0.13 - Support creating a cg from a source cg
3.0.14 - Comparison of WWNs now handles case difference. bug #1546453
3.0.15 - Update replication to version 2.1
3.0.16 - Use same LUN ID for each VLUN path #1551994
3.0.17 - Don't fail on clearing 3PAR object volume key. bug #1546392
3.0.18 - create_cloned_volume account for larger size. bug #1554740
3.0.19 - Remove metadata that tracks the instance ID. bug #1572665
3.0.20 - Fix lun_id of 0 issue. bug #1573298
3.0.21 - Driver no longer fails to initialize if
System Reporter license is missing. bug #1568078
3.0.22 - Rework delete_vlun. Bug #1582922
3.0.23 - Fix CG create failures with long display name or special
characters. bug #1573647
3.0.24 - Fix terminate connection on failover
3.0.25 - Fix delete volume when online clone is active. bug #1349639
3.0.26 - Fix concurrent snapshot delete conflict. bug #1600104
        3.0.27 - Fix snapCPG error during backup of attached volume.
                 Bug #1646396. Also fix backup of attached iSCSI
                 CHAP-enabled volume. Bug #1644238.
3.0.28 - Remove un-necessary snapshot creation of source volume
while doing online copy in create_cloned_volume call.
Bug #1661541
3.0.29 - Fix convert snapshot volume to base volume type. bug #1656186
3.0.30 - Handle manage and unmanage hosts present. bug #1648067
3.0.31 - Enable HPE-3PAR Compression Feature.
3.0.32 - Add consistency group capability to generic volume group
                 in HPE-3PAR
3.0.33 - Added replication feature in retype flow. bug #1680313
3.0.34 - Add cloned volume to vvset in online copy. bug #1664464
3.0.35 - Add volume to consistency group if flag enabled. bug #1702317
3.0.36 - Swap volume name in migration. bug #1699733
3.0.37 - Fixed image cache enabled capability. bug #1686985
3.0.38 - Fixed delete operation of replicated volume which is part
of QOS. bug #1717875
3.0.39 - Added check to modify host after volume detach. bug #1730720
"""
VERSION = "3.0.39"
stats = {}
# TODO(Ramy): move these to the 3PAR Client
VLUN_TYPE_EMPTY = 1
VLUN_TYPE_PORT = 2
VLUN_TYPE_HOST = 3
VLUN_TYPE_MATCHED_SET = 4
VLUN_TYPE_HOST_SET = 5
THIN = 2
DEDUP = 6
CONVERT_TO_THIN = 1
CONVERT_TO_FULL = 2
CONVERT_TO_DEDUP = 3
# v2 replication constants
SYNC = 1
PERIODIC = 2
EXTRA_SPEC_REP_MODE = "replication:mode"
EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period"
RC_ACTION_CHANGE_TO_PRIMARY = 7
DEFAULT_REP_MODE = 'periodic'
DEFAULT_SYNC_PERIOD = 900
RC_GROUP_STARTED = 3
SYNC_STATUS_COMPLETED = 3
FAILBACK_VALUE = 'default'
# License values for reported capabilities
PRIORITY_OPT_LIC = "Priority Optimization"
THIN_PROV_LIC = "Thin Provisioning"
REMOTE_COPY_LIC = "Remote Copy"
SYSTEM_REPORTER_LIC = "System Reporter"
COMPRESSION_LIC = "Compression"
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPUX',
'11 - WindowsServer']
hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
'priority']
qos_priority_level = {'low': 1, 'normal': 2, 'high': 3}
hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs',
'flash_cache', 'compression']
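    # Illustrative volume-type extra specs built from the keys above (the
    # 'hpe3par:' scope prefix is assumed here, matching how the driver
    # filters extra specs):
    #   hpe3par:provisioning='thin'
    #   hpe3par:persona='2 - Generic-ALUA'
    #   hpe3par:vvs='my_vvset'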
def __init__(self, config, active_backend_id=None):
self.config = config
self.client = None
self.uuid = uuid.uuid4()
self._client_conf = {}
self._replication_targets = []
self._replication_enabled = False
self._active_backend_id = active_backend_id
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def check_replication_flags(self, options, required_flags):
for flag in required_flags:
if not options.get(flag, None):
msg = (_('%s is not set and is required for the replication '
'device to be valid.') % flag)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self, timeout=None):
hpe3par_api_url = self._client_conf['hpe3par_api_url']
cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
ex_msg = (_('Invalid hpe3parclient version found (%(found)s). '
'Version %(minimum)s or greater required. Run "pip'
' install --upgrade python-3parclient" to upgrade'
' the hpe3parclient.')
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
return cl
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
self.client.login(self._client_conf['hpe3par_username'],
self._client_conf['hpe3par_password'])
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': self._client_conf['hpe3par_api_url'], 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
self.client.setSSHOptions(
self._client_conf['san_ip'],
self._client_conf['san_login'],
self._client_conf['san_password'],
port=self._client_conf['san_ssh_port'],
conn_timeout=self._client_conf['ssh_conn_timeout'],
privatekey=self._client_conf['san_private_key'],
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
def client_logout(self):
LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid)
self.client.logout()
def _create_replication_client(self, remote_array):
try:
cl = client.HPE3ParClient(remote_array['hpe3par_api_url'])
cl.login(remote_array['hpe3par_username'],
remote_array['hpe3par_password'])
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': remote_array['hpe3par_api_url'], 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
cl.setSSHOptions(
remote_array['san_ip'],
remote_array['san_login'],
remote_array['san_password'],
port=remote_array['san_ssh_port'],
conn_timeout=remote_array['ssh_conn_timeout'],
privatekey=remote_array['san_private_key'],
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
return cl
def _destroy_replication_client(self, client):
if client is not None:
client.logout()
def do_setup(self, context, timeout=None, stats=None):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
' install the hpe3parclient.')
raise exception.VolumeBackendAPIException(data=msg)
try:
# This will set self._client_conf with the proper credentials
# to communicate with the 3PAR array. It will contain either
# the values for the primary array or secondary array in the
# case of a fail-over.
self._get_3par_config()
self.client = self._create_client(timeout=timeout)
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
# If replication is properly configured, the primary array's
# API version must meet the minimum requirements.
if self._replication_enabled and (
self.API_VERSION < REMOTE_COPY_API_VERSION):
self._replication_enabled = False
LOG.error("The primary array must have an API version of "
"%(min_ver)s or higher, but is only on "
"%(current_ver)s, therefore replication is not "
"supported.",
{'min_ver': REMOTE_COPY_API_VERSION,
'current_ver': self.API_VERSION})
except hpeexceptions.UnsupportedVersion as ex:
# In the event we cannot contact the configured primary array,
# we want to allow a failover if replication is enabled.
self._do_replication_setup()
if self._replication_enabled:
self.client = None
raise exception.InvalidInput(ex)
if context:
# The context is None except at driver startup.
LOG.info("HPE3PARCommon %(common_ver)s,"
"hpe3parclient %(rest_ver)s",
{"common_ver": self.VERSION,
"rest_ver": hpe3parclient.get_version_string()})
if self.config.hpe3par_debug:
self.client.debug_rest(True)
if self.API_VERSION < SRSTATLD_API_VERSION:
# Firmware version not compatible with srstatld
LOG.warning("srstatld requires "
"WSAPI version '%(srstatld_version)s' "
"version '%(version)s' is installed.",
{'srstatld_version': SRSTATLD_API_VERSION,
'version': self.API_VERSION})
# Get the client ID for provider_location. We only need to retrieve
# the ID directly from the array if the driver stats are not provided.
if not stats:
try:
self.client_login()
info = self.client.getStorageSystemInfo()
self.client.id = six.text_type(info['id'])
except Exception:
self.client.id = 0
finally:
self.client_logout()
else:
self.client.id = stats['array_id']
def check_for_setup_error(self):
if self.client:
self.client_login()
try:
cpg_names = self._client_conf['hpe3par_cpg']
for cpg_name in cpg_names:
self.validate_cpg(cpg_name)
finally:
self.client_logout()
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def extend_volume(self, volume, new_size):
volume_name = self._get_3par_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, "
" by %(diff)s GB.",
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
growth_size_mib = growth_size * units.Ki
self._extend_volume(volume, volume_name, growth_size_mib)
def create_group(self, context, group):
"""Creates a group."""
if not volume_utils.is_group_a_cg_snapshot_type(group):
raise NotImplementedError()
if group.volume_type_ids is not None:
for volume_type in group.volume_types:
allow_type = self.is_volume_group_snap_type(
volume_type)
if not allow_type:
                    msg = _('For a volume type to be a part of a '
                            'consistency group, its extra specs must have '
                            'consistent_group_snapshot_enabled="<is> True"')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
pool = volume_utils.extract_host(group.host, level='pool')
domain = self.get_domain(pool)
cg_name = self._get_3par_vvs_name(group.id)
extra = {'group_id': group.id}
if group.group_snapshot_id is not None:
extra['group_snapshot_id'] = group.group_snapshot_id
self.client.createVolumeSet(cg_name, domain=domain,
comment=six.text_type(extra))
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
self.create_group(context, group)
vvs_name = self._get_3par_vvs_name(group.id)
if group_snapshot and snapshots:
cgsnap_name = self._get_3par_snap_name(group_snapshot.id)
snap_base = cgsnap_name
elif source_group and source_vols:
cg_id = source_group.id
# Create a brand new uuid for the temp snap.
snap_uuid = uuid.uuid4().hex
# Create a temporary snapshot of the volume set in order to
# perform an online copy. These temp snapshots will be deleted
# when the source consistency group is deleted.
temp_snap = self._get_3par_snap_name(snap_uuid, temp_snap=True)
snap_shot_name = temp_snap + "-@count@"
copy_of_name = self._get_3par_vvs_name(cg_id)
optional = {'expirationHours': 1}
self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name,
optional=optional)
snap_base = temp_snap
for i, volume in enumerate(volumes):
snap_name = snap_base + "-" + six.text_type(i)
volume_name = self._get_3par_vol_name(volume.id)
type_info = self.get_volume_settings_from_type(volume)
cpg = type_info['cpg']
snapcpg = type_info['snap_cpg']
tpvv = type_info.get('tpvv', False)
tdvv = type_info.get('tdvv', False)
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
optional = {'online': True, 'snapCPG': snapcpg,
'tpvv': tpvv, 'tdvv': tdvv}
if compression is not None:
optional['compression'] = compression
self.client.copyVolume(snap_name, volume_name, cpg, optional)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
return None, None
def delete_group(self, context, group, volumes):
"""Deletes a group."""
try:
if not volume_utils.is_group_a_cg_snapshot_type(group):
raise NotImplementedError()
cg_name = self._get_3par_vvs_name(group.id)
self.client.deleteVolumeSet(cg_name)
except hpeexceptions.HTTPNotFound:
LOG.warning("Virtual Volume Set '%s' doesn't exist on array.",
cg_name)
except hpeexceptions.HTTPConflict as e:
LOG.error("Conflict detected in Virtual Volume Set"
" %(volume_set)s: %(error)s",
{"volume_set": cg_name,
"error": e})
volume_model_updates = []
for volume in volumes:
volume_update = {'id': volume.id}
try:
self.delete_volume(volume)
volume_update['status'] = 'deleted'
except Exception as ex:
LOG.error("There was an error deleting volume %(id)s: "
"%(error)s.",
{'id': volume.id,
'error': ex})
volume_update['status'] = 'error'
volume_model_updates.append(volume_update)
model_update = {'status': group.status}
return model_update, volume_model_updates
def update_group(self, context, group, add_volumes=None,
remove_volumes=None):
grp_snap_enable = volume_utils.is_group_a_cg_snapshot_type(group)
if not grp_snap_enable:
raise NotImplementedError()
volume_set_name = self._get_3par_vvs_name(group.id)
for volume in add_volumes:
volume_name = self._get_3par_vol_name(volume.id)
vol_snap_enable = self.is_volume_group_snap_type(
volume.volume_type)
try:
if grp_snap_enable and vol_snap_enable:
self.client.addVolumeToVolumeSet(volume_set_name,
volume_name)
else:
                    msg = (_('Volume with volume id %s is not '
                             'supported, as the extra specs of this '
                             'volume do not have '
                             'consistent_group_snapshot_enabled="<is> True"'
                             ) % volume['id'])
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
except hpeexceptions.HTTPNotFound:
msg = (_('Virtual Volume Set %s does not exist.') %
volume_set_name)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
for volume in remove_volumes:
volume_name = self._get_3par_vol_name(volume.id)
try:
self.client.removeVolumeFromVolumeSet(
volume_set_name, volume_name)
except hpeexceptions.HTTPNotFound:
msg = (_('Virtual Volume Set %s does not exist.') %
volume_set_name)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return None, None, None
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Creates a group snapshot."""
if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
raise NotImplementedError()
cg_id = group_snapshot.group_id
snap_shot_name = self._get_3par_snap_name(group_snapshot.id) + (
"-@count@")
copy_of_name = self._get_3par_vvs_name(cg_id)
extra = {'group_snapshot_id': group_snapshot.id}
extra['group_id'] = cg_id
extra['description'] = group_snapshot.description
optional = {'comment': json.dumps(extra),
'readOnly': False}
if self.config.hpe3par_snapshot_expiration:
optional['expirationHours'] = (
int(self.config.hpe3par_snapshot_expiration))
if self.config.hpe3par_snapshot_retention:
optional['retentionHours'] = (
int(self.config.hpe3par_snapshot_retention))
try:
self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name,
optional=optional)
except Exception as ex:
            msg = (_('There was an error creating the cgsnapshot: %s') %
                   six.text_type(ex))
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_update = {'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE}
snapshot_model_updates.append(snapshot_update)
model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
return model_update, snapshot_model_updates
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Deletes a group snapshot."""
if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
raise NotImplementedError()
cgsnap_name = self._get_3par_snap_name(group_snapshot.id)
snapshot_model_updates = []
for i, snapshot in enumerate(snapshots):
snapshot_update = {'id': snapshot['id']}
try:
snap_name = cgsnap_name + "-" + six.text_type(i)
self.client.deleteVolume(snap_name)
snapshot_update['status'] = fields.SnapshotStatus.DELETED
except hpeexceptions.HTTPNotFound as ex:
                # We'll let this act as if it worked;
                # it helps clean up the cinder entries.
LOG.warning("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s",
{'id': snapshot['id'], 'msg': ex})
snapshot_update['status'] = fields.SnapshotStatus.ERROR
except Exception as ex:
LOG.error("There was an error deleting snapshot %(id)s: "
"%(error)s.",
{'id': snapshot['id'],
'error': six.text_type(ex)})
snapshot_update['status'] = fields.SnapshotStatus.ERROR
snapshot_model_updates.append(snapshot_update)
model_update = {'status': fields.GroupSnapshotStatus.DELETED}
return model_update, snapshot_model_updates
def manage_existing(self, volume, existing_ref):
"""Manage an existing 3PAR volume.
existing_ref is a dictionary of the form:
{'source-name': <name of the virtual volume>}
"""
target_vol_name = self._get_existing_volume_ref_name(existing_ref)
# Check for the existence of the virtual volume.
old_comment_str = ""
try:
vol = self.client.getVolume(target_vol_name)
if 'comment' in vol:
old_comment_str = vol['comment']
except hpeexceptions.HTTPNotFound:
err = (_("Virtual volume '%s' doesn't exist on array.") %
target_vol_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
new_comment = {}
# Use the display name from the existing volume if no new name
# was chosen by the user.
if volume['display_name']:
display_name = volume['display_name']
new_comment['display_name'] = volume['display_name']
elif 'comment' in vol:
display_name = self._get_3par_vol_comment_value(vol['comment'],
'display_name')
if display_name:
new_comment['display_name'] = display_name
else:
display_name = None
# Generate the new volume information based on the new ID.
new_vol_name = self._get_3par_vol_name(volume['id'])
name = 'volume-' + volume['id']
new_comment['volume_id'] = volume['id']
new_comment['name'] = name
new_comment['type'] = 'OpenStack'
volume_type = None
if volume['volume_type_id']:
try:
volume_type = self._get_volume_type(volume['volume_type_id'])
except Exception:
reason = (_("Volume type ID '%s' is invalid.") %
volume['volume_type_id'])
raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
new_vals = {'newName': new_vol_name,
'comment': json.dumps(new_comment)}
# Ensure that snapCPG is set
if 'snapCPG' not in vol:
new_vals['snapCPG'] = vol['userCPG']
LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG "
"is empty so it will be set to: %(cpg)s",
{'disp': display_name, 'new': new_vol_name,
'cpg': new_vals['snapCPG']})
# Update the existing volume with the new name and comments.
self.client.modifyVolume(target_vol_name, new_vals)
LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.",
{'ref': existing_ref['source-name'], 'new': new_vol_name})
retyped = False
model_update = None
if volume_type:
LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.",
{'disp': display_name, 'new': new_vol_name})
try:
retyped, model_update = self._retype_from_no_type(volume,
volume_type)
LOG.info("Virtual volume %(disp)s successfully retyped to "
"%(new_type)s.",
{'disp': display_name,
'new_type': volume_type.get('name')})
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning("Failed to manage virtual volume %(disp)s "
"due to error during retype.",
{'disp': display_name})
# Try to undo the rename and clear the new comment.
self.client.modifyVolume(
new_vol_name,
{'newName': target_vol_name,
'comment': old_comment_str})
updates = {'display_name': display_name}
if retyped and model_update:
updates.update(model_update)
LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.",
{'disp': display_name, 'new': new_vol_name})
# Return display name to update the name displayed in the GUI and
# any model updates from retype.
return updates
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Manage an existing 3PAR snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of the snapshot>}
"""
# Potential parent volume for the snapshot
volume = snapshot['volume']
# Do not allow for managing of snapshots for 'failed-over' volumes.
if volume.get('replication_status') == 'failed-over':
err = (_("Managing of snapshots to failed-over volumes is "
"not allowed."))
raise exception.InvalidInput(reason=err)
target_snap_name = self._get_existing_volume_ref_name(existing_ref,
is_snapshot=True)
# Check for the existence of the snapshot.
try:
snap = self.client.getVolume(target_snap_name)
except hpeexceptions.HTTPNotFound:
err = (_("Snapshot '%s' doesn't exist on array.") %
target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
# Make sure the snapshot is being associated with the correct volume.
parent_vol_name = self._get_3par_vol_name(volume['id'])
if parent_vol_name != snap['copyOf']:
err = (_("The provided snapshot '%s' is not a snapshot of "
"the provided volume.") % target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
new_comment = {}
# Use the display name from the existing snapshot if no new name
# was chosen by the user.
if snapshot['display_name']:
display_name = snapshot['display_name']
new_comment['display_name'] = snapshot['display_name']
elif 'comment' in snap:
display_name = self._get_3par_vol_comment_value(snap['comment'],
'display_name')
if display_name:
new_comment['display_name'] = display_name
else:
display_name = None
# Generate the new snapshot information based on the new ID.
new_snap_name = self._get_3par_snap_name(snapshot['id'])
new_comment['volume_id'] = volume['id']
new_comment['volume_name'] = 'volume-' + volume['id']
if snapshot.get('display_description', None):
new_comment['description'] = snapshot['display_description']
else:
new_comment['description'] = ""
new_vals = {'newName': new_snap_name,
'comment': json.dumps(new_comment)}
# Update the existing snapshot with the new name and comments.
self.client.modifyVolume(target_snap_name, new_vals)
LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.",
{'ref': existing_ref['source-name'], 'new': new_snap_name})
updates = {'display_name': display_name}
LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.",
{'disp': display_name, 'new': new_snap_name})
# Return display name to update the name displayed in the GUI.
return updates
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of the virtual volume>}
"""
target_vol_name = self._get_existing_volume_ref_name(existing_ref)
# Make sure the reference is not in use.
if re.match('osv-*|oss-*|vvs-*', target_vol_name):
reason = _("Reference must be for an unmanaged virtual volume.")
raise exception.ManageExistingInvalidReference(
existing_ref=target_vol_name,
reason=reason)
# Check for the existence of the virtual volume.
try:
vol = self.client.getVolume(target_vol_name)
except hpeexceptions.HTTPNotFound:
err = (_("Virtual volume '%s' doesn't exist on array.") %
target_vol_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
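        # Convert the MiB size reported by the array to whole GiB,
        # rounding up so the managed volume is never undersized.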
return int(math.ceil(float(vol['sizeMiB']) / units.Ki))
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing_snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of the snapshot>}
"""
target_snap_name = self._get_existing_volume_ref_name(existing_ref,
is_snapshot=True)
# Make sure the reference is not in use.
if re.match('osv-*|oss-*|vvs-*|unm-*', target_snap_name):
reason = _("Reference must be for an unmanaged snapshot.")
raise exception.ManageExistingInvalidReference(
existing_ref=target_snap_name,
reason=reason)
# Check for the existence of the snapshot.
try:
snap = self.client.getVolume(target_snap_name)
except hpeexceptions.HTTPNotFound:
err = (_("Snapshot '%s' doesn't exist on array.") %
target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
return int(math.ceil(float(snap['sizeMiB']) / units.Ki))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
        # Rename the volume to the unm-* format so that it can be
        # easily found later.
vol_name = self._get_3par_vol_name(volume['id'])
new_vol_name = self._get_3par_unm_name(volume['id'])
self.client.modifyVolume(vol_name, {'newName': new_vol_name})
LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. "
"Volume renamed to '%(new)s'.",
{'disp': volume['display_name'],
'vol': vol_name,
'new': new_vol_name})
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management."""
# Parent volume for the snapshot
volume = snapshot['volume']
# Do not allow unmanaging of snapshots from 'failed-over' volumes.
if volume.get('replication_status') == 'failed-over':
err = (_("Unmanaging of snapshots from failed-over volumes is "
"not allowed."))
LOG.error(err)
# TODO(leeantho) Change this exception to Invalid when the volume
# manager supports handling that.
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
        # Rename the snapshot to the ums-* format so that it can be
        # easily found later.
snap_name = self._get_3par_snap_name(snapshot['id'])
new_snap_name = self._get_3par_ums_name(snapshot['id'])
self.client.modifyVolume(snap_name, {'newName': new_snap_name})
LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. "
"Snapshot renamed to '%(new)s'.",
{'disp': snapshot['display_name'],
'vol': snap_name,
'new': new_snap_name})
def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False):
"""Returns the volume name of an existing reference.
Checks if an existing volume reference has a source-name or
source-id element. If source-name or source-id is not present an
error will be thrown.
"""
vol_name = None
if 'source-name' in existing_ref:
vol_name = existing_ref['source-name']
elif 'source-id' in existing_ref:
if is_snapshot:
vol_name = self._get_3par_ums_name(existing_ref['source-id'])
else:
vol_name = self._get_3par_unm_name(existing_ref['source-id'])
else:
reason = _("Reference must contain source-name or source-id.")
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=reason)
return vol_name
def _extend_volume(self, volume, volume_name, growth_size_mib,
_convert_to_base=False):
model_update = None
rcg_name = self._get_3par_rcg_name(volume['id'])
is_volume_replicated = self._volume_of_replicated_type(volume)
try:
if _convert_to_base:
LOG.debug("Converting to base volume prior to growing.")
model_update = self._convert_to_base_volume(volume)
# If the volume is replicated and we are not failed over,
# remote copy has to be stopped before the volume can be extended.
failed_over = volume.get("replication_status", None)
is_failed_over = failed_over == "failed-over"
if is_volume_replicated and not is_failed_over:
self.client.stopRemoteCopy(rcg_name)
self.client.growVolume(volume_name, growth_size_mib)
if is_volume_replicated and not is_failed_over:
self.client.startRemoteCopy(rcg_name)
except Exception as ex:
# If the extend fails, we must restart remote copy.
if is_volume_replicated:
self.client.startRemoteCopy(rcg_name)
with excutils.save_and_reraise_exception() as ex_ctxt:
if (not _convert_to_base and
isinstance(ex, hpeexceptions.HTTPForbidden) and
ex.get_code() == 150):
# Error code 150 means 'invalid operation: Cannot grow
# this type of volume'.
# Suppress raising this exception because we can
# resolve it by converting it into a base volume.
# Afterwards, extending the volume should succeed, or
# fail with a different exception/error code.
ex_ctxt.reraise = False
model_update = self._extend_volume(
volume, volume_name,
growth_size_mib,
_convert_to_base=True)
else:
LOG.error("Error extending volume: %(vol)s. "
"Exception: %(ex)s",
{'vol': volume_name, 'ex': ex})
return model_update
def _get_3par_vol_name(self, volume_id, temp_vol=False):
"""Get converted 3PAR volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
osv-7P.DD5jLTPWF7tcwnMF80g
        We convert the 128 bits of the uuid into a 24-character-long
        base64-encoded string to ensure we don't exceed the maximum
        allowed 31-character name limit on 3PAR. We strip the
        padding '=' and replace '+' with '.' and '/' with '-'.
"""
volume_name = self._encode_name(volume_id)
if temp_vol:
            # This is a temporary volume, created during migration.
prefix = "tsv-%s"
else:
prefix = "osv-%s"
return prefix % volume_name
def _get_3par_snap_name(self, snapshot_id, temp_snap=False):
snapshot_name = self._encode_name(snapshot_id)
if temp_snap:
            # This is a temporary snapshot, created during cloning.
prefix = "tss-%s"
else:
prefix = "oss-%s"
return prefix % snapshot_name
def _get_3par_ums_name(self, snapshot_id):
ums_name = self._encode_name(snapshot_id)
return "ums-%s" % ums_name
def _get_3par_vvs_name(self, volume_id):
vvs_name = self._encode_name(volume_id)
return "vvs-%s" % vvs_name
def _get_3par_unm_name(self, volume_id):
unm_name = self._encode_name(volume_id)
return "unm-%s" % unm_name
# v2 replication conversion
def _get_3par_rcg_name(self, volume_id):
rcg_name = self._encode_name(volume_id)
rcg = "rcg-%s" % rcg_name
return rcg[:22]
def _get_3par_remote_rcg_name(self, volume_id, provider_location):
return self._get_3par_rcg_name(volume_id) + ".r" + (
six.text_type(provider_location))
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.encode_as_text(vol_uuid.bytes)
# 3par doesn't allow +, nor /
vol_encoded = vol_encoded.replace('+', '.')
vol_encoded = vol_encoded.replace('/', '-')
# strip off the == as 3par doesn't like those.
vol_encoded = vol_encoded.replace('=', '')
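        # Example (from the _get_3par_vol_name docstring above):
        # 'ecffc30f-98cb-4cf5-85ee-d7309cc17cd2' encodes to
        # '7P.DD5jLTPWF7tcwnMF80g'.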
return vol_encoded
def _capacity_from_size(self, vol_size):
        # 3PAR volume sizes are specified in mebibytes (MiB).
if int(vol_size) == 0:
capacity = units.Gi # default: 1GiB
else:
capacity = vol_size * units.Gi
capacity = int(math.ceil(capacity / units.Mi))
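        # For example, vol_size=10 yields 10 * 1024 = 10240 MiB.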
return capacity
def _delete_3par_host(self, hostname):
self.client.deleteHost(hostname)
def _get_prioritized_host_on_3par(self, host, hosts, hostname):
        # Check whether a host with the initiator's wwn/iqn is present
        # on the 3PAR.
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
            # 'host' and 'hosts' were retrieved from the 3PAR using the
            # hostname and the wwn/iqn, respectively. If their hostnames
            # do not match, the 3PAR has a pre-existing host under a
            # different name.
if host['name'] != hosts['members'][0]['name']:
hostname = hosts['members'][0]['name']
LOG.info(("Prioritize the host retrieved from wwn/iqn "
"Hostname : %(hosts)s is used instead "
"of Hostname: %(host)s"),
{'hosts': hostname,
'host': host['name']})
host = self._get_3par_host(hostname)
return host, hostname
return host, hostname
def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None):
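        # When lun_id is given, request that exact LUN; otherwise let the
        # array auto-assign one (auto=True).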
try:
location = None
auto = True
if lun_id is not None:
auto = False
if nsp is None:
location = self.client.createVLUN(volume, hostname=hostname,
auto=auto, lun=lun_id)
else:
port = self.build_portPos(nsp)
location = self.client.createVLUN(volume, hostname=hostname,
auto=auto, portPos=port,
lun=lun_id)
vlun_info = None
if location:
# The LUN id is returned as part of the location URI
vlun = location.split(',')
vlun_info = {'volume_name': vlun[0],
'lun_id': int(vlun[1]),
'host_name': vlun[2],
}
if len(vlun) > 3:
vlun_info['nsp'] = vlun[3]
return vlun_info
except hpeexceptions.HTTPBadRequest as e:
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
else:
raise exception.VolumeBackendAPIException(
data=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 31:
index = 31
return hostname[:index]
def _get_3par_host(self, hostname):
return self.client.getHost(hostname)
def get_ports(self):
return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_fc_target_ports(self):
ports = self.get_active_target_ports()
fc_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_FC:
fc_ports.append(port)
return fc_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def get_volume_stats(self,
refresh,
filter_function=None,
goodness_function=None):
if refresh:
self._update_volume_stats(
filter_function=filter_function,
goodness_function=goodness_function)
return self.stats
def _update_volume_stats(self,
filter_function=None,
goodness_function=None):
        # Constant to convert MiB to GiB (1 / 1024).
        const = 0.0009765625
# storage_protocol and volume_backend_name are
# set in the child classes
pools = []
info = self.client.getStorageSystemInfo()
qos_support = True
thin_support = True
remotecopy_support = True
sr_support = True
compression_support = False
if 'licenseInfo' in info:
if 'licenses' in info['licenseInfo']:
valid_licenses = info['licenseInfo']['licenses']
qos_support = self._check_license_enabled(
valid_licenses, self.PRIORITY_OPT_LIC,
"QoS_support")
thin_support = self._check_license_enabled(
valid_licenses, self.THIN_PROV_LIC,
"Thin_provisioning_support")
remotecopy_support = self._check_license_enabled(
valid_licenses, self.REMOTE_COPY_LIC,
"Replication")
sr_support = self._check_license_enabled(
valid_licenses, self.SYSTEM_REPORTER_LIC,
"System_reporter_support")
compression_support = self._check_license_enabled(
valid_licenses, self.COMPRESSION_LIC,
"Compression")
for cpg_name in self._client_conf['hpe3par_cpg']:
try:
stat_capabilities = {
THROUGHPUT: None,
BANDWIDTH: None,
LATENCY: None,
IO_SIZE: None,
QUEUE_LENGTH: None,
AVG_BUSY_PERC: None
}
cpg = self.client.getCPG(cpg_name)
if (self.API_VERSION >= SRSTATLD_API_VERSION and sr_support):
interval = 'daily'
history = '7d'
try:
stat_capabilities = self.client.getCPGStatData(
cpg_name,
interval,
history)
except Exception as ex:
LOG.warning("Exception at getCPGStatData() "
"for cpg: '%(cpg_name)s' "
"Reason: '%(reason)s'",
{'cpg_name': cpg_name, 'reason': ex})
if 'numTDVVs' in cpg:
total_volumes = int(
cpg['numFPVVs'] + cpg['numTPVVs'] + cpg['numTDVVs']
)
else:
total_volumes = int(
cpg['numFPVVs'] + cpg['numTPVVs']
)
if 'limitMiB' not in cpg['SDGrowth']:
# cpg usable free space
cpg_avail_space = (
self.client.getCPGAvailableSpace(cpg_name))
free_capacity = int(
cpg_avail_space['usableFreeMiB'] * const)
# total_capacity is the best we can do for a limitless cpg
total_capacity = int(
(cpg['SDUsage']['usedMiB'] +
cpg['UsrUsage']['usedMiB'] +
cpg_avail_space['usableFreeMiB']) * const)
else:
total_capacity = int(cpg['SDGrowth']['limitMiB'] * const)
free_capacity = int((cpg['SDGrowth']['limitMiB'] -
(cpg['UsrUsage']['usedMiB'] +
cpg['SDUsage']['usedMiB'])) * const)
capacity_utilization = (
(float(total_capacity - free_capacity) /
float(total_capacity)) * 100)
provisioned_capacity = int((cpg['UsrUsage']['totalMiB'] +
cpg['SAUsage']['totalMiB'] +
cpg['SDUsage']['totalMiB']) *
const)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array")
% cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
pool = {'pool_name': cpg_name,
'total_capacity_gb': total_capacity,
'free_capacity_gb': free_capacity,
'provisioned_capacity_gb': provisioned_capacity,
'QoS_support': qos_support,
'thin_provisioning_support': thin_support,
'thick_provisioning_support': True,
'max_over_subscription_ratio': (
self.config.safe_get('max_over_subscription_ratio')),
'reserved_percentage': (
self.config.safe_get('reserved_percentage')),
'location_info': ('HPE3PARDriver:%(sys_id)s:%(dest_cpg)s' %
{'sys_id': info['serialNumber'],
'dest_cpg': cpg_name}),
'total_volumes': total_volumes,
'capacity_utilization': capacity_utilization,
THROUGHPUT: stat_capabilities[THROUGHPUT],
BANDWIDTH: stat_capabilities[BANDWIDTH],
LATENCY: stat_capabilities[LATENCY],
IO_SIZE: stat_capabilities[IO_SIZE],
QUEUE_LENGTH: stat_capabilities[QUEUE_LENGTH],
AVG_BUSY_PERC: stat_capabilities[AVG_BUSY_PERC],
'filter_function': filter_function,
'goodness_function': goodness_function,
'multiattach': False,
'consistent_group_snapshot_enabled': True,
'compression': compression_support,
}
if remotecopy_support:
pool['replication_enabled'] = self._replication_enabled
pool['replication_type'] = ['sync', 'periodic']
pool['replication_count'] = len(self._replication_targets)
pools.append(pool)
self.stats = {'driver_version': '3.0',
'storage_protocol': None,
'vendor_name': 'Hewlett Packard Enterprise',
'volume_backend_name': None,
'array_id': info['id'],
'replication_enabled': self._replication_enabled,
'replication_targets': self._get_replication_targets(),
'pools': pools}
def _check_license_enabled(self, valid_licenses,
license_to_check, capability):
"""Check a license against valid licenses on the array."""
if valid_licenses:
for license in valid_licenses:
if license_to_check in license.get('name'):
return True
LOG.debug("'%(capability)s' requires a '%(license)s' "
"license which is not installed.",
{'capability': capability,
'license': license_to_check})
return False
def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None):
"""find a VLUN on a 3PAR host."""
vluns = self.client.getHostVLUNs(hostname)
found_vlun = None
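        # Prefer a VLUN matching the requested lun_id (and port, when an
        # nsp is given); otherwise fall back to the first VLUN found for
        # this volume.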
for vlun in vluns:
if volume_name in vlun['volumeName']:
if lun_id is not None:
if vlun['lun'] == lun_id:
if nsp:
port = self.build_portPos(nsp)
if vlun['portPos'] == port:
found_vlun = vlun
break
else:
found_vlun = vlun
break
else:
found_vlun = vlun
break
if found_vlun is None:
LOG.info("3PAR vlun %(name)s not found on host %(host)s",
{'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None, lun_id=None):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp,
lun_id=lun_id)
return self._get_vlun(volume_name,
host['name'],
vlun_info['lun_id'],
nsp)
def delete_vlun(self, volume, hostname, wwn=None, iqn=None):
volume_name = self._get_3par_vol_name(volume['id'])
vluns = self.client.getHostVLUNs(hostname)
        # When deleting VLUNs, you simply need to remove the template VLUN;
        # any active VLUNs will be automatically removed. Template VLUNs
        # are marked as active: False.
modify_host = True
volume_vluns = []
for vlun in vluns:
if volume_name in vlun['volumeName']:
# template VLUNs are 'active' = False
if not vlun['active']:
volume_vluns.append(vlun)
if not volume_vluns:
LOG.warning("3PAR vlun for volume %(name)s not found on host "
"%(host)s", {'name': volume_name, 'host': hostname})
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
for vlun in volume_vluns:
if 'portPos' in vlun:
self.client.deleteVLUN(volume_name, vlun['lun'],
hostname=hostname,
port=vlun['portPos'])
else:
self.client.deleteVLUN(volume_name, vlun['lun'],
hostname=hostname)
# Determine if there are other volumes attached to the host.
# This will determine whether we should try removing host from host set
# and deleting the host.
vluns = []
try:
vluns = self.client.getHostVLUNs(hostname)
except hpeexceptions.HTTPNotFound:
LOG.debug("All VLUNs removed from host %s", hostname)
pass
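        # Normalize wwn/iqn to lists so the membership checks below also
        # work when a single initiator string was passed in.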
if wwn is not None and not isinstance(wwn, list):
wwn = [wwn]
if iqn is not None and not isinstance(iqn, list):
iqn = [iqn]
for vlun in vluns:
if vlun.get('active'):
if (wwn is not None and vlun.get('remoteName').lower() in wwn)\
or (iqn is not None and vlun.get('remoteName').lower() in
iqn):
modify_host = False
break
if len(vluns) == 0:
# We deleted the last vlun, so try to delete the host too.
# This check avoids the old unnecessary try/fail when vluns exist
# but adds a minor race condition if a vlun is manually deleted
# externally at precisely the wrong time. Worst case is leftover
# host, so it is worth the unlikely risk.
try:
self._delete_3par_host(hostname)
except Exception as ex:
# Any exception down here is only logged. The vlun is deleted.
# If the host is in a host set, the delete host will fail and
# the host will remain in the host set. This is desired
# because cinder was not responsible for the host set
# assignment. The host set could be used outside of cinder
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
LOG.info("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted "
"because: %(reason)s",
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
elif modify_host:
if wwn is not None:
mod_request = {'pathOperation': self.client.HOST_EDIT_REMOVE,
'FCWWNs': wwn}
else:
mod_request = {'pathOperation': self.client.HOST_EDIT_REMOVE,
'iSCSINames': iqn}
try:
self.client.modifyHost(hostname, mod_request)
except Exception as ex:
LOG.info("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not Modified "
"because: %(reason)s",
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
def _get_volume_type(self, type_id):
ctxt = context.get_admin_context()
return volume_types.get_volume_type(ctxt, type_id)
def _get_key_value(self, hpe3par_keys, key, default=None):
if hpe3par_keys is not None and key in hpe3par_keys:
return hpe3par_keys[key]
else:
return default
def _get_qos_value(self, qos, key, default=None):
if key in qos:
return qos[key]
else:
return default
def _get_qos_by_volume_type(self, volume_type):
qos = {}
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(kmartin): We prefer the qos_specs association
# and override any existing extra-specs settings
# if present.
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(context.get_admin_context(),
qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if 'qos:' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe_qos_keys:
qos[key] = value
return qos
def _get_keys_by_volume_type(self, volume_type):
hpe3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe3par_valid_keys:
hpe3par_keys[key] = value
return hpe3par_keys
def _set_qos_rule(self, qos, vvs_name):
min_io = self._get_qos_value(qos, 'minIOPS')
max_io = self._get_qos_value(qos, 'maxIOPS')
min_bw = self._get_qos_value(qos, 'minBWS')
max_bw = self._get_qos_value(qos, 'maxBWS')
latency = self._get_qos_value(qos, 'latency')
priority = self._get_qos_value(qos, 'priority', 'normal')
qosRule = {}
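        # When only one of min/max is supplied for IOPS or bandwidth, the
        # code below mirrors that value into the missing goal/limit field
        # so both ends of the QoS range are always set together.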
if min_io:
qosRule['ioMinGoal'] = int(min_io)
if max_io is None:
qosRule['ioMaxLimit'] = int(min_io)
if max_io:
qosRule['ioMaxLimit'] = int(max_io)
if min_io is None:
qosRule['ioMinGoal'] = int(max_io)
if min_bw:
qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
if max_bw is None:
qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
if max_bw:
qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
if min_bw is None:
qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
if latency:
qosRule['latencyGoal'] = int(latency)
if priority:
qosRule['priority'] = self.qos_priority_level.get(priority.lower())
try:
self.client.createQoSRules(vvs_name, qosRule)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error creating QOS rule %s", qosRule)
def get_flash_cache_policy(self, hpe3par_keys):
if hpe3par_keys is not None:
# First check list of extra spec keys
val = self._get_key_value(hpe3par_keys, 'flash_cache', None)
if val is not None:
# If requested, see if supported on back end
if self.API_VERSION < FLASH_CACHE_API_VERSION:
err = (_("Flash Cache Policy requires "
"WSAPI version '%(fcache_version)s' "
"version '%(version)s' is installed.") %
{'fcache_version': FLASH_CACHE_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
else:
if val.lower() == 'true':
return self.client.FLASH_CACHE_ENABLED
else:
return self.client.FLASH_CACHE_DISABLED
return None
def get_compression_policy(self, hpe3par_keys):
if hpe3par_keys is not None:
            # Returns True, False, or None when the 'compression' key is
            # not set.
val = self._get_key_value(hpe3par_keys, 'compression', None)
compression_support = False
if val is not None:
info = self.client.getStorageSystemInfo()
if 'licenseInfo' in info:
if 'licenses' in info['licenseInfo']:
valid_licenses = info['licenseInfo']['licenses']
compression_support = self._check_license_enabled(
valid_licenses, self.COMPRESSION_LIC,
"Compression")
                # Check the WSAPI version.
if self.API_VERSION < COMPRESSION_API_VERSION:
err = (_("Compression Policy requires "
"WSAPI version '%(compression_version)s' "
"version '%(version)s' is installed.") %
{'compression_version': COMPRESSION_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
else:
if val.lower() == 'true':
if not compression_support:
msg = _('Compression is not supported on '
'underlying hardware')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return True
else:
return False
return None
def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name):
# Update virtual volume set
if flash_cache:
try:
self.client.modifyVolumeSet(vvs_name,
flashCachePolicy=flash_cache)
LOG.info("Flash Cache policy set to %s", flash_cache)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error setting Flash Cache policy "
"to %s - exception", flash_cache)
def _add_volume_to_volume_set(self, volume, volume_name,
cpg, vvs_name, qos, flash_cache):
if vvs_name is not None:
# Admin has set a volume set name to add the volume to
try:
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except hpeexceptions.HTTPNotFound:
msg = _('VV Set %s does not exist.') % vvs_name
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
else:
vvs_name = self._get_3par_vvs_name(volume['id'])
domain = self.get_domain(cpg)
self.client.createVolumeSet(vvs_name, domain)
try:
self._set_qos_rule(qos, vvs_name)
self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except Exception as ex:
# Cleanup the volume set if unable to create the qos rule
# or flash cache policy or add the volume to the volume set
self.client.deleteVolumeSet(vvs_name)
raise exception.CinderException(ex)
def get_cpg(self, volume, allowSnap=False):
volume_name = self._get_3par_vol_name(volume['id'])
vol = self.client.getVolume(volume_name)
        # Check for 'userCPG' in the getVolume REST response; if found,
        # return it. Otherwise fall back to the 'snapCPG' attribute when
        # allowSnap=True. If the response contains neither userCPG nor
        # snapCPG, derive the default CPG from the volume's 'host'
        # attribute.
LOG.debug("get volume response is: %s", vol)
if 'userCPG' in vol:
return vol['userCPG']
elif allowSnap and 'snapCPG' in vol:
return vol['snapCPG']
else:
return volume_utils.extract_host(volume['host'], 'pool')
def _get_3par_vol_comment(self, volume_name):
vol = self.client.getVolume(volume_name)
if 'comment' in vol:
return vol['comment']
return None
def validate_persona(self, persona_value):
"""Validate persona value.
If the passed in persona_value is not valid, raise InvalidInput,
otherwise return the persona ID.
:param persona_value:
:raises exception.InvalidInput:
:returns: persona ID
"""
if persona_value not in self.valid_persona_values:
err = (_("Must specify a valid persona %(valid)s,"
"value '%(persona)s' is invalid.") %
{'valid': self.valid_persona_values,
'persona': persona_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
# persona is set by the id so remove the text and return the id
# i.e for persona '1 - Generic' returns 1
persona_id = persona_value.split(' ')
return persona_id[0]
def get_persona_type(self, volume, hpe3par_keys=None):
default_persona = self.valid_persona_values[0]
type_id = volume.get('volume_type_id', None)
if type_id is not None:
volume_type = self._get_volume_type(type_id)
if hpe3par_keys is None:
hpe3par_keys = self._get_keys_by_volume_type(volume_type)
persona_value = self._get_key_value(hpe3par_keys, 'persona',
default_persona)
return self.validate_persona(persona_value)
def get_type_info(self, type_id):
"""Get 3PAR type info for the given type_id.
Reconciles VV Set, old-style extra-specs, and QOS specs
and returns commonly used info about the type.
:returns: hpe3par_keys, qos, volume_type, vvs_name
"""
volume_type = None
vvs_name = None
hpe3par_keys = {}
qos = {}
if type_id is not None:
volume_type = self._get_volume_type(type_id)
hpe3par_keys = self._get_keys_by_volume_type(volume_type)
vvs_name = self._get_key_value(hpe3par_keys, 'vvs')
if vvs_name is None:
qos = self._get_qos_by_volume_type(volume_type)
return hpe3par_keys, qos, volume_type, vvs_name
def get_volume_settings_from_type_id(self, type_id, pool):
"""Get 3PAR volume settings given a type_id.
Combines type info and config settings to return a dictionary
describing the 3PAR volume settings. Does some validation (CPG).
Uses pool as the default cpg (when not specified in volume type specs).
:param type_id: id of type to get settings for
:param pool: CPG to use if type does not have one set
:returns: dict
"""
hpe3par_keys, qos, volume_type, vvs_name = self.get_type_info(type_id)
# Default to pool extracted from host.
# If that doesn't work use the 1st CPG in the config as the default.
default_cpg = pool or self._client_conf['hpe3par_cpg'][0]
cpg = self._get_key_value(hpe3par_keys, 'cpg', default_cpg)
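        # Identity comparison is deliberate here: _get_key_value() returns
        # the default_cpg object itself when no 'cpg' extra spec is set, so
        # 'is not' detects that an extra-spec value was provided.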
if cpg is not default_cpg:
            # The cpg was specified in a volume type extra spec, so it
            # needs to be validated as being in the correct domain.
msg = ("'hpe3par:cpg' is not supported as an extra spec "
"in a volume type. CPG's are chosen by "
"the cinder scheduler, as a pool, from the "
"cinder.conf entry 'hpe3par_cpg', which can "
"be a list of CPGs.")
versionutils.report_deprecated_feature(LOG, msg)
LOG.info("Using pool %(pool)s instead of %(cpg)s",
{'pool': pool, 'cpg': cpg})
cpg = pool
self.validate_cpg(cpg)
# Look to see if the snap_cpg was specified in volume type
# extra spec, if not use hpe3par_cpg_snap from config as the
# default.
snap_cpg = self.config.hpe3par_cpg_snap
snap_cpg = self._get_key_value(hpe3par_keys, 'snap_cpg', snap_cpg)
# If it's still not set or empty then set it to the cpg.
if not snap_cpg:
snap_cpg = cpg
# if provisioning is not set use thin
default_prov = self.valid_prov_values[0]
prov_value = self._get_key_value(hpe3par_keys, 'provisioning',
default_prov)
# check for valid provisioning type
if prov_value not in self.valid_prov_values:
err = (_("Must specify a valid provisioning type %(valid)s, "
"value '%(prov)s' is invalid.") %
{'valid': self.valid_prov_values,
'prov': prov_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
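        # Map the provisioning type onto 3PAR flags:
        # 'thin' -> TPVV, 'full' -> fully provisioned, 'dedup' -> TDVV.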
tpvv = True
tdvv = False
if prov_value == "full":
tpvv = False
elif prov_value == "dedup":
tpvv = False
tdvv = True
if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
err = (_("Dedup is a valid provisioning type, "
"but requires WSAPI version '%(dedup_version)s' "
"version '%(version)s' is installed.") %
{'dedup_version': DEDUP_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
return {'hpe3par_keys': hpe3par_keys,
'cpg': cpg, 'snap_cpg': snap_cpg,
'vvs_name': vvs_name, 'qos': qos,
'tpvv': tpvv, 'tdvv': tdvv, 'volume_type': volume_type}
def get_volume_settings_from_type(self, volume, host=None):
"""Get 3PAR volume settings given a volume.
Combines type info and config settings to return a dictionary
describing the 3PAR volume settings. Does some validation (CPG and
persona).
:param volume:
:param host: Optional host to use for default pool.
:returns: dict
"""
type_id = volume.get('volume_type_id', None)
pool = None
if host:
pool = volume_utils.extract_host(host['host'], 'pool')
else:
pool = volume_utils.extract_host(volume['host'], 'pool')
volume_settings = self.get_volume_settings_from_type_id(type_id, pool)
# check for valid persona even if we don't use it until
# attach time, this will give the end user notice that the
# persona type is invalid at volume creation time
self.get_persona_type(volume, volume_settings['hpe3par_keys'])
return volume_settings
def create_volume(self, volume):
LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
'%(host)s)',
{'disp_name': volume['display_name'],
'vol_name': volume['name'],
'id': self._get_3par_vol_name(volume['id']),
'host': volume['host']})
try:
comments = {'volume_id': volume['id'],
'name': volume['name'],
'type': 'OpenStack'}
name = volume.get('display_name', None)
if name:
comments['display_name'] = name
# get the options supported by volume types
type_info = self.get_volume_settings_from_type(volume)
volume_type = type_info['volume_type']
vvs_name = type_info['vvs_name']
qos = type_info['qos']
cpg = type_info['cpg']
snap_cpg = type_info['snap_cpg']
tpvv = type_info['tpvv']
tdvv = type_info['tdvv']
flash_cache = self.get_flash_cache_policy(
type_info['hpe3par_keys'])
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
consis_group_snap_type = False
if volume_type is not None:
extra_specs = volume_type.get('extra_specs', None)
if extra_specs:
gsnap_val = extra_specs.get(
'consistent_group_snapshot_enabled', None)
if gsnap_val is not None and gsnap_val == "<is> True":
consis_group_snap_type = True
cg_id = volume.get('group_id', None)
if cg_id and consis_group_snap_type:
vvs_name = self._get_3par_vvs_name(cg_id)
type_id = volume.get('volume_type_id', None)
if type_id is not None:
comments['volume_type_name'] = volume_type.get('name')
comments['volume_type_id'] = type_id
if vvs_name is not None:
comments['vvs'] = vvs_name
else:
comments['qos'] = qos
extras = {'comment': json.dumps(comments),
'snapCPG': snap_cpg,
'tpvv': tpvv}
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
capacity = self._capacity_from_size(volume['size'])
volume_name = self._get_3par_vol_name(volume['id'])
if compression is not None:
extras['compression'] = compression
self.client.createVolume(volume_name, cpg, capacity, extras)
if qos or vvs_name or flash_cache is not None:
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, vvs_name, qos,
flash_cache)
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
# v2 replication check
replication_flag = False
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
replication_flag = True
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error("Exception: %s", ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error("Exception: %s", ex)
raise
except exception.CinderException as ex:
LOG.error("Exception: %s", ex)
raise
except Exception as ex:
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg,
replication=replication_flag,
provider_location=self.client.id)
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
tpvv=True, tdvv=False, compression=None):
# Virtual volume sets are not supported with the -online option
LOG.debug('Creating clone of a volume %(src)s to %(dest)s.',
{'src': src_name, 'dest': dest_name})
optional = {'tpvv': tpvv, 'online': True}
if snap_cpg is not None:
optional['snapCPG'] = snap_cpg
if self.API_VERSION >= DEDUP_API_VERSION:
optional['tdvv'] = tdvv
if (compression is not None and
self.API_VERSION >= COMPRESSION_API_VERSION):
optional['compression'] = compression
body = self.client.copyVolume(src_name, dest_name, cpg, optional)
return body['taskid']
def get_next_word(self, s, search_string):
"""Return the next word.
Search 's' for 'search_string', if found return the word preceding
'search_string' from 's'.
"""
word = re.search(search_string.strip(' ') + ' ([^ ]*)', s)
return word.groups()[0].strip(' ')
def _get_3par_vol_comment_value(self, vol_comment, key):
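        # Volume comments are stored on the array as dict literals;
        # ast.literal_eval parses them safely without executing code.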
comment_dict = dict(ast.literal_eval(vol_comment))
if key in comment_dict:
return comment_dict[key]
return None
def _get_model_update(self, volume_host, cpg, replication=False,
provider_location=None):
"""Get model_update dict to use when we select a pool.
The pools implementation uses a volume['host'] suffix of :poolname.
When the volume comes in with this selected pool, we sometimes use
a different pool (e.g. because the type says to use a different pool).
So in the several places that we do this, we need to return a model
update so that the volume will have the actual pool name in the host
suffix after the operation.
Given a volume_host, which should (might) have the pool suffix, and
given the CPG we actually chose to use, return a dict to use for a
model update iff an update is needed.
:param volume_host: The volume's host string.
:param cpg: The actual pool (cpg) used, for example from the type.
:returns: dict Model update if we need to update volume host, else None
"""
model_update = {}
host = volume_utils.extract_host(volume_host, 'backend')
host_and_pool = volume_utils.append_host(host, cpg)
if volume_host != host_and_pool:
# Since we selected a pool based on type, update the model.
model_update['host'] = host_and_pool
if replication:
model_update['replication_status'] = 'enabled'
if replication and provider_location:
model_update['provider_location'] = provider_location
if not model_update:
model_update = None
return model_update
def _create_temp_snapshot(self, volume):
"""This creates a temporary snapshot of a volume.
This is used by cloning a volume so that we can then
issue extend volume against the original volume.
"""
vol_name = self._get_3par_vol_name(volume['id'])
# create a brand new uuid for the temp snap
snap_uuid = uuid.uuid4().hex
# this will be named tss-%s
snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True)
extra = {'volume_name': volume['name'],
'volume_id': volume['id']}
optional = {'comment': json.dumps(extra)}
# let the snapshot die in an hour
optional['expirationHours'] = 1
LOG.info("Creating temp snapshot %(snap)s from volume %(vol)s",
{'snap': snap_name, 'vol': vol_name})
self.client.createSnapshot(snap_name, vol_name, optional)
return self.client.getVolume(snap_name)
def create_cloned_volume(self, volume, src_vref):
try:
vol_name = self._get_3par_vol_name(volume['id'])
src_vol_name = self._get_3par_vol_name(src_vref['id'])
back_up_process = False
vol_chap_enabled = False
# Check whether a volume is ISCSI and CHAP enabled on it.
if self._client_conf['hpe3par_iscsi_chap_enabled']:
try:
vol_chap_enabled = self.client.getVolumeMetaData(
src_vol_name, 'HPQ-cinder-CHAP-name')['value']
except hpeexceptions.HTTPNotFound:
LOG.debug("CHAP is not enabled on volume %(vol)s ",
{'vol': src_vref['id']})
vol_chap_enabled = False
# Check whether a process is a backup
if str(src_vref['status']) == 'backing-up':
back_up_process = True
            # If the sizes of the two volumes match, and this is not a
            # backup of an iSCSI volume with CHAP enabled, we can do an
            # online copy, which is a background process on the 3PAR that
            # makes the volume instantly available. We can't resize a
            # volume while it's being copied.
if volume['size'] == src_vref['size'] and not (
back_up_process and vol_chap_enabled):
LOG.debug("Creating a clone of volume, using online copy.")
type_info = self.get_volume_settings_from_type(volume)
snapshot = self._create_temp_snapshot(src_vref)
cpg = type_info['cpg']
qos = type_info['qos']
vvs_name = type_info['vvs_name']
flash_cache = self.get_flash_cache_policy(
type_info['hpe3par_keys'])
compression_val = self.get_compression_policy(
type_info['hpe3par_keys'])
# make the 3PAR copy the contents.
# can't delete the original until the copy is done.
self._copy_volume(snapshot['name'], vol_name, cpg=cpg,
snap_cpg=type_info['snap_cpg'],
tpvv=type_info['tpvv'],
tdvv=type_info['tdvv'],
compression=compression_val)
if qos or vvs_name or flash_cache is not None:
try:
self._add_volume_to_volume_set(
volume, vol_name, cpg, vvs_name, qos, flash_cache)
except exception.InvalidInput as ex:
# Delete volume if unable to add it to the volume set
self.client.deleteVolume(vol_name)
dbg = {'volume': vol_name,
'vvs_name': vvs_name,
'err': six.text_type(ex)}
msg = _("Failed to add volume '%(volume)s' to vvset "
"'%(vvs_name)s' because '%(err)s'") % dbg
LOG.error(msg)
raise exception.CinderException(msg)
# v2 replication check
replication_flag = False
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
replication_flag = True
return self._get_model_update(volume['host'], cpg,
replication=replication_flag,
provider_location=self.client.id)
else:
# The size of the new volume is different, so we have to
# copy the volume and wait. Do the resize after the copy
# is complete.
LOG.debug("Creating a clone of volume, using non-online copy.")
# we first have to create the destination volume
model_update = self.create_volume(volume)
optional = {'priority': 1}
body = self.client.copyVolume(src_vol_name, vol_name, None,
optional=optional)
task_id = body['taskid']
task_status = self._wait_for_task_completion(task_id)
                if task_status['status'] != self.client.TASK_DONE:
dbg = {'status': task_status, 'id': volume['id']}
msg = _('Copy volume task failed: create_cloned_volume '
'id=%(id)s, status=%(status)s.') % dbg
raise exception.CinderException(msg)
else:
LOG.debug('Copy volume completed: create_cloned_volume: '
'id=%s.', volume['id'])
return model_update
except hpeexceptions.HTTPForbidden:
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound:
raise exception.NotFound()
except Exception as ex:
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
def delete_volume(self, volume):
# v2 replication check
# If the volume type is replication enabled, we want to call our own
# method of deconstructing the volume and its dependencies
if self._volume_of_replicated_type(volume):
replication_status = volume.get('replication_status', None)
if replication_status and replication_status == "failed-over":
self._delete_replicated_failed_over_volume(volume)
else:
self._do_volume_replication_destroy(volume)
return
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
# the volume is part of a volume set which will have the
# volume set name in the error.
try:
self.client.deleteVolume(volume_name)
except hpeexceptions.HTTPBadRequest as ex:
if ex.get_code() == 29:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
LOG.error("Exception: %s", ex)
raise
else:
LOG.error("Exception: %s", ex)
raise
except hpeexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
self._delete_vvset(volume)
self.client.deleteVolume(volume_name)
elif ex.get_code() == 151:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
# the volume is being operated on in a background
# task on the 3PAR.
# TODO(walter-boring) do a retry a few times.
# for now lets log a better message
msg = _("The volume is currently busy on the 3PAR"
" and cannot be deleted at this time. "
"You can try again later.")
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
elif (ex.get_code() == 32):
# Error 32 means that the volume has children
# see if we have any temp snapshots
snaps = self.client.getVolumeSnapshots(volume_name)
for snap in snaps:
if snap.startswith('tss-'):
# looks like we found a temp snapshot.
LOG.info(
"Found a temporary snapshot %(name)s",
{'name': snap})
try:
self.client.deleteVolume(snap)
except hpeexceptions.HTTPNotFound:
# if the volume is gone, it's as good as a
# successful delete
pass
except Exception:
msg = _("Volume has a temporary snapshot that "
"can't be deleted at this time.")
raise exception.VolumeIsBusy(message=msg)
try:
self.delete_volume(volume)
except Exception:
msg = _("Volume has children and cannot be deleted!")
raise exception.VolumeIsBusy(message=msg)
else:
LOG.error("Exception: %s", ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpeexceptions.HTTPNotFound as ex:
            # We'll let this act as if it worked;
            # it helps clean up the cinder entries.
LOG.warning("Delete volume id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s",
{'id': volume['id'], 'msg': ex})
except hpeexceptions.HTTPForbidden as ex:
LOG.error("Exception: %s", ex)
raise exception.NotAuthorized(ex.get_description())
except hpeexceptions.HTTPConflict as ex:
LOG.error("Exception: %s", ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot, snap_name=None,
vvs_name=None):
"""Creates a volume from a snapshot."""
LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s",
{'vol_name': pprint.pformat(volume['display_name']),
'ss_name': pprint.pformat(snapshot['display_name'])})
model_update = {}
if volume['size'] < snapshot['volume_size']:
err = ("You cannot reduce size of the volume. It must "
"be greater than or equal to the snapshot.")
LOG.error(err)
raise exception.InvalidInput(reason=err)
try:
if not snap_name:
snap_name = self._get_3par_snap_name(snapshot['id'])
volume_name = self._get_3par_vol_name(volume['id'])
extra = {'volume_id': volume['id'],
'snapshot_id': snapshot['id']}
type_id = volume.get('volume_type_id', None)
hpe3par_keys, qos, _volume_type, vvs = self.get_type_info(
type_id)
if vvs:
vvs_name = vvs
name = volume.get('display_name', None)
if name:
extra['display_name'] = name
description = volume.get('display_description', None)
if description:
extra['description'] = description
optional = {'comment': json.dumps(extra),
'readOnly': False}
self.client.createSnapshot(volume_name, snap_name, optional)
# Convert snapshot volume to base volume type
LOG.debug('Converting to base volume type: %s.',
volume['id'])
model_update = self._convert_to_base_volume(volume)
            # Grow the new volume if needed.
growth_size = volume['size'] - snapshot['volume_size']
if growth_size > 0:
try:
growth_size_mib = growth_size * units.Gi / units.Mi
LOG.debug('Growing volume: %(id)s by %(size)s GiB.',
{'id': volume['id'], 'size': growth_size})
self.client.growVolume(volume_name, growth_size_mib)
except Exception as ex:
LOG.error("Error extending volume %(id)s. "
"Ex: %(ex)s",
{'id': volume['id'], 'ex': ex})
# Delete the volume if unable to grow it
self.client.deleteVolume(volume_name)
raise exception.CinderException(ex)
# Check for flash cache setting in extra specs
flash_cache = self.get_flash_cache_policy(hpe3par_keys)
if qos or vvs_name or flash_cache is not None:
cpg_names = self._get_key_value(
hpe3par_keys, 'cpg', self._client_conf['hpe3par_cpg'])
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg_names[0], vvs_name,
qos, flash_cache)
except Exception as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
# v2 replication check
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
model_update['replication_status'] = 'enabled'
model_update['provider_location'] = self.client.id
except hpeexceptions.HTTPForbidden as ex:
LOG.error("Exception: %s", ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
LOG.error("Exception: %s", ex)
raise exception.NotFound()
except Exception as ex:
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
return model_update
def create_snapshot(self, snapshot):
LOG.debug("Create Snapshot\n%s", pprint.pformat(snapshot))
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
vol_name = self._get_3par_vol_name(snapshot['volume_id'])
extra = {'volume_name': snapshot['volume_name']}
vol_id = snapshot.get('volume_id', None)
if vol_id:
extra['volume_id'] = vol_id
try:
extra['display_name'] = snapshot['display_name']
except AttributeError:
pass
try:
extra['description'] = snapshot['display_description']
except AttributeError:
pass
optional = {'comment': json.dumps(extra),
'readOnly': True}
if self.config.hpe3par_snapshot_expiration:
optional['expirationHours'] = (
int(self.config.hpe3par_snapshot_expiration))
if self.config.hpe3par_snapshot_retention:
optional['retentionHours'] = (
int(self.config.hpe3par_snapshot_retention))
self.client.createSnapshot(snap_name, vol_name, optional)
except hpeexceptions.HTTPForbidden as ex:
LOG.error("Exception: %s", ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
LOG.error("Exception: %s", ex)
raise exception.NotFound()
def migrate_volume(self, volume, host):
"""Migrate directly if source and dest are managed by same storage.
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
:returns: (False, None) if the driver does not support migration,
(True, model_update) if successful
"""
dbg = {'id': volume['id'],
'host': host['host'],
'status': volume['status']}
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'status=%(status)s.', dbg)
ret = False, None
if volume['status'] in ['available', 'in-use']:
volume_type = None
if volume['volume_type_id']:
volume_type = self._get_volume_type(volume['volume_type_id'])
try:
ret = self.retype(volume, volume_type, None, host)
except Exception as e:
LOG.info('3PAR driver cannot perform migration. '
'Retype exception: %s', e)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, '
'status=%(status)s.', dbg)
dbg_ret = {'supported': ret[0], 'model_update': ret[1]}
LOG.debug('migrate_volume result: %(supported)s, %(model_update)s',
dbg_ret)
return ret
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Rename the new (temp) volume to it's original name.
This method tries to rename the new volume to it's original
name after the migration has completed.
"""
LOG.debug("Update volume name for %(id)s", {'id': new_volume['id']})
name_id = None
provider_location = None
if original_volume_status == 'available':
# volume isn't attached and can be updated
original_name = self._get_3par_vol_name(volume['id'])
temp_name = self._get_3par_vol_name(volume['id'], temp_vol=True)
current_name = self._get_3par_vol_name(new_volume['id'])
try:
volumeMods = {'newName': original_name}
volumeTempMods = {'newName': temp_name}
volumeCurrentMods = {'newName': current_name}
# swap volume name in backend
self.client.modifyVolume(original_name, volumeTempMods)
self.client.modifyVolume(current_name, volumeMods)
self.client.modifyVolume(temp_name, volumeCurrentMods)
LOG.info("Volume name changed from %(tmp)s to %(orig)s",
{'tmp': current_name, 'orig': original_name})
except Exception as e:
LOG.error("Changing the volume name from %(tmp)s to "
"%(orig)s failed because %(reason)s",
{'tmp': current_name, 'orig': original_name,
'reason': e})
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# the backend can't change the name.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
def _wait_for_task_completion(self, task_id):
"""This waits for a 3PAR background task complete or fail.
This looks for a task to get out of the 'active' state.
"""
# Wait for the physical copy task to complete
def _wait_for_task(task_id):
status = self.client.getTask(task_id)
LOG.debug("3PAR Task id %(id)s status = %(status)s",
{'id': task_id,
'status': status['status']})
            if status['status'] != self.client.TASK_ACTIVE:
self._task_status = status
raise loopingcall.LoopingCallDone()
self._task_status = None
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_task, task_id)
timer.start(interval=1).wait()
return self._task_status
def _convert_to_base_volume(self, volume, new_cpg=None):
try:
type_info = self.get_volume_settings_from_type(volume)
if new_cpg:
cpg = new_cpg
else:
cpg = type_info['cpg']
# Change the name such that it is unique since 3PAR
# names must be unique across all CPGs
volume_name = self._get_3par_vol_name(volume['id'])
temp_vol_name = volume_name.replace("osv-", "omv-")
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
# Create a physical copy of the volume
task_id = self._copy_volume(volume_name, temp_vol_name,
cpg, cpg, type_info['tpvv'],
type_info['tdvv'],
compression)
LOG.debug('Copy volume scheduled: convert_to_base_volume: '
'id=%s.', volume['id'])
task_status = self._wait_for_task_completion(task_id)
            if task_status['status'] != self.client.TASK_DONE:
dbg = {'status': task_status, 'id': volume['id']}
msg = _('Copy volume task failed: convert_to_base_volume: '
'id=%(id)s, status=%(status)s.') % dbg
raise exception.CinderException(msg)
else:
LOG.debug('Copy volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
comment = self._get_3par_vol_comment(volume_name)
if comment:
self.client.modifyVolume(temp_vol_name, {'comment': comment})
LOG.debug('Volume rename completed: convert_to_base_volume: '
'id=%s.', volume['id'])
# Delete source volume after the copy is complete
self.client.deleteVolume(volume_name)
LOG.debug('Delete src volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
# Rename the new volume to the original name
self.client.modifyVolume(temp_vol_name, {'newName': volume_name})
LOG.info('Completed: convert_to_base_volume: '
'id=%s.', volume['id'])
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array.") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error("Exception: %s", ex)
raise exception.Invalid(ex.get_description())
except exception.CinderException as ex:
LOG.error("Exception: %s", ex)
raise
except Exception as ex:
LOG.error("Exception: %s", ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
def delete_snapshot(self, snapshot):
LOG.debug("Delete Snapshot id %(id)s %(name)s",
{'id': snapshot['id'], 'name': pprint.pformat(snapshot)})
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
self.client.deleteVolume(snap_name)
except hpeexceptions.HTTPForbidden as ex:
LOG.error("Exception: %s", ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
LOG.warning("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s",
{'id': snapshot['id'], 'msg': ex})
except hpeexceptions.HTTPConflict as ex:
            if ex.get_code() == 32:
# Error 32 means that the snapshot has children
# see if we have any temp snapshots
snaps = self.client.getVolumeSnapshots(snap_name)
for snap in snaps:
if snap.startswith('tss-'):
LOG.info(
"Found a temporary snapshot %(name)s",
{'name': snap})
try:
self.client.deleteVolume(snap)
except hpeexceptions.HTTPNotFound:
# if the volume is gone, it's as good as a
# successful delete
pass
except Exception:
msg = _("Snapshot has a temporary snapshot that "
"can't be deleted at this time.")
raise exception.SnapshotIsBusy(message=msg)
try:
self.client.deleteVolume(snap_name)
except Exception:
msg = _("Snapshot has children and cannot be deleted!")
raise exception.SnapshotIsBusy(message=msg)
else:
LOG.error("Exception: %s", ex)
raise exception.SnapshotIsBusy(message=ex.get_description())
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
if wwns is not None and not isinstance(wwns, list):
wwns = [wwns]
if iqns is not None and not isinstance(iqns, list):
iqns = [iqns]
out = self.client.getHosts()
hosts = out['members']
for host in hosts:
if 'iSCSIPaths' in host and iqns is not None:
iscsi_paths = host['iSCSIPaths']
for iscsi in iscsi_paths:
for iqn in iqns:
if iqn == iscsi['name']:
return host['name']
if 'FCPaths' in host and wwns is not None:
fc_paths = host['FCPaths']
for fc in fc_paths:
for wwn in wwns:
if wwn.upper() == fc['wwn'].upper():
return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
"""Driver entry point to unattach a volume from an instance."""
# does 3par know this host by a different name?
hosts = None
if wwn:
hosts = self.client.queryHost(wwns=wwn)
elif iqn:
hosts = self.client.queryHost(iqns=[iqn])
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
hostname = hosts['members'][0]['name']
try:
self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn)
return
except hpeexceptions.HTTPNotFound as e:
if 'host does not exist' in e.get_description():
# If a host is failed-over, we want to allow the detach to
# 'succeed' when it cannot find the host. We can simply
# return out of the terminate connection in order for things
# to be updated correctly.
if self._active_backend_id:
LOG.warning("Because the host is currently in a "
"failed-over state, the volume will not "
"be properly detached from the primary "
"array. The detach will be considered a "
"success as far as Cinder is concerned. "
"The volume can now be attached to the "
"secondary target.")
return
else:
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if hostname is None:
LOG.error("Exception: %s", e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error("Exception: %s", e)
raise
# try again with name retrieved from 3par
self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn)
def build_nsp(self, portPos):
return '%s:%s:%s' % (portPos['node'],
portPos['slot'],
portPos['cardPort'])
def build_portPos(self, nsp):
split = nsp.split(":")
portPos = {}
portPos['node'] = int(split[0])
portPos['slot'] = int(split[1])
portPos['cardPort'] = int(split[2])
return portPos
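    # Round-trip sketch for the two helpers above, derived from their code:
    #   build_nsp({'node': 0, 'slot': 1, 'cardPort': 2}) -> '0:1:2'
    #   build_portPos('0:1:2') -> {'node': 0, 'slot': 1, 'cardPort': 2}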
def tune_vv(self, old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name, new_compression):
"""Tune the volume to change the userCPG and/or provisioningType.
The volume will be modified/tuned/converted to the new userCPG and
provisioningType, as needed.
TaskWaiter is used to make this function wait until the 3PAR task
is no longer active. When the task is no longer active, then it must
either be done or it is in a state that we need to treat as an error.
"""
compression = False
if new_compression is not None:
compression = new_compression
if old_tpvv == new_tpvv and old_tdvv == new_tdvv:
if new_cpg != old_cpg:
LOG.info("Modifying %(volume_name)s userCPG "
"from %(old_cpg)s"
" to %(new_cpg)s",
{'volume_name': volume_name,
'old_cpg': old_cpg, 'new_cpg': new_cpg})
_response, body = self.client.modifyVolume(
volume_name,
{'action': 6,
'tuneOperation': 1,
'userCPG': new_cpg})
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
                if status['status'] != self.client.TASK_DONE:
msg = (_('Tune volume task stopped before it was done: '
'volume_name=%(volume_name)s, '
'task-status=%(status)s.') %
{'status': status, 'volume_name': volume_name})
raise exception.VolumeBackendAPIException(msg)
else:
if new_tpvv:
cop = self.CONVERT_TO_THIN
LOG.info("Converting %(volume_name)s to thin provisioning "
"with userCPG=%(new_cpg)s",
{'volume_name': volume_name, 'new_cpg': new_cpg})
elif new_tdvv:
cop = self.CONVERT_TO_DEDUP
LOG.info("Converting %(volume_name)s to thin dedup "
"provisioning with userCPG=%(new_cpg)s",
{'volume_name': volume_name, 'new_cpg': new_cpg})
else:
cop = self.CONVERT_TO_FULL
LOG.info("Converting %(volume_name)s to full provisioning "
"with userCPG=%(new_cpg)s",
{'volume_name': volume_name, 'new_cpg': new_cpg})
try:
if self.API_VERSION < COMPRESSION_API_VERSION:
response, body = self.client.modifyVolume(
volume_name,
{'action': 6,
'tuneOperation': 1,
'userCPG': new_cpg,
'conversionOperation': cop})
else:
response, body = self.client.modifyVolume(
volume_name,
{'action': 6,
'tuneOperation': 1,
'userCPG': new_cpg,
'compression': compression,
'conversionOperation': cop})
except hpeexceptions.HTTPBadRequest as ex:
if ex.get_code() == 40 and "keepVV" in six.text_type(ex):
# Cannot retype with snapshots because we don't want to
# use keepVV and have straggling volumes. Log additional
# info and then raise.
LOG.info("tunevv failed because the volume '%s' "
"has snapshots.", volume_name)
raise
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
            if status['status'] != self.client.TASK_DONE:
msg = (_('Tune volume task stopped before it was done: '
'volume_name=%(volume_name)s, '
'task-status=%(status)s.') %
{'status': status, 'volume_name': volume_name})
raise exception.VolumeBackendAPIException(msg)
def _retype_pre_checks(self, volume, host, new_persona,
old_cpg, new_cpg,
new_snap_cpg):
"""Test retype parameters before making retype changes.
Do pre-retype parameter validation. These checks will
raise an exception if we should not attempt this retype.
"""
if new_persona:
self.validate_persona(new_persona)
if host is not None:
(host_type, host_id, _host_cpg) = (
host['capabilities']['location_info']).split(':')
            if host_type != 'HPE3PARDriver':
reason = (_("Cannot retype from HPE3PARDriver to %s.") %
host_type)
raise exception.InvalidHost(reason)
sys_info = self.client.getStorageSystemInfo()
            if host_id != sys_info['serialNumber']:
reason = (_("Cannot retype from one 3PAR array to another."))
raise exception.InvalidHost(reason)
        # Validate new_snap_cpg. A whitespace-only snapCPG will fail
        # eventually, but we'd prefer to fail fast if this ever happens.
if not new_snap_cpg or new_snap_cpg.isspace():
reason = (_("Invalid new snapCPG name for retype. "
"new_snap_cpg='%s'.") % new_snap_cpg)
raise exception.InvalidInput(reason)
# Check to make sure CPGs are in the same domain
domain = self.get_domain(old_cpg)
if domain != self.get_domain(new_cpg):
reason = (_('Cannot retype to a CPG in a different domain.'))
raise exception.Invalid3PARDomain(reason)
if domain != self.get_domain(new_snap_cpg):
reason = (_('Cannot retype to a snap CPG in a different domain.'))
raise exception.Invalid3PARDomain(reason)
def _retype(self, volume, volume_name, new_type_name, new_type_id, host,
new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg,
old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_vvs, new_vvs, old_qos, new_qos,
old_flash_cache, new_flash_cache,
old_comment, new_compression):
action = "volume:retype"
self._retype_pre_checks(volume, host, new_persona,
old_cpg, new_cpg,
new_snap_cpg)
flow_name = action.replace(":", "_") + "_api"
retype_flow = linear_flow.Flow(flow_name)
# Keep this linear and do the big tunevv last. Everything leading
# up to that is reversible, but we'd let the 3PAR deal with tunevv
# errors on its own.
retype_flow.add(
ModifyVolumeTask(action),
ModifySpecsTask(action),
TuneVolumeTask(action),
ReplicateVolumeTask(action))
taskflow.engines.run(
retype_flow,
store={'common': self,
'volume_name': volume_name, 'volume': volume,
'old_tpvv': old_tpvv, 'new_tpvv': new_tpvv,
'old_tdvv': old_tdvv, 'new_tdvv': new_tdvv,
'old_cpg': old_cpg, 'new_cpg': new_cpg,
'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg,
'old_vvs': old_vvs, 'new_vvs': new_vvs,
'old_qos': old_qos, 'new_qos': new_qos,
'old_flash_cache': old_flash_cache,
'new_flash_cache': new_flash_cache,
'new_type_name': new_type_name, 'new_type_id': new_type_id,
'old_comment': old_comment,
'new_compression': new_compression
})
def _retype_from_old_to_new(self, volume, new_type, old_volume_settings,
host):
"""Convert the volume to be of the new type. Given old type settings.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param old_volume_settings: Volume settings describing the old type.
:param host: A dictionary describing the host, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities. Host validation
is just skipped if host is None.
"""
volume_id = volume['id']
volume_name = self._get_3par_vol_name(volume_id)
new_type_name = None
new_type_id = None
if new_type:
new_type_name = new_type['name']
new_type_id = new_type['id']
pool = None
if host:
pool = volume_utils.extract_host(host['host'], 'pool')
else:
pool = volume_utils.extract_host(volume['host'], 'pool')
new_volume_settings = self.get_volume_settings_from_type_id(
new_type_id, pool)
new_cpg = new_volume_settings['cpg']
new_snap_cpg = new_volume_settings['snap_cpg']
new_tpvv = new_volume_settings['tpvv']
new_tdvv = new_volume_settings['tdvv']
new_qos = new_volume_settings['qos']
new_vvs = new_volume_settings['vvs_name']
new_persona = None
new_hpe3par_keys = new_volume_settings['hpe3par_keys']
if 'persona' in new_hpe3par_keys:
new_persona = new_hpe3par_keys['persona']
new_flash_cache = self.get_flash_cache_policy(new_hpe3par_keys)
        # It will return None / True / False.
new_compression = self.get_compression_policy(new_hpe3par_keys)
old_qos = old_volume_settings['qos']
old_vvs = old_volume_settings['vvs_name']
old_hpe3par_keys = old_volume_settings['hpe3par_keys']
old_flash_cache = self.get_flash_cache_policy(old_hpe3par_keys)
# Get the current volume info because we can get in a bad state
# if we trust that all the volume type settings are still the
# same settings that were used with this volume.
old_volume_info = self.client.getVolume(volume_name)
old_tpvv = old_volume_info['provisioningType'] == self.THIN
old_tdvv = old_volume_info['provisioningType'] == self.DEDUP
old_cpg = old_volume_info['userCPG']
old_comment = old_volume_info['comment']
old_snap_cpg = None
if 'snapCPG' in old_volume_info:
old_snap_cpg = old_volume_info['snapCPG']
LOG.debug("retype old_volume_info=%s", old_volume_info)
LOG.debug("retype old_volume_settings=%s", old_volume_settings)
LOG.debug("retype new_volume_settings=%s", new_volume_settings)
self._retype(volume, volume_name, new_type_name, new_type_id,
host, new_persona, old_cpg, new_cpg,
old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv,
old_tdvv, new_tdvv, old_vvs, new_vvs,
old_qos, new_qos, old_flash_cache, new_flash_cache,
old_comment, new_compression)
if host:
return True, self._get_model_update(host['host'], new_cpg)
else:
return True, self._get_model_update(volume['host'], new_cpg)
def _retype_from_no_type(self, volume, new_type):
"""Convert the volume to be of the new type. Starting from no type.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
        :param volume: A dictionary describing the volume to retype. The
                       volume's current type settings are not used here;
                       None is used instead.
:param new_type: A dictionary describing the volume type to convert to
"""
pool = volume_utils.extract_host(volume['host'], 'pool')
none_type_settings = self.get_volume_settings_from_type_id(None, pool)
return self._retype_from_old_to_new(volume, new_type,
none_type_settings, None)
def retype(self, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities. Host validation
is just skipped if host is None.
"""
LOG.debug(("enter: retype: id=%(id)s, new_type=%(new_type)s,"
"diff=%(diff)s, host=%(host)s"), {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
self.remove_temporary_snapshots(volume)
old_volume_settings = self.get_volume_settings_from_type(volume, host)
return self._retype_from_old_to_new(volume, new_type,
old_volume_settings, host)
def remove_temporary_snapshots(self, volume):
vol_name = self._get_3par_vol_name(volume['id'])
snapshots_list = self.client.getVolumeSnapshots(vol_name)
tmp_snapshots_list = [snap
for snap in snapshots_list
if snap.startswith('tss-')]
LOG.debug("temporary snapshot list %(name)s",
{'name': tmp_snapshots_list})
for temp_snap in tmp_snapshots_list:
LOG.debug("Found a temporary snapshot %(name)s",
{'name': temp_snap})
try:
self.client.deleteVolume(temp_snap)
except hpeexceptions.HTTPNotFound:
# if the volume is gone, it's as good as a
# successful delete
pass
except Exception:
msg = _("Volume has a temporary snapshot.")
raise exception.VolumeIsBusy(message=msg)
def find_existing_vlun(self, volume, host):
"""Finds an existing VLUN for a volume on a host.
Returns an existing VLUN's information. If no existing VLUN is found,
None is returned.
:param volume: A dictionary describing a volume.
:param host: A dictionary describing a host.
"""
existing_vlun = None
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
# The first existing VLUN found will be returned.
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vlun = vlun
break
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
return existing_vlun
def find_existing_vluns(self, volume, host):
existing_vluns = []
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vluns.append(vlun)
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
return existing_vluns
# v2 replication methods
def failover_host(self, context, volumes, secondary_backend_id):
"""Force failover to a secondary replication target."""
# Ensure replication is enabled before we try and failover.
if not self._replication_enabled:
msg = _("Issuing a fail-over failed because replication is "
"not properly configured.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Check to see if the user requested to failback.
if (secondary_backend_id and
secondary_backend_id == self.FAILBACK_VALUE):
volume_update_list = self._replication_failback(volumes)
target_id = None
else:
# Find the failover target.
failover_target = None
for target in self._replication_targets:
if target['backend_id'] == secondary_backend_id:
failover_target = target
break
if not failover_target:
msg = _("A valid secondary target MUST be specified in order "
"to failover.")
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
target_id = failover_target['backend_id']
# For each volume, if it is replicated, we want to fail it over.
volume_update_list = []
for volume in volumes:
if self._volume_of_replicated_type(volume):
try:
# Try and stop remote-copy on main array. We eat the
# exception here because when an array goes down, the
# groups will stop automatically.
rcg_name = self._get_3par_rcg_name(volume['id'])
self.client.stopRemoteCopy(rcg_name)
except Exception:
pass
                cl = None
                try:
                    # Failover to secondary array.
                    remote_rcg_name = self._get_3par_remote_rcg_name(
                        volume['id'], volume['provider_location'])
                    cl = self._create_replication_client(failover_target)
cl.recoverRemoteCopyGroupFromDisaster(
remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY)
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'failed-over'}})
except Exception as ex:
LOG.error("There was a problem with the failover "
"(%(error)s) and it was unsuccessful. "
"Volume '%(volume)s will not be available "
"on the failed over target.",
{'error': ex,
'volume': volume['id']})
LOG.error(msg)
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'error'}})
finally:
self._destroy_replication_client(cl)
else:
# If the volume is not of replicated type, we need to
# force the status into error state so a user knows they
# do not have access to the volume.
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'status': 'error'}})
return target_id, volume_update_list
def _replication_failback(self, volumes):
# Make sure the proper steps on the backend have been completed before
# we allow a fail-over.
if not self._is_host_ready_for_failback(volumes):
msg = _("The host is not ready to be failed back. Please "
"resynchronize the volumes and resume replication on the "
"3PAR backends.")
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
# Update the volumes status to available.
volume_update_list = []
for volume in volumes:
if self._volume_of_replicated_type(volume):
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'available'}})
else:
# Upon failing back, we can move the non-replicated volumes
# back into available state.
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'status': 'available'}})
return volume_update_list
def _is_host_ready_for_failback(self, volumes):
"""Checks to make sure the volume has been synchronized
This ensures that all the remote copy targets have been restored
to their natural direction, and all of the volumes have been
fully synchronized.
"""
try:
for volume in volumes:
if self._volume_of_replicated_type(volume):
location = volume.get('provider_location')
remote_rcg_name = self._get_3par_remote_rcg_name(
volume['id'],
location)
rcg = self.client.getRemoteCopyGroup(remote_rcg_name)
# Make sure all targets are in their natural direction.
targets = rcg['targets']
for target in targets:
if target['roleReversed'] or (
target['state'] != self.RC_GROUP_STARTED):
return False
# Make sure all volumes are fully synced.
volumes = rcg['volumes']
for volume in volumes:
remote_volumes = volume['remoteVolumes']
for remote_volume in remote_volumes:
if remote_volume['syncStatus'] != (
self.SYNC_STATUS_COMPLETED):
return False
except Exception:
# If there was a problem, we will return false so we can
# log an error in the parent function.
return False
return True
def _do_replication_setup(self):
replication_targets = []
replication_devices = self.config.replication_device
if replication_devices:
for dev in replication_devices:
remote_array = dict(dev.items())
# Override and set defaults for certain entries
remote_array['managed_backend_name'] = (
dev.get('managed_backend_name'))
remote_array['replication_mode'] = (
self._get_remote_copy_mode_num(
dev.get('replication_mode')))
remote_array['san_ssh_port'] = (
dev.get('san_ssh_port', self.config.san_ssh_port))
remote_array['ssh_conn_timeout'] = (
dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout))
remote_array['san_private_key'] = (
dev.get('san_private_key', self.config.san_private_key))
# Format iscsi IPs correctly
iscsi_ips = dev.get('hpe3par_iscsi_ips')
if iscsi_ips:
remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ')
# Format hpe3par_iscsi_chap_enabled as a bool
remote_array['hpe3par_iscsi_chap_enabled'] = (
dev.get('hpe3par_iscsi_chap_enabled') == 'True')
array_name = remote_array['backend_id']
# Make sure we can log into the array, that it has been
# correctly configured, and its API version meets the
# minimum requirement.
cl = None
try:
cl = self._create_replication_client(remote_array)
array_id = six.text_type(cl.getStorageSystemInfo()['id'])
remote_array['id'] = array_id
wsapi_version = cl.getWsApiVersion()['build']
if wsapi_version < REMOTE_COPY_API_VERSION:
LOG.warning("The secondary array must have an API "
"version of %(min_ver)s or higher. Array "
"'%(target)s' is on %(target_ver)s, "
"therefore it will not be added as a "
"valid replication target.",
{'target': array_name,
'min_ver': REMOTE_COPY_API_VERSION,
'target_ver': wsapi_version})
elif not self._is_valid_replication_array(remote_array):
LOG.warning("'%s' is not a valid replication array. "
"In order to be valid, backend_id, "
"replication_mode, "
"hpe3par_api_url, hpe3par_username, "
"hpe3par_password, cpg_map, san_ip, "
"san_login, and san_password "
"must be specified. If the target is "
"managed, managed_backend_name must be "
"set as well.", array_name)
else:
replication_targets.append(remote_array)
except Exception:
LOG.error("Could not log in to 3PAR array (%s) with the "
"provided credentials.", array_name)
finally:
self._destroy_replication_client(cl)
self._replication_targets = replication_targets
if self._is_replication_configured_correct():
self._replication_enabled = True
def _is_valid_replication_array(self, target):
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password', 'san_ip', 'san_login',
'san_password', 'backend_id',
'replication_mode', 'cpg_map']
try:
self.check_replication_flags(target, required_flags)
return True
except Exception:
return False
def _is_replication_configured_correct(self):
rep_flag = True
# Make sure there is at least one replication target.
if len(self._replication_targets) < 1:
LOG.error("There must be at least one valid replication "
"device configured.")
rep_flag = False
return rep_flag
def _is_replication_mode_correct(self, mode, sync_num):
rep_flag = True
# Make sure replication_mode is set to either sync|periodic.
mode = self._get_remote_copy_mode_num(mode)
if not mode:
LOG.error("Extra spec replication:mode must be set and must "
"be either 'sync' or 'periodic'.")
rep_flag = False
else:
# If replication:mode is periodic, replication_sync_period must be
# set between 300 - 31622400 seconds.
if mode == self.PERIODIC and (
sync_num < 300 or sync_num > 31622400):
LOG.error("Extra spec replication:sync_period must be "
"greater than 299 and less than 31622401 "
"seconds.")
rep_flag = False
return rep_flag
def is_volume_group_snap_type(self, volume_type):
consis_group_snap_type = False
if volume_type:
extra_specs = volume_type.extra_specs
if 'consistent_group_snapshot_enabled' in extra_specs:
gsnap_val = extra_specs['consistent_group_snapshot_enabled']
consis_group_snap_type = (gsnap_val == "<is> True")
return consis_group_snap_type
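    # Example, grounded in the check above: a volume type with
    # extra_specs={'consistent_group_snapshot_enabled': '<is> True'}
    # yields True; any other value, or no volume type, yields False.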
def _volume_of_replicated_type(self, volume):
replicated_type = False
volume_type_id = volume.get('volume_type_id')
if volume_type_id:
volume_type = self._get_volume_type(volume_type_id)
extra_specs = volume_type.get('extra_specs')
if extra_specs and 'replication_enabled' in extra_specs:
rep_val = extra_specs['replication_enabled']
replicated_type = (rep_val == "<is> True")
return replicated_type
def _is_volume_in_remote_copy_group(self, volume):
rcg_name = self._get_3par_rcg_name(volume['id'])
try:
self.client.getRemoteCopyGroup(rcg_name)
return True
except hpeexceptions.HTTPNotFound:
return False
def _get_remote_copy_mode_num(self, mode):
ret_mode = None
if mode == "sync":
ret_mode = self.SYNC
if mode == "periodic":
ret_mode = self.PERIODIC
return ret_mode
def _get_3par_config(self):
self._do_replication_setup()
conf = None
if self._replication_enabled:
for target in self._replication_targets:
if target['backend_id'] == self._active_backend_id:
conf = target
break
self._build_3par_config(conf)
def _build_3par_config(self, conf=None):
"""Build 3PAR client config dictionary.
self._client_conf will contain values from self.config if the volume
is located on the primary array in order to properly contact it. If
the volume has been failed over and therefore on a secondary array,
self._client_conf will contain values on how to contact that array.
The only time we will return with entries from a secondary array is
with unmanaged replication.
"""
if conf:
self._client_conf['hpe3par_cpg'] = self._generate_hpe3par_cpgs(
conf.get('cpg_map'))
self._client_conf['hpe3par_username'] = (
conf.get('hpe3par_username'))
self._client_conf['hpe3par_password'] = (
conf.get('hpe3par_password'))
self._client_conf['san_ip'] = conf.get('san_ip')
self._client_conf['san_login'] = conf.get('san_login')
self._client_conf['san_password'] = conf.get('san_password')
self._client_conf['san_ssh_port'] = conf.get('san_ssh_port')
self._client_conf['ssh_conn_timeout'] = (
conf.get('ssh_conn_timeout'))
self._client_conf['san_private_key'] = conf.get('san_private_key')
self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url')
self._client_conf['hpe3par_iscsi_ips'] = (
conf.get('hpe3par_iscsi_ips'))
self._client_conf['hpe3par_iscsi_chap_enabled'] = (
conf.get('hpe3par_iscsi_chap_enabled'))
self._client_conf['iscsi_ip_address'] = (
conf.get('iscsi_ip_address'))
self._client_conf['iscsi_port'] = conf.get('iscsi_port')
else:
self._client_conf['hpe3par_cpg'] = (
self.config.hpe3par_cpg)
self._client_conf['hpe3par_username'] = (
self.config.hpe3par_username)
self._client_conf['hpe3par_password'] = (
self.config.hpe3par_password)
self._client_conf['san_ip'] = self.config.san_ip
self._client_conf['san_login'] = self.config.san_login
self._client_conf['san_password'] = self.config.san_password
self._client_conf['san_ssh_port'] = self.config.san_ssh_port
self._client_conf['ssh_conn_timeout'] = (
self.config.ssh_conn_timeout)
self._client_conf['san_private_key'] = self.config.san_private_key
self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url
self._client_conf['hpe3par_iscsi_ips'] = (
self.config.hpe3par_iscsi_ips)
self._client_conf['hpe3par_iscsi_chap_enabled'] = (
self.config.hpe3par_iscsi_chap_enabled)
self._client_conf['iscsi_ip_address'] = (
self.config.iscsi_ip_address)
self._client_conf['iscsi_port'] = self.config.iscsi_port
def _get_cpg_from_cpg_map(self, cpg_map, target_cpg):
ret_target_cpg = None
cpg_pairs = cpg_map.split(' ')
for cpg_pair in cpg_pairs:
cpgs = cpg_pair.split(':')
cpg = cpgs[0]
dest_cpg = cpgs[1]
if cpg == target_cpg:
ret_target_cpg = dest_cpg
return ret_target_cpg
def _generate_hpe3par_cpgs(self, cpg_map):
hpe3par_cpgs = []
cpg_pairs = cpg_map.split(' ')
for cpg_pair in cpg_pairs:
cpgs = cpg_pair.split(':')
hpe3par_cpgs.append(cpgs[1])
return hpe3par_cpgs
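    # cpg_map sketch for the two helpers above: a space-separated list of
    # 'localCPG:remoteCPG' pairs, e.g. 'SrcA:DestA SrcB:DestB' (names are
    # illustrative), so that:
    #   _get_cpg_from_cpg_map(cpg_map, 'SrcA') -> 'DestA'
    #   _generate_hpe3par_cpgs(cpg_map) -> ['DestA', 'DestB']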
def _get_replication_targets(self):
replication_targets = []
for target in self._replication_targets:
replication_targets.append(target['backend_id'])
return replication_targets
def _do_volume_replication_setup(self, volume, retype=False,
dist_type_id=None):
"""This function will do or ensure the following:
-Create volume on main array (already done in create_volume)
-Create Remote Copy Group on main array
-Add volume to Remote Copy Group on main array
-Start remote copy
If anything here fails, we will need to clean everything up in
reverse order, including the original volume.
"""
rcg_name = self._get_3par_rcg_name(volume['id'])
# If the volume is already in a remote copy group, return True
# after starting remote copy. If remote copy is already started,
# issuing this command again will be fine.
if self._is_volume_in_remote_copy_group(volume):
try:
self.client.startRemoteCopy(rcg_name)
except Exception:
pass
return True
try:
# Grab the extra_spec entries for replication and make sure they
# are set correctly.
volume_type = self._get_volume_type(volume["volume_type_id"])
if retype and dist_type_id is not None:
dist_type = self._get_volume_type(dist_type_id)
extra_specs = dist_type.get("extra_specs")
else:
extra_specs = volume_type.get("extra_specs")
replication_mode = extra_specs.get(
self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE)
replication_mode_num = self._get_remote_copy_mode_num(
replication_mode)
replication_sync_period = extra_specs.get(
self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD)
if replication_sync_period:
replication_sync_period = int(replication_sync_period)
if not self._is_replication_mode_correct(replication_mode,
replication_sync_period):
msg = _("The replication mode was not configured correctly "
"in the volume type extra_specs. If replication:mode "
"is periodic, replication:sync_period must also be "
"specified and be between 300 and 31622400 seconds.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
vol_settings = self.get_volume_settings_from_type(volume)
local_cpg = vol_settings['cpg']
vol_name = self._get_3par_vol_name(volume['id'])
# Create remote copy group on main array.
rcg_targets = []
sync_targets = []
for target in self._replication_targets:
                # Only add targets that match the volume's replication mode.
if target['replication_mode'] == replication_mode_num:
cpg = self._get_cpg_from_cpg_map(target['cpg_map'],
local_cpg)
rcg_target = {'targetName': target['backend_id'],
'mode': replication_mode_num,
'snapCPG': cpg,
'userCPG': cpg}
rcg_targets.append(rcg_target)
sync_target = {'targetName': target['backend_id'],
'syncPeriod': replication_sync_period}
sync_targets.append(sync_target)
optional = {'localSnapCPG': vol_settings['snap_cpg'],
'localUserCPG': local_cpg}
pool = volume_utils.extract_host(volume['host'], level='pool')
domain = self.get_domain(pool)
if domain:
optional["domain"] = domain
try:
self.client.createRemoteCopyGroup(rcg_name, rcg_targets,
optional)
except Exception as ex:
msg = (_("There was an error creating the remote copy "
"group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Add volume to remote copy group.
rcg_targets = []
for target in self._replication_targets:
                # Only add targets that match the volume's replication mode.
if target['replication_mode'] == replication_mode_num:
rcg_target = {'targetName': target['backend_id'],
'secVolumeName': vol_name}
rcg_targets.append(rcg_target)
optional = {'volumeAutoCreation': True}
try:
self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name,
rcg_targets,
optional=optional)
except Exception as ex:
msg = (_("There was an error adding the volume to the remote "
"copy group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Check and see if we are in periodic mode. If we are, update
# Remote Copy Group to have a sync period.
if replication_sync_period and (
replication_mode_num == self.PERIODIC):
opt = {'targets': sync_targets}
try:
self.client.modifyRemoteCopyGroup(rcg_name, opt)
except Exception as ex:
msg = (_("There was an error setting the sync period for "
"the remote copy group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Start the remote copy.
try:
self.client.startRemoteCopy(rcg_name)
except Exception as ex:
msg = (_("There was an error starting remote copy: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return True
except Exception as ex:
self._do_volume_replication_destroy(volume)
msg = (_("There was an error setting up a remote copy group "
"on the 3PAR arrays: ('%s'). The volume will not be "
"recognized as replication type.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _do_volume_replication_destroy(self, volume, rcg_name=None,
retype=False):
"""This will completely remove all traces of a remote copy group.
It should be used when deleting a replication enabled volume
or if setting up a remote copy group fails. It will try and do the
following:
-Stop remote copy
-Remove volume from Remote Copy Group on main array
-Delete Remote Copy Group from main array
-Delete volume from main array
"""
if not rcg_name:
rcg_name = self._get_3par_rcg_name(volume['id'])
vol_name = self._get_3par_vol_name(volume['id'])
# Stop remote copy.
try:
self.client.stopRemoteCopy(rcg_name)
except Exception:
pass
# Delete volume from remote copy group on main array.
try:
self.client.removeVolumeFromRemoteCopyGroup(
rcg_name, vol_name, removeFromTarget=True)
except Exception:
pass
# Delete remote copy group on main array.
try:
self.client.removeRemoteCopyGroup(rcg_name)
except Exception:
pass
# Delete volume on the main array.
try:
if not retype:
self.client.deleteVolume(vol_name)
except hpeexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
self._delete_vvset(volume)
self.client.deleteVolume(vol_name)
except Exception:
pass
def _delete_replicated_failed_over_volume(self, volume):
location = volume.get('provider_location')
rcg_name = self._get_3par_remote_rcg_name(volume['id'], location)
targets = self.client.getRemoteCopyGroup(rcg_name)['targets']
# When failed over, we want to temporarily disable config mirroring
# in order to be allowed to delete the volume and remote copy group
for target in targets:
target_name = target['targetName']
self.client.toggleRemoteCopyConfigMirror(target_name,
mirror_config=False)
        # Do a regular replication destroy now that config mirroring is off.
try:
self._do_volume_replication_destroy(volume, rcg_name)
except Exception as ex:
msg = (_("The failed-over volume could not be deleted: %s") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
finally:
# Turn config mirroring back on
for target in targets:
target_name = target['targetName']
self.client.toggleRemoteCopyConfigMirror(target_name,
mirror_config=True)
def _delete_vvset(self, volume):
# volume is part of a volume set.
volume_name = self._get_3par_vol_name(volume['id'])
vvset_name = self.client.findVolumeSet(volume_name)
LOG.debug("Returned vvset_name = %s", vvset_name)
if vvset_name is not None:
if vvset_name.startswith('vvs-'):
# We have a single volume per volume set, so
# remove the volume set.
self.client.deleteVolumeSet(
self._get_3par_vvs_name(volume['id']))
else:
                # We have a pre-defined volume set; just remove the
                # volume and leave the volume set.
self.client.removeVolumeFromVolumeSet(vvset_name,
volume_name)
class TaskWaiter(object):
"""TaskWaiter waits for task to be not active and returns status."""
def __init__(self, client, task_id, interval=1, initial_delay=0):
self.client = client
self.task_id = task_id
self.interval = interval
self.initial_delay = initial_delay
def _wait_for_task(self):
status = self.client.getTask(self.task_id)
LOG.debug("3PAR Task id %(id)s status = %(status)s",
{'id': self.task_id,
'status': status['status']})
if status['status'] is not self.client.TASK_ACTIVE:
raise loopingcall.LoopingCallDone(status)
def wait_for_task(self):
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_task)
return timer.start(interval=self.interval,
initial_delay=self.initial_delay).wait()
class ReplicateVolumeTask(flow_utils.CinderTask):
"""Task to replicate a volume.
This is a task for adding/removing the replication feature to volume.
It is intended for use during retype(). This task has no revert.
    # TODO(sumit): revert to the original volume extra-spec
"""
def __init__(self, action, **kwargs):
super(ReplicateVolumeTask, self).__init__(addons=[action])
def execute(self, common, volume, new_type_id):
new_replicated_type = False
if new_type_id:
new_volume_type = common._get_volume_type(new_type_id)
extra_specs = new_volume_type.get('extra_specs', None)
if extra_specs and 'replication_enabled' in extra_specs:
rep_val = extra_specs['replication_enabled']
new_replicated_type = (rep_val == "<is> True")
if common._volume_of_replicated_type(volume) and new_replicated_type:
            # Retype from one replication-enabled type to another.
common._do_volume_replication_destroy(volume, retype=True)
common._do_volume_replication_setup(
volume,
retype=True,
dist_type_id=new_type_id)
elif (not common._volume_of_replicated_type(volume)
and new_replicated_type):
            # Retype from replication disabled to replication enabled.
common._do_volume_replication_setup(
volume,
retype=True,
dist_type_id=new_type_id)
elif common._volume_of_replicated_type(volume):
            # Retype from replication enabled to replication disabled.
common._do_volume_replication_destroy(volume, retype=True)
class ModifyVolumeTask(flow_utils.CinderTask):
"""Task to change a volume's snapCPG and comment.
This is a task for changing the snapCPG and comment. It is intended for
use during retype(). These changes are done together with a single
modify request which should be fast and easy to revert.
Because we do not support retype with existing snapshots, we can change
the snapCPG without using a keepVV. If snapshots exist, then this will
fail, as desired.
This task does not change the userCPG or provisioningType. Those changes
may require tunevv, so they are done by the TuneVolumeTask.
The new comment will contain the new type, VVS and QOS information along
with whatever else was in the old comment dict.
The old comment and snapCPG are restored if revert is called.
"""
def __init__(self, action):
self.needs_revert = False
super(ModifyVolumeTask, self).__init__(addons=[action])
def _get_new_comment(self, old_comment, new_vvs, new_qos,
new_type_name, new_type_id):
# Modify the comment during ModifyVolume
comment_dict = dict(ast.literal_eval(old_comment))
if 'vvs' in comment_dict:
del comment_dict['vvs']
if 'qos' in comment_dict:
del comment_dict['qos']
if new_vvs:
comment_dict['vvs'] = new_vvs
elif new_qos:
comment_dict['qos'] = new_qos
else:
comment_dict['qos'] = {}
if new_type_name:
comment_dict['volume_type_name'] = new_type_name
else:
comment_dict.pop('volume_type_name', None)
if new_type_id:
comment_dict['volume_type_id'] = new_type_id
else:
comment_dict.pop('volume_type_id', None)
return comment_dict
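    # Worked example with illustrative inputs:
    #   old_comment="{'display_name': 'v1', 'vvs': 'oldvvs'}",
    #   new_vvs=None, new_qos={'maxIOPS': 1000},
    #   new_type_name='gold', new_type_id='1234'
    # produces {'display_name': 'v1', 'qos': {'maxIOPS': 1000},
    # 'volume_type_name': 'gold', 'volume_type_id': '1234'}.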
def execute(self, common, volume_name, old_snap_cpg, new_snap_cpg,
old_comment, new_vvs, new_qos, new_type_name, new_type_id):
comment_dict = self._get_new_comment(
old_comment, new_vvs, new_qos, new_type_name, new_type_id)
if new_snap_cpg != old_snap_cpg:
# Modify the snap_cpg. This will fail with snapshots.
LOG.info("Modifying %(volume_name)s snap_cpg from "
"%(old_snap_cpg)s to %(new_snap_cpg)s.",
{'volume_name': volume_name,
'old_snap_cpg': old_snap_cpg,
'new_snap_cpg': new_snap_cpg})
common.client.modifyVolume(
volume_name,
{'snapCPG': new_snap_cpg,
'comment': json.dumps(comment_dict)})
self.needs_revert = True
else:
LOG.info("Modifying %s comments.", volume_name)
common.client.modifyVolume(
volume_name,
{'comment': json.dumps(comment_dict)})
self.needs_revert = True
def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg,
old_comment, **kwargs):
if self.needs_revert:
LOG.info("Retype revert %(volume_name)s snap_cpg from "
"%(new_snap_cpg)s back to %(old_snap_cpg)s.",
{'volume_name': volume_name,
'new_snap_cpg': new_snap_cpg,
'old_snap_cpg': old_snap_cpg})
try:
common.client.modifyVolume(
volume_name,
{'snapCPG': old_snap_cpg, 'comment': old_comment})
except Exception as ex:
LOG.error("Exception during snapCPG revert: %s", ex)
class TuneVolumeTask(flow_utils.CinderTask):
"""Task to change a volume's CPG and/or provisioning type.
This is a task for changing the CPG and/or provisioning type.
It is intended for use during retype().
This task has no revert. The current design is to do this task last
and do revert-able tasks first. Un-doing a tunevv can be expensive
and should be avoided.
"""
def __init__(self, action, **kwargs):
super(TuneVolumeTask, self).__init__(addons=[action])
def execute(self, common, old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name, new_compression):
common.tune_vv(old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name, new_compression)
class ModifySpecsTask(flow_utils.CinderTask):
"""Set/unset the QOS settings and/or VV set for the volume's new type.
This is a task for changing the QOS settings and/or VV set. It is intended
for use during retype(). If changes are made during execute(), then they
need to be undone if revert() is called (i.e., if a later task fails).
For 3PAR, we ignore QOS settings if a VVS is explicitly set, otherwise we
create a VV set and use that for QOS settings. That is why they are lumped
together here. Most of the decision-making about VVS vs. QOS settings vs.
old-style scoped extra-specs is handled in existing reusable code. Here
we mainly need to know what old stuff to remove before calling the function
that knows how to set the new stuff.
Basic task flow is as follows: Remove the volume from the old externally
created VVS (when appropriate), delete the old cinder-created VVS, call
the function that knows how to set a new VVS or QOS settings.
If any changes are made during execute, then revert needs to reverse them.
"""
def __init__(self, action):
self.needs_revert = False
super(ModifySpecsTask, self).__init__(addons=[action])
def execute(self, common, volume_name, volume, old_cpg, new_cpg,
old_vvs, new_vvs, old_qos, new_qos,
old_flash_cache, new_flash_cache):
if (old_vvs != new_vvs or
old_qos != new_qos or
old_flash_cache != new_flash_cache):
# Remove VV from old VV Set.
if old_vvs is not None and old_vvs != new_vvs:
common.client.removeVolumeFromVolumeSet(old_vvs,
volume_name)
self.needs_revert = True
# If any extra or qos specs changed then remove the old
# special VV set that we create. We'll recreate it
# as needed.
vvs_name = common._get_3par_vvs_name(volume['id'])
try:
common.client.deleteVolumeSet(vvs_name)
self.needs_revert = True
except hpeexceptions.HTTPNotFound as ex:
# HTTPNotFound(code=102) is OK. Set does not exist.
if ex.get_code() != 102:
LOG.error("Unexpected error when retype() tried to "
"deleteVolumeSet(%s)", vvs_name)
raise
if new_vvs or new_qos or new_flash_cache:
common._add_volume_to_volume_set(
volume, volume_name, new_cpg, new_vvs, new_qos,
new_flash_cache)
self.needs_revert = True
def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos,
old_cpg, **kwargs):
if self.needs_revert:
# If any extra or qos specs changed then remove the old
# special VV set that we create and recreate it per
# the old type specs.
vvs_name = common._get_3par_vvs_name(volume['id'])
try:
common.client.deleteVolumeSet(vvs_name)
except hpeexceptions.HTTPNotFound as ex:
# HTTPNotFound(code=102) is OK. Set does not exist.
if ex.get_code() != 102:
LOG.error("Unexpected error when retype() revert "
"tried to deleteVolumeSet(%s)", vvs_name)
except Exception:
LOG.error("Unexpected error when retype() revert "
"tried to deleteVolumeSet(%s)", vvs_name)
if old_vvs is not None or old_qos is not None:
try:
common._add_volume_to_volume_set(
volume, volume_name, old_cpg, old_vvs, old_qos)
except Exception as ex:
LOG.error("%(exception)s: Exception during revert of "
"retype for volume %(volume_name)s. "
"Original volume set/QOS settings may not "
"have been fully restored.",
{'exception': ex, 'volume_name': volume_name})
if new_vvs is not None and old_vvs != new_vvs:
try:
common.client.removeVolumeFromVolumeSet(
new_vvs, volume_name)
except Exception as ex:
LOG.error("%(exception)s: Exception during revert of "
"retype for volume %(volume_name)s. "
"Failed to remove from new volume set "
"%(new_vvs)s.",
{'exception': ex,
'volume_name': volume_name,
'new_vvs': new_vvs})
| 44.083436 | 79 | 0.561765 |
a600364e51dbc092c8ea42f96e544c09d406eac3 | 7,169 | py | Python | schema_test.py | krummas/cassandra-dtest | 7e3bcfd52fbc926b4c43e258a7e0efa19e1ca13d | [
"Apache-2.0"
] | 52 | 2015-02-13T15:49:03.000Z | 2020-11-15T10:59:20.000Z | schema_test.py | krummas/cassandra-dtest | 7e3bcfd52fbc926b4c43e258a7e0efa19e1ca13d | [
"Apache-2.0"
] | 1,232 | 2015-01-05T19:31:26.000Z | 2020-06-07T02:59:43.000Z | schema_test.py | krummas/cassandra-dtest | 7e3bcfd52fbc926b4c43e258a7e0efa19e1ca13d | [
"Apache-2.0"
] | 91 | 2015-02-23T23:58:44.000Z | 2020-05-24T11:05:03.000Z | import time
from cassandra.concurrent import execute_concurrent_with_args
from tools.assertions import assert_invalid, assert_all, assert_one
from dtest import Tester, create_ks
class TestSchema(Tester):
def table_alteration_test(self):
"""
Tests that table alters return as expected with many sstables at different schema points
"""
cluster = self.cluster
cluster.populate(1).start()
node1, = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
session.execute("use ks;")
session.execute("create table tbl_o_churn (id int primary key, c0 text, c1 text) "
"WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': 1024, 'max_threshold': 1024 };")
stmt1 = session.prepare("insert into tbl_o_churn (id, c0, c1) values (?, ?, ?)")
rows_to_insert = 50
for n in range(5):
parameters = [(x, 'aaa', 'bbb') for x in range(n * rows_to_insert, (n * rows_to_insert) + rows_to_insert)]
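            # e.g. for n=0 this builds [(0, 'aaa', 'bbb'), (1, 'aaa', 'bbb'),
            # ..., (49, 'aaa', 'bbb')]: one new partition per tuple.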
execute_concurrent_with_args(session, stmt1, parameters, concurrency=rows_to_insert)
node1.flush()
session.execute("alter table tbl_o_churn add c2 text")
session.execute("alter table tbl_o_churn drop c0")
stmt2 = session.prepare("insert into tbl_o_churn (id, c1, c2) values (?, ?, ?);")
for n in range(5, 10):
parameters = [(x, 'ccc', 'ddd') for x in range(n * rows_to_insert, (n * rows_to_insert) + rows_to_insert)]
execute_concurrent_with_args(session, stmt2, parameters, concurrency=rows_to_insert)
node1.flush()
rows = session.execute("select * from tbl_o_churn")
for row in rows:
if row.id < rows_to_insert * 5:
self.assertEqual(row.c1, 'bbb')
self.assertIsNone(row.c2)
self.assertFalse(hasattr(row, 'c0'))
else:
self.assertEqual(row.c1, 'ccc')
self.assertEqual(row.c2, 'ddd')
self.assertFalse(hasattr(row, 'c0'))
def drop_column_compact_test(self):
session = self.prepare()
session.execute("USE ks")
session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int) WITH COMPACT STORAGE")
assert_invalid(session, "ALTER TABLE cf DROP c1", "Cannot drop columns from a")
def drop_column_compaction_test(self):
session = self.prepare()
session.execute("USE ks")
session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
# insert some data.
session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")
# drop and readd c1.
session.execute("ALTER TABLE cf DROP c1")
session.execute("ALTER TABLE cf ADD c1 int")
# add another row.
session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")
node = self.cluster.nodelist()[0]
node.flush()
node.compact()
# test that c1 values have been compacted away.
session = self.patient_cql_connection(node)
assert_all(session, "SELECT c1 FROM ks.cf", [[None], [None], [None], [4]], ignore_order=True)
def drop_column_queries_test(self):
session = self.prepare()
session.execute("USE ks")
session.execute("CREATE TABLE cf (key int PRIMARY KEY, c1 int, c2 int)")
session.execute("CREATE INDEX ON cf(c2)")
# insert some data.
session.execute("INSERT INTO cf (key, c1, c2) VALUES (0, 1, 2)")
session.execute("INSERT INTO cf (key, c1, c2) VALUES (1, 2, 3)")
session.execute("INSERT INTO cf (key, c1, c2) VALUES (2, 3, 4)")
# drop and readd c1.
session.execute("ALTER TABLE cf DROP c1")
session.execute("ALTER TABLE cf ADD c1 int")
# add another row.
session.execute("INSERT INTO cf (key, c1, c2) VALUES (3, 4, 5)")
# test that old (pre-drop) c1 values aren't returned and new ones are.
assert_all(session, "SELECT c1 FROM cf", [[None], [None], [None], [4]], ignore_order=True)
assert_all(session, "SELECT * FROM cf", [[0, None, 2], [1, None, 3], [2, None, 4], [3, 4, 5]], ignore_order=True)
assert_one(session, "SELECT c1 FROM cf WHERE key = 0", [None])
assert_one(session, "SELECT c1 FROM cf WHERE key = 3", [4])
assert_one(session, "SELECT * FROM cf WHERE c2 = 2", [0, None, 2])
assert_one(session, "SELECT * FROM cf WHERE c2 = 5", [3, 4, 5])
def drop_column_and_restart_test(self):
"""
        Insert data into a table, drop a column involved in the insert, and restart the node afterwards.
        This verifies that the dropped_columns system table is properly flushed on the alter; otherwise
        the restart fails as in CASSANDRA-11050.
@jira_ticket CASSANDRA-11050
"""
session = self.prepare()
session.execute("USE ks")
session.execute("CREATE TABLE t (k int PRIMARY KEY, c1 int, c2 int)")
session.execute("INSERT INTO t (k, c1, c2) VALUES (0, 0, 0)")
session.execute("ALTER TABLE t DROP c2")
assert_one(session, "SELECT * FROM t", [0, 0])
self.cluster.stop()
self.cluster.start()
session = self.patient_cql_connection(self.cluster.nodelist()[0])
session.execute("USE ks")
assert_one(session, "SELECT * FROM t", [0, 0])
def drop_static_column_and_restart_test(self):
"""
        Dropping a static column used to cause an sstable corruption exception after restarting;
        here we test that we can drop a static column and restart safely.
@jira_ticket CASSANDRA-12582
"""
session = self.prepare()
session.execute("USE ks")
session.execute("CREATE TABLE ts (id1 int, id2 int, id3 int static, val text, PRIMARY KEY (id1, id2))")
session.execute("INSERT INTO ts (id1, id2, id3, val) VALUES (1, 1, 0, 'v1')")
session.execute("INSERT INTO ts (id1, id2, id3, val) VALUES (1, 2, 0, 'v2')")
session.execute("INSERT INTO ts (id1, id2, id3, val) VALUES (2, 1, 1, 'v3')")
self.cluster.nodelist()[0].nodetool('flush ks ts')
assert_all(session, "SELECT * FROM ts", [[1, 1, 0, 'v1'], [1, 2, 0, 'v2'], [2, 1, 1, 'v3']])
session.execute("alter table ts drop id3")
assert_all(session, "SELECT * FROM ts", [[1, 1, 'v1'], [1, 2, 'v2'], [2, 1, 'v3']])
self.cluster.stop()
self.cluster.start()
session = self.patient_cql_connection(self.cluster.nodelist()[0])
session.execute("USE ks")
assert_all(session, "SELECT * FROM ts", [[1, 1, 'v1'], [1, 2, 'v2'], [2, 1, 'v3']])
def prepare(self):
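        # Start a single-node cluster and create keyspace 'ks' with RF=1.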
cluster = self.cluster
cluster.populate(1).start()
time.sleep(.5)
nodes = cluster.nodelist()
session = self.patient_cql_connection(nodes[0])
create_ks(session, 'ks', 1)
return session
| 39.607735 | 134 | 0.605524 |
809f7c57f3fe0f19c2e0f5ba9bf0d0c73c7e22b4 | 1,694 | py | Python | setup.py | Raihana07/django-simple-history | 9b957a55313cc628d9f2b3fa1484598ef09208e4 | ["BSD-3-Clause"] | null | null | null | setup.py | Raihana07/django-simple-history | 9b957a55313cc628d9f2b3fa1484598ef09208e4 | ["BSD-3-Clause"] | null | null | null | setup.py | Raihana07/django-simple-history | 9b957a55313cc628d9f2b3fa1484598ef09208e4 | ["BSD-3-Clause"] | 2 | 2021-10-01T06:54:23.000Z | 2021-10-05T06:31:23.000Z | from setuptools import setup
import simple_history
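# Dependencies needed only for running the test suite.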
tests_require = [
'Django>=1.11', 'WebTest==2.0.24', 'django-webtest==1.8.0', 'mock==1.0.1',
'six']
setup(
name='django-simple-history',
version=simple_history.__version__,
description='Store model history and view/revert changes from admin site.',
long_description='\n'.join((
open('README.rst').read(),
open('CHANGES.rst').read(),
)),
author='Corey Bertram',
author_email='corey@qr7.com',
maintainer='Trey Hunner',
url='https://github.com/treyhunner/django-simple-history',
packages=[
'simple_history', 'simple_history.management',
'simple_history.management.commands', 'simple_history.templatetags'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Django",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
"Framework :: Django :: 2.1",
"Framework :: Django :: 2.2",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
"License :: OSI Approved :: BSD License",
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
tests_require=tests_require,
install_requires=["six"],
include_package_data=True,
test_suite='runtests.main',
)
| 35.291667 | 79 | 0.600354 |