| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: b194d8704816a0157d770f0945bf947a71c4526d | size: 6,737 | ext: py | lang: Python
max_stars: wanderbits/things.py | Who8MyLunch/WanderBits | 058685971f5ab2083c9fdd7bd2eba960c2ae5992 | ["MIT"] | stars: null | null | null
max_issues: wanderbits/things.py | Who8MyLunch/WanderBits | 058685971f5ab2083c9fdd7bd2eba960c2ae5992 | ["MIT"] | issues: 1 | 2018-01-13T20:53:38.000Z | 2018-01-13T20:53:38.000Z
max_forks: wanderbits/things.py | Who8MyLunch/WanderBits | 058685971f5ab2083c9fdd7bd2eba960c2ae5992 | ["MIT"] | forks: null | null | null
#!/usr/bin/python
from __future__ import division, print_function, unicode_literals
"""
Things class for WanderBits, a text-based adventure game.
"""
import abc
import errors
# Helpers
def find_thing(many_things, name):
"""
Find a matching Thing by name.
"""
if not isinstance(name, basestring):
msg = 'name must be a string: {:s}'.format(str(name))
raise errors.ThingError(msg)
for t in many_things:
if t.name.lower() == name.lower():
return t
msg = 'Unable to find matching Thing: {:s}'.format(name)
raise errors.FindThingError(msg)
#################################################
class Thing(object):
"""
Things class for WanderBits, a text-based adventure game.
This class is a base class. Inherit from this class to implement
a particular game item.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, **kwargs):
"""
Initialize Thing class.
Each kind of game item needs to be implemented as a subclass of
the Thing base class.
"""
base_property_keys = ['name', 'description']
self._properties = {}
self.update_properties(base_property_keys, kwargs)
# Things are able to contain other Things.
self._container = []
# Which Thing contains the current Thing.
self._parent = None
def __repr__(self):
return 'Thing [{:s}]'.format(self.name)
def update_properties(self, property_keys, mapping):
"""Update this Thing's inherent property values.
"""
for k in property_keys:
try:
self._properties[k] = mapping[k]
except KeyError:
print(k)
raise
@property
def name(self):
"""This Thing's characteristic name.
"""
return self._properties['name']
@property
def kind(self):
"""This Thing's characteristic kind of thing.
"""
return self._properties['kind']
@property
def description(self):
"""This Thing's description.
"""
return self._properties['description']
@property
def size(self):
"""This Thing's physical size.
"""
try:
return self._properties['size']
except KeyError:
return 0
# msg = "Thing has no size: {:s}".format(self.name)
# raise errors.ThingError(msg)
@property
def capacity(self):
"""This Thing's physical size.
"""
try:
return self._properties['capacity']
except KeyError:
return 0
# msg = "Thing has no capacity: {:s}".format(self.name)
# raise errors.ThingError(msg)
@property
def parent(self):
"""Another Thing that contains self.
"""
return self._parent
@parent.setter
def parent(self, value):
if isinstance(value, Thing) or value is None:
# TODO: I don't like having None here as a valid input.
self._parent = value
else:
msg = 'Parent must be a Thing: {:s}'.format(str(value))
raise errors.ThingError(msg)
def add(self, obj):
"""Place new object inside oneself.
"""
if not isinstance(obj, Thing):
msg = 'Object must be a Thing: {:s}'.format(str(obj))
raise errors.ThingError(msg)
if obj in self._container:
msg = '{:s} already contains {:s}'.format(self, obj)
raise errors.ThingError(msg)
if self.available_space < obj.size:
msg = 'Not enough room in {:s} to contain {:s}'.format(self, obj)
raise errors.ThingError(msg)
# Add to container, update its parent.
self._container.append(obj)
obj.parent = self
def remove(self, obj):
"""Remove object from oneself.
"""
try:
# Remove from container, remove self as parent.
self._container.remove(obj)
obj.parent = None
except ValueError:
msg = '{:s} does not contain {:s}'.format(self, obj)
raise errors.ThingError(msg)
@property
def container(self):
"""A list of Things contained by this Thing.
"""
return self._container
@property
def available_space(self):
"""Amount of space inside this Thing available for storing more Things.
"""
contained_size = 0
for T in self._container:
contained_size += T.size
return self.capacity - contained_size
#################################################
#################################################
# A nice discussion that clarifies inheriting from an abstract class
# and also using super():
# http://pymotw.com/2/abc/#concrete-methods-in-abcs
class Room(Thing):
"""Room object.
"""
property_keys = ['connections', 'size', 'capacity']
def __init__(self, **kwargs):
super(Room, self).__init__(**kwargs)
self.update_properties(self.property_keys, kwargs)
self.update_properties(['kind'], {'kind': 'room'})
@property
def connections(self):
"""
Mapping to other rooms.
"""
return self._properties['connections']
#################################################
class Item(Thing):
"""Item object.
"""
property_keys = ['size', 'capacity']
def __init__(self, **kwargs):
super(Item, self).__init__(**kwargs)
self.update_properties(self.property_keys, kwargs)
self.update_properties(['kind'], {'kind': 'item'})
#################################################
class User(Thing):
"""User object.
"""
property_keys = ['size', 'capacity']
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
self.update_properties(self.property_keys, kwargs)
self.update_properties(['kind'], {'kind': 'user'})
@property
def local_things(self):
"""
Return list of Things that are nearby.
These are Things that may be either physically manipulated or observed.
This includes the current room, Things in the room, Things held
by the user. Does not include Things inside Things held
by the user.
"""
# User should be contained by a room.
room = self.parent
# List of things.
things = [room] + room.container + self.container
# Remove self from list.
# things.remove(self)
return things
#################################################
if __name__ == '__main__':
pass
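# A minimal usage sketch (added for illustration, not part of the original
# module; the names and property values below are made up):
#
#   hall = Room(name='Hall', description='A draughty hall.',
#               connections={}, size=100, capacity=50)
#   sword = Item(name='Sword', description='A rusty sword.', size=2, capacity=0)
#   hall.add(sword)                                  # checks available_space first
#   assert find_thing(hall.container, 'sword') is sword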
avg_line_length: 26.840637 | max_line_length: 79 | alphanum_fraction: 0.556776

hexsha: bd7208a83cbbf70db8fc27535d30e6b93a9f4345 | size: 642 | ext: py | lang: Python
max_stars: session2/parantheses1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | stars: null | null | null
max_issues: session2/parantheses1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | issues: null | null | null
max_forks: session2/parantheses1.py | MrCsabaToth/IK | 713f91c28af7b4a964ba854ede9fec73bf0c4682 | ["Apache-2.0"] | forks: null | null | null
def parantheses_helper(opener, closer, n, slate):
# Backtracking
# if closer < opener:
# return []
# Leaf node
if opener == 0 and closer == 0:
return ["".join(slate)]
result = []
# Internal node
# Opener
if opener > 0:
slate.append("(")
result.extend(parantheses_helper(opener - 1, closer, n, slate))
slate.pop()
# Closer
if closer > opener and opener < n:
slate.append(")")
result.extend(parantheses_helper(opener, closer - 1, n, slate))
slate.pop()
return result
def parantheses(n):
return parantheses_helper(n, n, n, [])
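# Usage sketch (added for illustration, not in the original file): for n = 3
# the helper enumerates every balanced string of three "(" and three ")".
#
#   combos = parantheses(3)
#   # len(combos) == 5; the set of results is
#   # {"((()))", "(()())", "(())()", "()(())", "()()()"}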
avg_line_length: 22.137931 | max_line_length: 71 | alphanum_fraction: 0.570093

hexsha: 3a35c678e5d00a185666fd77a2ed3ad7bedf72ef | size: 3,465 | ext: py | lang: Python
max_stars: magic_the_decorating/importer.py | jayvdb/magic_the_decorating | c08fb0c21e03376ddd28c9ce63b24f1c1122ceca | ["Apache-2.0"] | stars: 2 | 2015-11-02T22:06:26.000Z | 2019-10-28T12:40:37.000Z
max_issues: magic_the_decorating/importer.py | jayvdb/magic_the_decorating | c08fb0c21e03376ddd28c9ce63b24f1c1122ceca | ["Apache-2.0"] | issues: 1 | 2019-10-28T12:33:21.000Z | 2019-10-28T12:33:21.000Z
max_forks: magic_the_decorating/importer.py | jayvdb/magic_the_decorating | c08fb0c21e03376ddd28c9ce63b24f1c1122ceca | ["Apache-2.0"] | forks: 3 | 2015-11-02T22:11:08.000Z | 2019-10-31T01:29:25.000Z
import sys
from config import load as load_config
from loaders import CallableLoader, ModuleLoader
class Finder(object):
"""
Custom importer that should follow PEP 302.
"""
def __init__(self, config_filename):
"""
Initializes the finder. Accepts a file name containing a YAML dictionary
describing how to decorate imports.
"""
self.config_filename = config_filename
self.config = load_config(self.config_filename)
callable_loader = CallableLoader()
self.callables = {}
for module_name, module_config in self.config.items():
self.callables[module_name] = \
callable_loader.load(module_config['callable'])
def find_module(self, fullname, path=None):
"""
Method to find a module. Check whether the module is one we are interested in.
Return None if not interested, or a Loader if interested.
"""
if fullname in self.config:
return Loader(path,
self.callables[fullname],
self.config[fullname].get('config'))
return None
class Loader(object):
"""
Custom loader that should follow PEP 302.
"""
decorated_key = '__magic_the_decorated__'
def __init__(self, path, callable_, callable_config):
"""
@param path - Path passed to find module
@param callable_ - Callable to apply to module
@param callable_config - Dictionary to configure callable
"""
self._path = path
self._callable = callable_
self._callable_config = callable_config
def is_decorated(self, module):
"""
Return whether or not the object had an attribute set indicating
that it has already been decorated.
@param module - Module to check
@return Boolean
"""
return hasattr(module, self.decorated_key)
def set_decorated(self, module):
"""
Sets the decorated attribute.
@param module - Module to flag as decorated
"""
setattr(module, self.decorated_key, True)
def load_module(self, fullname):
"""
If an existing object named fullname is in sys.modules,
the loader must use that module before running code.
If sys.modules does not contain fullname, then a new module
object must be added to sys.modules before running any code.
If the load fails, the loader needs to remove any module that may
have been inserted into sys.modules. If the module was already
in sys.modules, the loader needs to leave it alone.
A loaded module must have the __file__ attribute set
A loaded module must have the __name__ attribute set
A loaded module must have the __loader__ attribute set
- should be this module
The package attribute should be set
"""
if fullname in sys.modules:
module = sys.modules[fullname]
existing = True
else:
module = ModuleLoader().load(fullname, self._path)
existing = False
if not self.is_decorated(module):
try:
module = self._callable(module, self._callable_config)
self.set_decorated(module)
except Exception:
if fullname in sys.modules and not existing:
del sys.modules[fullname]
return module
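# Usage sketch (added for illustration; the YAML file name below is
# hypothetical): PEP 302 finders are activated by placing them on
# sys.meta_path, after which imports of modules named in the config are
# wrapped by the configured callable.
#
#   import sys
#   sys.meta_path.insert(0, Finder('decorators.yaml'))
#   import some_module  # decorated on import if listed in decorators.yaml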
avg_line_length: 30.9375 | max_line_length: 76 | alphanum_fraction: 0.619336

hexsha: 9022b3aa13307edadc082384b6d193f727342757 | size: 13,370 | ext: py | lang: Python
max_stars: GRU-CFA/Codes/mainClef.py | cs-chan/Deep-Plant | 079fdc538585efa5eab9b5bfef48654a89748b3f | ["BSD-3-Clause"] | stars: 81 | 2017-06-24T14:07:18.000Z | 2022-02-04T14:31:22.000Z
max_issues: GRU-CFA/Codes/mainClef.py | oldfemalepig/Deep-Plant | 42967fa6bc0a30a65caeccc67af44b32492ef449 | ["BSD-3-Clause"] | issues: 2 | 2020-03-24T01:31:47.000Z | 2020-03-29T03:26:11.000Z
max_forks: GRU-CFA/Codes/mainClef.py | oldfemalepig/Deep-Plant | 42967fa6bc0a30a65caeccc67af44b32492ef449 | ["BSD-3-Clause"] | forks: 35 | 2017-06-04T07:30:54.000Z | 2021-09-23T00:04:12.000Z
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 12:26:45 2017
@author: root
"""
import tensorflow as tf
import numpy as np
import os
import struct
import scipy.io as sio
from array import array as pyarray
from numpy import array, int8, uint8, zeros
import collections
import pickle
import functools
import sets
from tensorflow.python.ops import rnn, array_ops
from tensorflow.contrib.rnn import GRUCell, DropoutWrapper, MultiRNNCell
from attn_7_1_ex import VariableSequenceClassification
from temp_createStruct5 import ConstructLookupTable
from time import gmtime, strftime
from logging_util import makelog
logfile=makelog()
class DataSet(object):
def __init__(self, layername, numMap):
"""Construct a DataSet."""
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_list.mat')
self._trainList = mat_contents['train_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_class.mat')
self._trainLabels = mat_contents['train_obs_class']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_list.mat')
self._testList = mat_contents['test_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_class.mat')
self._testLabels = mat_contents['test_obs_class']
self.layerextract = layername
self.numMap = numMap
self._num_examples = self._trainLabels.shape[0]
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
self._trainLabelsPerm = self._trainLabels[self._perm_list]
self._num_testexamples = self._testLabels.shape[0]
self._perm_list_test = np.arange(self._num_testexamples)
self._batch_seq = 0
self._epochs_completed = 0
self._index_in_epoch = 0
self._index_in_epoch_test = 0
self._max_seq = 0
self.Batch_Up_model = ConstructLookupTable()
self.mydict2_test256 = self.Batch_Up_model.main(self._testList,2) # for train_testID != 1
self.feature_size_conv = self.numMap*14*14
self.feature_size_fc = 4096
def trainList(self):
return self._trainList
def trainLabels(self):
return self._trainLabels
def trainLabelsPerm(self):
return self._trainLabelsPerm
def testList(self):
return self._testList
def testLabels(self):
return self._testLabels
def num_examples(self):
return self._num_examples
def num_testexamples(self):
return self._num_testexamples
def epochs_completed(self):
return self._epochs_completed
def index_in_epoch(self):
return self._index_in_epoch
def max_seq(self):
return self._max_seq
def batch_seq(self):
return self._batch_seq
def PrepareTrainingBatch(self,Newpermbatch, batch_size, indicator):
if indicator == 1:
mydictG = self.Batch_Up_model.main(self._trainList,1) # for train_testID == 1
else:
mydictG = self.mydict2_test256
i = 0
temp = np.zeros(batch_size)
while i < batch_size:
temp[i] = len(mydictG[Newpermbatch[i]][1])
i = i + 1
self._max_seq = int(np.amax(temp))
self._batch_seq = temp
batch_conv = np.zeros([batch_size,self._max_seq,self.feature_size_conv])
batch_fc = np.zeros([batch_size,self._max_seq,self.feature_size_fc])
i = 0
while i < batch_size:
media_length = len(mydictG[Newpermbatch[i]][1])
j = 0
while j < media_length:
### for 256 image size for testing
pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
output = pickle.load(pkl_file1)
pkl_file1.close()
pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
output2 = pickle.load(pkl_file2)
pkl_file2.close()
pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
output3 = pickle.load(pkl_file3)
pkl_file3.close()
output.update(output2)
output.update(output3)
mat_contents = output[self.layerextract[0]]
batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
mat_contents = output[self.layerextract[1]]
batch_fc[i][j][:] = mat_contents #'convfc7'
j = j + 1
## for 384,512 image size for testing
# if indicator == 1: # training ###################
# pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
# output = pickle.load(pkl_file1)
# pkl_file1.close()
#
# pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
# output2 = pickle.load(pkl_file2)
# pkl_file2.close()
#
# pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
# output3 = pickle.load(pkl_file3)
# pkl_file3.close()
#
# output.update(output2)
# output.update(output3)
# mat_contents = output[self.layerextract[0]]
# batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
#
# mat_contents = output[self.layerextract[1]]
# batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3_O'
#
# j = j + 1
#
# else: # testing
#
# pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
# output = pickle.load(pkl_file1)
# pkl_file1.close()
#
# pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
# output2 = pickle.load(pkl_file2)
# pkl_file2.close()
#
# output.update(output2)
# mat_contents = output[self.layerextract[0]]
# batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
#
# mat_contents = output[self.layerextract[1]]
# batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3_O'
#
# j = j + 1
#########################################################
# randomly shuffle organ sequences
if indicator == 1:
J = np.arange(media_length)
np.random.shuffle(J)
temp_arr = batch_conv[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_conv[i,:media_length,:] = temp_arr
temp_arr = batch_fc[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_fc[i,:media_length,:] = temp_arr
i = i + 1
return batch_fc, batch_conv
def dense_to_one_hot(self,labels_dense, num_classes=1000):
labels_dense = labels_dense.astype(int)
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
labels_one_hot = labels_one_hot.astype(np.float32)
temp = zeros((labels_one_hot.shape[0],self._max_seq,num_classes))
i=0
while i < labels_one_hot.shape[0]:
temp[i][0:int(self._batch_seq[i])] = labels_one_hot[i]
i=i+1
return temp
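# Shape note (added for clarity; the numbers are made up): for a batch of 3
# labels with num_classes=1000 and self._max_seq=4, dense_to_one_hot first
# builds a (3, 1000) one-hot matrix and then copies each row over that
# sample's own sequence length, returning a (3, 4, 1000) array whose unused
# time steps stay zero.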
def next_batch(self,batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
#self._trainList = self._trainList[perm]
self._trainLabelsPerm = self._trainLabels[self._perm_list]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self.PrepareTrainingBatch(self._perm_list[start:end], batch_size, 1), self.dense_to_one_hot(self._trainLabelsPerm[start:end])
def PrepareTestingBatch(self,test_total):
start = self._index_in_epoch_test
self._index_in_epoch_test += test_total
if self._index_in_epoch_test > self._num_testexamples:
start = 0
self._index_in_epoch_test = test_total
assert test_total <= self._num_testexamples
end = self._index_in_epoch_test
return self.PrepareTrainingBatch(self._perm_list_test[start:end], test_total, 0), self.dense_to_one_hot(self._testLabels[start:end])
####### Network Parameters ########
training_iters = 10000000
batch_size = 15
display_step = 280
test_num_total = 15
layername_conv = 'conv5_3'
layername_fc = 'fc7_final'
layername = [layername_conv, layername_fc]
numMap = 512#20
num_classes = 1000
dropTrain = 0.5
dropTest = 1
plantclefdata = DataSet(layername,numMap)
# tf Graph input
x = tf.placeholder("float", [None, None, 4096])
data = tf.placeholder("float", [None, None, numMap*14*14])
target = tf.placeholder("float", [None, None, num_classes])
dropout = tf.placeholder(tf.float32)
batch_size2 = tf.placeholder(tf.int32)
#saved Model directory
save_dir = '/media/titanz/Data3TB/tensorboard_log/model_20180418/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model = VariableSequenceClassification(x, data, target, dropout, batch_size2)
#combine all summaries for tensorboard
summary_op = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep = None)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Resume training
#saver.restore(sess, "/media/titanz/Data3TB/tensorboard_log/model_20180418/model_13160")
# declare tensorboard folder
log_path = '/media/titanz/Data3TB/tensorboard_log/20180418'
train_writer = tf.summary.FileWriter(log_path + '/train',
sess.graph)
test_writer = tf.summary.FileWriter(log_path + '/test')
step = 1
while step * batch_size < training_iters: # step = 280 is equal to one epoch
(batch_x_fc, batch_x_conv), batch_y = plantclefdata.next_batch(batch_size)
loss = sess.run(model.cost, feed_dict={x: batch_x_fc, data: batch_x_conv, batch_size2: batch_size, target: batch_y, dropout: dropTrain})
train_acc = sess.run(model.error, feed_dict={x: batch_x_fc, data: batch_x_conv, batch_size2: batch_size, target: batch_y, dropout: dropTrain})
_,summary = sess.run([model.optimize, summary_op], feed_dict={x: batch_x_fc, data: batch_x_conv, batch_size2: batch_size, target: batch_y, dropout: dropTrain})
# write log
train_writer.add_summary(summary, step * batch_size)
if step % display_step == 0:
strftime("%Y-%m-%d %H:%M:%S", gmtime())
logfile.logging("Epoch" + str(step) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy = " + \
"{:.5f}".format(train_acc) + ", lengthData= " + "{:.1f}".format(plantclefdata.max_seq()))
if step % display_step == 0:
saveid = 'model_%s' %step
save_path = save_dir + saveid
saver.save(sess, save_path)
(test_data_x, test_data_conv), test_label = plantclefdata.PrepareTestingBatch(test_num_total) # step/epoch = 694.35 = All testing data tested
test_loss = sess.run(model.cost, feed_dict={x: test_data_x, data: test_data_conv, batch_size2: test_num_total, target: test_label, dropout: dropTest})
test_acc,summary = sess.run([model.error, summary_op], feed_dict={x: test_data_x, data: test_data_conv, batch_size2: test_num_total, target: test_label, dropout: dropTest})
logfile.logging('testing accuracy {:3.5f}%'.format(test_acc) + ", testbatch Loss= " + \
"{:.6f}".format(test_loss))
test_writer.add_summary(summary, step * batch_size)
step += 1
print("Optimization Finished!")
avg_line_length: 36.630137 | max_line_length: 183 | alphanum_fraction: 0.583096

hexsha: 9ae84ae8657385ba458049caa3710f4fe4857cd9 | size: 5,173 | ext: py | lang: Python
max_stars: research/deeplab/core/dense_prediction_cell_test.py | slomrafgrav/models | daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce | ["Apache-2.0"] | stars: 79 | 2019-03-02T17:40:25.000Z | 2021-08-17T13:22:03.000Z
max_issues: research/deeplab/core/dense_prediction_cell_test.py | bhushan23/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | ["Apache-2.0"] | issues: 7 | 2019-01-07T16:49:27.000Z | 2020-04-28T16:48:52.000Z
max_forks: research/deeplab/core/dense_prediction_cell_test.py | bhushan23/models | e498d28503fd4a12d1fa9ade41891f2f9601c674 | ["Apache-2.0"] | forks: 26 | 2019-04-17T19:44:47.000Z | 2021-08-07T00:52:32.000Z
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_prediction_cell."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from deeplab.core import dense_prediction_cell
class DensePredictionCellTest(tf.test.TestCase):
def setUp(self):
self.segmentation_layer = dense_prediction_cell.DensePredictionCell(
config=[
{
dense_prediction_cell._INPUT: -1,
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: 1,
},
{
dense_prediction_cell._INPUT: 0,
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: 3,
dense_prediction_cell._RATE: [1, 3],
},
{
dense_prediction_cell._INPUT: 1,
dense_prediction_cell._OP: (
dense_prediction_cell._PYRAMID_POOLING),
dense_prediction_cell._GRID_SIZE: [1, 2],
},
],
hparams={'conv_rate_multiplier': 2})
def testPyramidPoolingArguments(self):
features_size, pooled_kernel = (
self.segmentation_layer._get_pyramid_pooling_arguments(
crop_size=[513, 513],
output_stride=16,
image_grid=[4, 4]))
self.assertListEqual(features_size, [33, 33])
self.assertListEqual(pooled_kernel, [9, 9])
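# Arithmetic note (added for clarity; the formula is inferred from the
# expected values, not quoted from the implementation): with crop_size=513
# and output_stride=16 the feature map size is (513 - 1) / 16 + 1 = 33, and
# a 4x4 image grid over a 33x33 map gives a pooling kernel of
# (33 - 1) / 4 + 1 = 9, matching the [33, 33] and [9, 9] assertions above.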
def testPyramidPoolingArgumentsWithImageGrid1x1(self):
features_size, pooled_kernel = (
self.segmentation_layer._get_pyramid_pooling_arguments(
crop_size=[257, 257],
output_stride=16,
image_grid=[1, 1]))
self.assertListEqual(features_size, [17, 17])
self.assertListEqual(pooled_kernel, [17, 17])
def testParseOperationStringWithConv1x1(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: [1, 1],
},
crop_size=[513, 513], output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._CONV)
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [1, 1])
def testParseOperationStringWithConv3x3(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._CONV,
dense_prediction_cell._KERNEL: [3, 3],
dense_prediction_cell._RATE: [9, 6],
},
crop_size=[513, 513], output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._CONV)
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [3, 3])
self.assertEqual(operation[dense_prediction_cell._RATE], [9, 6])
def testParseOperationStringWithPyramidPooling2x2(self):
operation = self.segmentation_layer._parse_operation(
config={
dense_prediction_cell._OP: dense_prediction_cell._PYRAMID_POOLING,
dense_prediction_cell._GRID_SIZE: [2, 2],
},
crop_size=[513, 513],
output_stride=16)
self.assertEqual(operation[dense_prediction_cell._OP],
dense_prediction_cell._PYRAMID_POOLING)
# The feature maps of size [33, 33] should be covered by 2x2 kernels with
# size [17, 17].
self.assertListEqual(
operation[dense_prediction_cell._TARGET_SIZE], [33, 33])
self.assertListEqual(operation[dense_prediction_cell._KERNEL], [17, 17])
def testBuildCell(self):
with self.test_session(graph=tf.Graph()) as sess:
features = tf.random_normal([2, 33, 33, 5])
concat_logits = self.segmentation_layer.build_cell(
features,
output_stride=8,
crop_size=[257, 257])
sess.run(tf.global_variables_initializer())
concat_logits = sess.run(concat_logits)
self.assertTrue(concat_logits.any())
def testBuildCellWithImagePoolingCropSize(self):
with self.test_session(graph=tf.Graph()) as sess:
features = tf.random_normal([2, 33, 33, 5])
concat_logits = self.segmentation_layer.build_cell(
features,
output_stride=8,
crop_size=[257, 257],
image_pooling_crop_size=[129, 129])
sess.run(tf.global_variables_initializer())
concat_logits = sess.run(concat_logits)
self.assertTrue(concat_logits.any())
if __name__ == '__main__':
tf.test.main()
avg_line_length: 38.318519 | max_line_length: 80 | alphanum_fraction: 0.67833

hexsha: c3ac9a07281c8c2bcd1925ee506e143c80128436 | size: 12,687 | ext: py | lang: Python
max_stars: nenupytv/image/visibilities_old.py | AlanLoh/nenupy-tv | 9c33652521293eaba726f02fdb2331ae32dda6f6 | ["MIT"] | stars: null | null | null
max_issues: nenupytv/image/visibilities_old.py | AlanLoh/nenupy-tv | 9c33652521293eaba726f02fdb2331ae32dda6f6 | ["MIT"] | issues: 14 | 2019-11-12T09:48:00.000Z | 2020-02-28T17:02:54.000Z
max_forks: nenupytv/image/visibilities_old.py | AlanLoh/nenupy-tv | 9c33652521293eaba726f02fdb2331ae32dda6f6 | ["MIT"] | forks: 1 | 2020-09-09T17:40:58.000Z | 2020-09-09T17:40:58.000Z
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
"""
__author__ = 'Alan Loh, Julien Girard'
__copyright__ = 'Copyright 2020, nenupytv'
__credits__ = ['Alan Loh', 'Julien Girard']
__maintainer__ = 'Alan'
__email__ = 'alan.loh@obspm.fr'
__status__ = 'Production'
__all__ = [
'Visibilities'
]
import numpy as np
from astropy.time import Time
from nenupytv.read import Crosslets
from nenupytv.uvw import SphUVW
from nenupytv.astro import eq_zenith, to_lmn, rephase, nenufar_loc
from nenupytv.calibration import Skymodel
from nenupytv.image import Grid_Simple, Dirty
# ============================================================= #
# ----------------------- Visibilities ------------------------ #
# ============================================================= #
class Visibilities(object):
"""
"""
def __init__(self, crosslets):
self.flag = None
self.time = None
self.freq = None
self.cal_vis = None
self.vis = None
self.uvw = None
self.grid = None
self.cross = crosslets
# --------------------------------------------------------- #
# --------------------- Getter/Setter --------------------- #
@property
def cross(self):
return self._cross
@cross.setter
def cross(self, c):
if isinstance(c, str):
c = Crosslets(c)
if not isinstance(c, Crosslets):
raise TypeError(
'Crosslets object expected'
)
self._cross = c
self._get_vis()
self._compute_uvw()
return
@property
def time(self):
return self._time
@time.setter
def time(self, t):
if t is None:
pass
elif not isinstance(t, Time):
raise TypeError(
'Time object expected'
)
else:
if t.shape[0] != self.vis.shape[0]:
raise ValueError(
'Time shape mismatch'
)
self._time = t
return
@property
def freq(self):
return self._freq
@freq.setter
def freq(self, f):
if f is None:
pass
elif not isinstance(f, np.ndarray):
raise TypeError(
'np.ndarray object expected'
)
else:
if f.shape[0] != self.vis.shape[1]:
raise ValueError(
'freq shape mismatch'
)
self._freq = f
return
@property
def vis(self):
if self.flag is None:
self.flag = np.zeros(
self._vis.shape[:-1],
dtype=bool
)
return np.ma.masked_array(
self._vis,
mask=np.tile(np.expand_dims(self.flag, axis=4), 4)
)
@vis.setter
def vis(self, v):
if v is None:
pass
elif not isinstance(v, np.ndarray):
raise TypeError(
'np.ndarray expected'
)
self._vis = v
return
@property
def uvw(self):
if self.flag is None:
self.flag = np.zeros(
self._uvw.shape[:-1],
dtype=bool
)
return np.ma.masked_array(
self._uvw,
mask=np.tile(np.expand_dims(self.flag, axis=4), 3)
)
@uvw.setter
def uvw(self, u):
if u is None:
pass
elif not isinstance(u, np.ndarray):
raise TypeError(
'np.ndarray expected'
)
else:
if not self.vis.shape[:-1] == u.shape[:-1]:
raise ValueError(
'vis and uvw have shape discrepancies'
)
self._uvw = u
return
@property
def phase_center(self):
""" Phase center (time, (RA, Dec)) in degrees
"""
return np.array(list(map(eq_zenith, self.time)))
@property
def time_mean(self):
"""
"""
dt = self.time[-1] - self.time[0]
return self.time[0] + dt/2
@property
def freq_mean(self):
"""
"""
return np.mean(self.freq)
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
def uvcut(self, uvmin=None, uvmax=None):
"""
"""
if uvmin is None:
uvmin = self.uvdist.min()
if uvmax is None:
uvmax = self.uvdist.max()
self.flag = (self.uvdist < uvmin) | (self.uvdist > uvmax)
return
def calibrate(self):
"""
"""
# We search for sources within 35 deg of the zenith;
# no need to be very precise as they are fixed (RA, Dec)
sk = Skymodel(
center=eq_zenith(self.time_mean),
radius=35,
freq=self.freq_mean,
method='gsm',
cutoff=150
)
# The sky model does not contain polarization!
model_vis = self._model_vis(sk.skymodel)
# glm, Glm = _create_G_LM(self.vis, model_vis)
# self.cal_vis = Glm**(-1) * self.vis
self.vis = self.vis[:, 0, :, :, 0]
model_vis = model_vis[:, 0, :, :, 0]
gains = self._gain_cal(model_vis)
self.cal_vis = gains**(-1) * self.vis
return
def average(self):
"""
"""
return
def make_dirty(self, fov=60, robust=-2, coord=None):
"""
"""
avg_vis = np.mean(self.vis, axis=(0, 1))
avg_uvw = np.mean(self.uvw, axis=(0, 1))
if coord is not None:
transfo, origtransfo, finaltransfo, dw = rephase(
ra=coord[0],
dec=coord[1],
time=self.time_mean,
loc=nenufar_loc(),
dw=True
)
# phase = np.dot( avg_uvw, np.dot( dw.T, origtransfo).T)
# dphi = np.exp( phase * 2 * np.pi * 1j)# / wavelength[idx1:idx2, chan])
# avg_vis *= dphi
# avg_uvw = np.dot(avg_uvw, transfo.T)
avg_uvw = np.dot(avg_uvw, origtransfo.T)#finaltransfo.T)
avg_vis *= np.exp( np.dot(avg_uvw, -dw) * 2 * np.pi * 1j)
self.grid = Grid_Simple(
vis=avg_vis,
uvw=avg_uvw,
freq=self.freq_mean,
fov=fov,
robust=robust,
convolution=None # 'gaussian'
)
self.grid.populate()
dirty = Dirty(self.grid, self.cross)
dirty.compute()
return dirty
def make_image(self):
"""
"""
return
# --------------------------------------------------------- #
# ----------------------- Internal ------------------------ #
def _get_vis(self):
"""
"""
self.vis = self._cross.reshape(
fmean=False,
tmean=False
)
self.time = self._cross.time
self.freq = self._cross.meta['freq']
return
def _model_vis(self, skymodel):
"""
"""
vis_model = np.zeros(
self.vis.shape,
dtype='complex'
)
# compute the zenith coordinates for every time step
zen = self.phase_center #np.array(list(map(eq_zenith, self.time)))
ra_0 = zen[:, 0]
dec_0 = zen[:, 1]
# pointers to u, v, w coordinates
u = self.uvw[..., 0]
v = self.uvw[..., 1]
w = self.uvw[..., 2]
# loop over skymodel sources
na = np.newaxis
for k in range(skymodel.shape[0]):
flux = skymodel[k, 0] # Jy
ra, dec = skymodel[k, 1], skymodel[k, 2]
l, m, n = to_lmn(ra, dec, ra_0, dec_0)
ul = u*l[:, na, na, na]
vm = v*m[:, na, na, na]
nw = (n[:, na, na, na] - 1)*w
phase = np.exp(-2*np.pi*1j*(ul + vm))# + nw))
# adding the w component messes up subsequent plots
vis_model += flux * phase[..., na]
return vis_model
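# Note (added for clarity): the loop above implements the standard
# point-source visibility model, V(u, v) ~ sum_k S_k * exp(-2*pi*i*(u*l_k + v*m_k)),
# with the w term deliberately left out (see the comment above).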
def _gain_cal(self, model):
"""
"""
from scipy.optimize import least_squares #leastsq
gains = np.zeros(
self.vis.shape,
dtype='complex'
)
def err_func(gain, data, model):
shape = self.vis.shape[1:]
gain = np.reshape(gain, shape)
data = np.reshape(data, shape)
model = np.reshape(model, shape)
calmodel = gain * model
calmodel = calmodel * gain.conj()
# scipy optimize doesn't like complex numbers
a = (data - calmodel).ravel()
return a.real**2 + a.imag**2
for t in range(self.time.size):
print(t)
# res = leastsq(
# err_func,
# np.ones(
# self.vis[t, ...].size,
# ),
# args=(self.vis[t, ...].ravel(), model[t, ...].ravel())
# )
res = least_squares(
err_func,
np.ones(
self.vis[t, ...].size,
),
args=(self.vis[t, ...].ravel(), model[t, ...].ravel()),
verbose=2
)
gains[t, ...] = res.x # res
return gains
# def _create_G_LM(self, D, M):
# """ This function finds argmin G ||D-GMG^H|| using Levenberg-Marquardt.
# It uses the optimize.leastsq scipy to perform
# the actual minimization.
# D/self.vis is your observed visibilities matrix.
# M is your predicted visibilities.
# g the antenna gains.
# G = gg^H.
# """
# from scipy.optimize import leastsq
# def err_func(g, d, m):
# """ Unpolarized direction independent calibration entails
# finding the G that minimizes ||R-GMG^H||.
# This function evaluates D-GMG^H.
# g is a vector containing the real and imaginary components of the antenna gains.
# d is a vector containing a vectorized R (observed visibilities), real and imaginary.
# m is a vector containing a vectorized M (predicted), real and imaginary.
# r is a vector containing the residuals.
# """
# Nm = len(d)//2
# N = len(g)//2
# G = np.diag(g[0:N] + 1j*g[N:])
# D = np.reshape(d[0:Nm],(N,N)) + np.reshape(d[Nm:],(N,N))*1j # matricization
# M = np.reshape(m[0:Nm],(N,N)) + np.reshape(m[Nm:],(N,N))*1j
# T = np.dot(G, M)
# T = np.dot(T, G.conj())
# R = D - T
# r_r = np.ravel(R.real) #vectorization
# r_i = np.ravel(R.imag)
# r = np.hstack([r_r, r_i])
# return r
# nant = D.shape[0] #number of antennas
# temp = np.ones(
# (nant, nant), # MAYBE FALSE CHECK D.SHAPE[1]
# dtype='complex'
# )
# G = np.zeros(
# D.shape, #(ant,ant,time)
# dtype='complex'
# )
# g = np.zeros(
# (self.time.size, nant),
# dtype='complex'
# )
# # perform calibration per time-slot
# for t in range(self.time.size):
# g_0 = np.ones((2*nant)) # first antenna gain guess
# g_0[nant:] = 0
# d_r = np.ravel(D[t, ...].real) # vectorization of observed + separating real and imag
# d_i = np.ravel(D[t, ...].imag)
# d = np.hstack([d_r,d_i])
# m_r = np.ravel(M[t, ...].real) # vectorization of model + separating real and imag
# m_i = np.ravel(M[t, ...].imag)
# m = np.hstack([m_r, m_i])
# g_lstsqr_temp = leastsq(
# err_func,
# g_0,
# args=(d, m)
# )
# g_lstsqr = g_lstsqr_temp[0]
# G_m = np.dot(np.diag(g_lstsqr[0:nant] + 1j*g_lstsqr[nant:]), temp)
# G_m = np.dot(G_m, np.diag((g_lstsqr[0:nant] + 1j*g_lstsqr[nant:]).conj()))
# g[t, :] = g_lstsqr[0:nant] + 1j*g_lstsqr[nant:] #creating antenna gain vector
# G[t, ...] = G_m
# return g, G
def _compute_uvw(self):
"""
"""
uvw = SphUVW()
uvw.from_crosslets(self._cross)
self.uvw = uvw._uvw
self.uvdist = uvw.uvdist
return
# ============================================================= #
avg_line_length: 28.574324 | max_line_length: 102 | alphanum_fraction: 0.45251

hexsha: 832e9ede8a6e343b12d17e5326d2a387520d502f | size: 3,123 | ext: py | lang: Python
max_stars: ibis/expr/tests/test_case.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | ["Apache-2.0"] | stars: null | null | null
max_issues: ibis/expr/tests/test_case.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | ["Apache-2.0"] | issues: null | null | null
max_forks: ibis/expr/tests/test_case.py | andrewseidl/ibis | 1468b8c4f96d9d58f6fa147a2579b0d9e5796186 | ["Apache-2.0"] | forks: null | null | null
import pytest
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis
from ibis.tests.util import assert_equal
def test_ifelse(table):
bools = table.g.isnull()
result = bools.ifelse("foo", "bar")
assert isinstance(result, ir.StringColumn)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_ifelse_literal():
assert False
def test_simple_case_expr(table):
case1, result1 = "foo", table.a
case2, result2 = "bar", table.c
default_result = table.b
expr1 = table.g.lower().cases(
[(case1, result1),
(case2, result2)],
default=default_result
)
expr2 = (table.g.lower().case()
.when(case1, result1)
.when(case2, result2)
.else_(default_result)
.end())
assert_equal(expr1, expr2)
assert isinstance(expr1, ir.IntegerColumn)
def test_multiple_case_expr(table):
case1 = table.a == 5
case2 = table.b == 128
case3 = table.c == 1000
result1 = table.f
result2 = table.b * 2
result3 = table.e
default = table.d
expr = (ibis.case()
.when(case1, result1)
.when(case2, result2)
.when(case3, result3)
.else_(default)
.end())
op = expr.op()
assert isinstance(expr, ir.FloatingColumn)
assert isinstance(op, ops.SearchedCase)
assert op.default is default
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_simple_case_no_default():
# TODO: this conflicts with the null else cases below. Make a decision
# about what to do, what to make the default behavior based on what the
# user provides. SQL behavior is to use NULL when nothing else
# provided. The .replace convenience API could use the field values as
# the default, getting us around this issue.
assert False
def test_simple_case_null_else(table):
expr = table.g.case().when("foo", "bar").end()
op = expr.op()
assert isinstance(expr, ir.StringColumn)
assert isinstance(op.default, ir.ValueExpr)
assert isinstance(op.default.op(), ops.Cast)
assert op.default.op().to == dt.string
def test_multiple_case_null_else(table):
expr = ibis.case().when(table.g == "foo", "bar").end()
op = expr.op()
assert isinstance(expr, ir.StringColumn)
assert isinstance(op.default, ir.ValueExpr)
assert isinstance(op.default.op(), ops.Cast)
assert op.default.op().to == dt.string
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_case_type_precedence():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_no_implicit_cast_possible():
assert False
def test_case_mixed_type():
t0 = ibis.table(
[('one', 'string'),
('two', 'double'),
('three', 'int32')], name='my_data')
expr = (
t0.three
.case()
.when(0, 'low')
.when(1, 'high')
.else_('null')
.end()
.name('label'))
result = t0[expr]
assert result['label'].type().equals(dt.string)
avg_line_length: 25.390244 | max_line_length: 75 | alphanum_fraction: 0.634646

hexsha: 026fc7575e03b916275d794761d78493dafe60b7 | size: 6,652 | ext: py | lang: Python
max_stars: bindings/python/ensmallen_graph/datasets/string/thermofilumpendens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | ["MIT"] | stars: null | null | null
max_issues: bindings/python/ensmallen_graph/datasets/string/thermofilumpendens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | ["MIT"] | issues: null | null | null
max_forks: bindings/python/ensmallen_graph/datasets/string/thermofilumpendens.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | ["MIT"] | forks: null | null | null
"""
This file offers the methods to automatically retrieve the graph Thermofilum pendens.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:35:37.467976
The undirected graph Thermofilum pendens has 1866 nodes and 140059 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08049 and has 17 connected components, where the component with most
nodes has 1807 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 139, the mean node degree is 150.12, and
the node degree mode is 5. The top 5 most central nodes are 368408.Tpen_0948
(degree 681), 368408.Tpen_0880 (degree 622), 368408.Tpen_1821 (degree 605),
368408.Tpen_1765 (degree 580) and 368408.Tpen_0660 (degree 559).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ThermofilumPendens
# Then load the graph
graph = ThermofilumPendens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ThermofilumPendens(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Thermofilum pendens graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Thermofilum pendens graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 20:35:37.467976
The undirected graph Thermofilum pendens has 1866 nodes and 140059 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08049 and has 17 connected components, where the component with most
nodes has 1807 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 139, the mean node degree is 150.12, and
the node degree mode is 5. The top 5 most central nodes are 368408.Tpen_0948
(degree 681), 368408.Tpen_0880 (degree 622), 368408.Tpen_1821 (degree 605),
368408.Tpen_1765 (degree 580) and 368408.Tpen_0660 (degree 559).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ThermofilumPendens
# Then load the graph
graph = ThermofilumPendens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="ThermofilumPendens",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
avg_line_length: 35.195767 | max_line_length: 223 | alphanum_fraction: 0.702195

hexsha: f2cfb7042f75b33a0f223f53d1ac2b0b4244c1dc | size: 1,510 | ext: py | lang: Python
max_stars: python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_watch_grpc_server.py | jrouly/dagster | 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c | ["Apache-2.0"] | stars: null | null | null
max_issues: python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_watch_grpc_server.py | jrouly/dagster | 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c | ["Apache-2.0"] | issues: 1 | 2021-06-21T18:30:02.000Z | 2021-06-25T21:18:39.000Z
max_forks: python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_watch_grpc_server.py | jrouly/dagster | 2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c | ["Apache-2.0"] | forks: null | null | null
import time
from dagster.core.host_representation.grpc_server_state_subscriber import (
LocationStateChangeEvent,
LocationStateChangeEventType,
LocationStateSubscriber,
)
from .graphql_context_test_suite import GraphQLContextVariant, make_graphql_context_test_suite
class TestSubscribeToGrpcServerEvents(
make_graphql_context_test_suite(
context_variants=[GraphQLContextVariant.non_launchable_sqlite_instance_deployed_grpc_env()]
)
):
def test_grpc_server_handle_message_subscription(self, graphql_context):
events = []
test_subscriber = LocationStateSubscriber(events.append)
location = next(
iter(
graphql_context.process_context._workspace.repository_locations # pylint: disable=protected-access
)
)
graphql_context.process_context._workspace.add_state_subscriber( # pylint: disable=protected-access
test_subscriber
)
location.client.shutdown_server()
# Wait for event
start_time = time.time()
timeout = 60
while not len(events) > 0:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for LocationStateChangeEvent")
time.sleep(1)
assert len(events) == 1
assert isinstance(events[0], LocationStateChangeEvent)
assert events[0].event_type == LocationStateChangeEventType.LOCATION_ERROR
assert events[0].location_name == location.name
avg_line_length: 35.952381 | max_line_length: 115 | alphanum_fraction: 0.706623

hexsha: b6f45f8c4cb21f29842cf691d66c2cb85773a592 | size: 734 | ext: py | lang: Python
max_stars: googlemaps/__init__.py | ZayanShahid/google-maps-services-python | e630331bb03ac750db5d1df0e2727ec925439574 | ["Apache-2.0"] | stars: 1 | 2021-06-02T04:13:17.000Z | 2021-06-02T04:13:17.000Z
max_issues: googlemaps/__init__.py | ZayanShahid/google-maps-services-python | e630331bb03ac750db5d1df0e2727ec925439574 | ["Apache-2.0"] | issues: null | null | null
max_forks: googlemaps/__init__.py | ZayanShahid/google-maps-services-python | e630331bb03ac750db5d1df0e2727ec925439574 | ["Apache-2.0"] | forks: 1 | 2020-10-31T05:44:03.000Z | 2020-10-31T05:44:03.000Z
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
__version__ = "4.4.2"
from googlemaps.client import Client
from googlemaps import exceptions
__all__ = ["Client", "exceptions"]
avg_line_length: 29.36 | max_line_length: 79 | alphanum_fraction: 0.754768

hexsha: d5b2f2b3ad04f231e8c4fbde31a352cd3237eb18 | size: 531 | ext: py | lang: Python
max_stars: src/regression/predict_model.py | satishukadam/regressionmodel | 1d6cb4c549b632c09ad81da3494dffc43741e451 | ["MIT"] | stars: null | null | null
max_issues: src/regression/predict_model.py | satishukadam/regressionmodel | 1d6cb4c549b632c09ad81da3494dffc43741e451 | ["MIT"] | issues: null | null | null
max_forks: src/regression/predict_model.py | satishukadam/regressionmodel | 1d6cb4c549b632c09ad81da3494dffc43741e451 | ["MIT"] | forks: null | null | null
import os
import pandas as pd
from configs import config
import joblib
def predict_model(model_name, data_file_name):
"""This function predicts house prices based on input data"""
model_path = os.path.join(config.TRAINED_MODEL_DIR, model_name)
data_file_path = os.path.join(os.path.join(config.DATA_DIR, data_file_name))
pipe = joblib.load(model_path)
data = pd.read_csv(data_file_path)
prediction = pipe.predict(data)
return prediction
print(predict_model('model1.pkl', 'predict_house_price.csv'))
| 29.5
| 80
| 0.758945
|
17ae9ad4ebfb47f805bb7afa6f216df0b27adfe9
| 73
|
py
|
Python
|
tfnlu/classification/__init__.py
|
ishine/tfnlu
|
73d567a5f07845a70bc13da63e6ad7b9eefe837e
|
[
"MIT"
] | 1
|
2021-03-22T03:51:18.000Z
|
2021-03-22T03:51:18.000Z
|
tfnlu/classification/__init__.py
|
ishine/tfnlu
|
73d567a5f07845a70bc13da63e6ad7b9eefe837e
|
[
"MIT"
] | null | null | null |
tfnlu/classification/__init__.py
|
ishine/tfnlu
|
73d567a5f07845a70bc13da63e6ad7b9eefe837e
|
[
"MIT"
] | 3
|
2020-09-08T14:45:48.000Z
|
2021-05-14T13:45:51.000Z
|
from .classification import Classification
__all__ = ['Classification']
avg_line_length: 18.25 | max_line_length: 42 | alphanum_fraction: 0.808219

hexsha: d38ba84cd1397fa032cf2e5b1e7377c5b0d15c18 | size: 5,713 | ext: py | lang: Python
max_stars: pandas/tests/groupby/test_value_counts.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | ["BSD-3-Clause"] | stars: 2 | 2022-02-27T04:02:18.000Z | 2022-03-01T03:48:47.000Z
max_issues: pandas/tests/groupby/test_value_counts.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | ["BSD-3-Clause"] | issues: 1 | 2022-02-12T20:25:37.000Z | 2022-02-25T22:34:54.000Z
max_forks: pandas/tests/groupby/test_value_counts.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | ["BSD-3-Clause"] | forks: 2 | 2022-02-27T04:02:19.000Z | 2022-03-01T03:49:21.000Z
"""
these are systematically testing all of the args to value_counts
with different size combinations. This is to ensure stability of the sorting
and proper parameter handling
"""
from itertools import product
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Grouper,
MultiIndex,
Series,
date_range,
to_datetime,
)
import pandas._testing as tm
def tests_value_counts_index_names_category_column():
# GH44324 Missing name of index category column
df = DataFrame(
{
"gender": ["female"],
"country": ["US"],
}
)
df["gender"] = df["gender"].astype("category")
result = df.groupby("country")["gender"].value_counts()
# Construct expected, very specific multiindex
df_mi_expected = DataFrame([["US", "female"]], columns=["country", "gender"])
df_mi_expected["gender"] = df_mi_expected["gender"].astype("category")
mi_expected = MultiIndex.from_frame(df_mi_expected)
expected = Series([1], index=mi_expected, name="gender")
tm.assert_series_equal(result, expected)
# our starting frame
def seed_df(seed_nans, n, m):
np.random.seed(1234)
days = date_range("2015-08-24", periods=10)
frame = DataFrame(
{
"1st": np.random.choice(list("abcd"), n),
"2nd": np.random.choice(days, n),
"3rd": np.random.randint(1, m + 1, n),
}
)
if seed_nans:
frame.loc[1::11, "1st"] = np.nan
frame.loc[3::17, "2nd"] = np.nan
frame.loc[7::19, "3rd"] = np.nan
frame.loc[8::19, "3rd"] = np.nan
frame.loc[9::19, "3rd"] = np.nan
return frame
# create input df, keys, and the bins
binned = []
ids = []
for seed_nans in [True, False]:
for n, m in product((100, 1000), (5, 20)):
df = seed_df(seed_nans, n, m)
bins = None, np.arange(0, max(5, df["3rd"].max()) + 1, 2)
keys = "1st", "2nd", ["1st", "2nd"]
for k, b in product(keys, bins):
binned.append((df, k, b, n, m))
ids.append(f"{k}-{n}-{m}")
@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
@pytest.mark.parametrize("isort", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("dropna", [True, False])
def test_series_groupby_value_counts(
df, keys, bins, n, m, isort, normalize, sort, ascending, dropna
):
def rebuild_index(df):
arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
df.index = MultiIndex.from_arrays(arr, names=df.index.names)
return df
kwargs = {
"normalize": normalize,
"sort": sort,
"ascending": ascending,
"dropna": dropna,
"bins": bins,
}
gr = df.groupby(keys, sort=isort)
left = gr["3rd"].value_counts(**kwargs)
gr = df.groupby(keys, sort=isort)
right = gr["3rd"].apply(Series.value_counts, **kwargs)
right.index.names = right.index.names[:-1] + ["3rd"]
# have to sort on index because of unstable sort on values
left, right = map(rebuild_index, (left, right)) # xref GH9212
tm.assert_series_equal(left.sort_index(), right.sort_index())
def test_series_groupby_value_counts_with_grouper():
# GH28479
df = DataFrame(
{
"Timestamp": [
1565083561,
1565083561 + 86400,
1565083561 + 86500,
1565083561 + 86400 * 2,
1565083561 + 86400 * 3,
1565083561 + 86500 * 3,
1565083561 + 86400 * 4,
],
"Food": ["apple", "apple", "banana", "banana", "orange", "orange", "pear"],
}
).drop([3])
df["Datetime"] = to_datetime(df["Timestamp"].apply(lambda t: str(t)), unit="s")
dfg = df.groupby(Grouper(freq="1D", key="Datetime"))
# have to sort on index because of unstable sort on values xref GH9212
result = dfg["Food"].value_counts().sort_index()
expected = dfg["Food"].apply(Series.value_counts).sort_index()
expected.index.names = result.index.names
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
def test_series_groupby_value_counts_empty(columns):
# GH39172
df = DataFrame(columns=columns)
dfg = df.groupby(columns[:-1])
result = dfg[columns[-1]].value_counts()
expected = Series([], name=columns[-1], dtype=result.dtype)
expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("columns", [["A", "B"], ["A", "B", "C"]])
def test_series_groupby_value_counts_one_row(columns):
# GH42618
df = DataFrame(data=[range(len(columns))], columns=columns)
dfg = df.groupby(columns[:-1])
result = dfg[columns[-1]].value_counts()
expected = df.value_counts().rename(columns[-1])
tm.assert_series_equal(result, expected)
def test_series_groupby_value_counts_on_categorical():
# GH38672
s = Series(Categorical(["a"], categories=["a", "b"]))
result = s.groupby([0]).value_counts()
expected = Series(
data=[1, 0],
index=MultiIndex.from_arrays(
[
[0, 0],
CategoricalIndex(
["a", "b"], categories=["a", "b"], ordered=False, dtype="category"
),
]
),
name=0,
)
# Expected:
# 0 a 1
# b 0
# Name: 0, dtype: int64
tm.assert_series_equal(result, expected)
| 29.297436
| 87
| 0.59951
|
f5d242be9a521cd2a15d26663c080e1636a33e24
| 1,541
|
py
|
Python
|
hlt/constants.py
|
bendemers/Halite3-SVM-Bot
|
ae30b821d760bd6f7e15f6094029ab78ceaa88d6
|
[
"MIT"
] | 2
|
2018-11-15T14:04:26.000Z
|
2018-11-19T01:54:01.000Z
|
hlt/constants.py
|
bendemers/Halite3-SVM-Bot
|
ae30b821d760bd6f7e15f6094029ab78ceaa88d6
|
[
"MIT"
] | 5
|
2021-02-08T20:26:47.000Z
|
2022-02-26T04:28:33.000Z
|
hlt/constants.py
|
bendemers/Halite3-SVM-Bot
|
ae30b821d760bd6f7e15f6094029ab78ceaa88d6
|
[
"MIT"
] | 1
|
2018-11-22T14:58:12.000Z
|
2018-11-22T14:58:12.000Z
|
"""
Constants representing the game variation being played.
Most constants are global: they come from the game engine, are immutable, and are strictly informational.
Some constants are used only by the local game client and are therefore mutable.
"""
################################################
# Local and mutable constants.
"""Maximum number of steps to consider in pathfinding."""
MAX_BFS_STEPS = 1024 # = can search a 32x32 area completely
################################################
# Global and immutable constants.
"""The maximum amount of halite a ship can carry."""
MAX_HALITE = 1000
"""The cost to build a single ship."""
SHIP_COST = 500
"""The cost to build a dropoff."""
DROPOFF_COST = 2000
"""The maximum number of turns a game can last."""
MAX_TURNS = 500
"""1/EXTRACT_RATIO halite (rounded) is collected from a square per turn."""
EXTRACT_RATIO = 4
"""1/MOVE_COST_RATIO halite (rounded) is needed to move off a cell."""
MOVE_COST_RATIO = 10
def load_constants(constants):
"""
Load constants from JSON given by the game engine.
"""
global SHIP_COST, DROPOFF_COST, MAX_HALITE, MAX_TURNS
global EXTRACT_RATIO, MOVE_COST_RATIO
SHIP_COST = constants.get('NEW_ENTITY_ENERGY_COST', SHIP_COST)
DROPOFF_COST = constants.get('DROPOFF_COST', DROPOFF_COST)
MAX_HALITE = constants.get('MAX_ENERGY', MAX_HALITE)
MAX_TURNS = constants.get('MAX_TURNS', MAX_TURNS)
EXTRACT_RATIO = constants.get('EXTRACT_RATIO', EXTRACT_RATIO)
MOVE_COST_RATIO = constants.get('MOVE_COST_RATIO', MOVE_COST_RATIO)
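# Hedged usage sketch (added for illustration, not part of the original file):
# the game engine is assumed to hand the client a JSON object whose keys match
# those read in load_constants above; running this module directly would show
# the defaults being overridden.
if __name__ == '__main__':
    import json
    sample_payload = '{"NEW_ENTITY_ENERGY_COST": 1000, "MAX_ENERGY": 1000, "MAX_TURNS": 400}'
    load_constants(json.loads(sample_payload))
    print(SHIP_COST, MAX_HALITE, MAX_TURNS)  # expected: 1000 1000 400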
| 38.525
| 101
| 0.700195
|
b4212854a7c3edaddbcf88aa560c6e73e804f042
| 5,242
|
py
|
Python
|
zmon_worker_monitor/builtins/plugins/redis_wrapper.py
|
dneuhaeuser-zalando/zmon-worker
|
eab7480b4cef8aecf910fb816c4dd0e484caaec4
|
[
"Apache-2.0"
] | null | null | null |
zmon_worker_monitor/builtins/plugins/redis_wrapper.py
|
dneuhaeuser-zalando/zmon-worker
|
eab7480b4cef8aecf910fb816c4dd0e484caaec4
|
[
"Apache-2.0"
] | null | null | null |
zmon_worker_monitor/builtins/plugins/redis_wrapper.py
|
dneuhaeuser-zalando/zmon-worker
|
eab7480b4cef8aecf910fb816c4dd0e484caaec4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from zmon_worker_monitor.zmon_worker.errors import ConfigurationError
from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial
from zmon_worker_monitor import plugin_manager
STATISTIC_GAUGE_KEYS = frozenset([
'blocked_clients',
'connected_clients',
'connected_slaves',
'instantaneous_ops_per_sec',
'mem_fragmentation_ratio',
'master_repl_offset',
'role',
'slave0',
'maxmemory',
'used_memory',
'used_memory_lua',
'used_memory_peak',
'used_memory_rss',
])
STATISTIC_COUNTER_KEYS = frozenset([
'evicted_keys',
'expired_keys',
'keyspace_hits',
'keyspace_misses',
'total_commands_processed',
'total_connections_received',
])
class RedisFactory(IFunctionFactoryPlugin):
def __init__(self):
super(RedisFactory, self).__init__()
# fields to store dependencies: plugin depends on 1 other plugin
self.counter_factory = None
def configure(self, conf):
"""
        Called after the plugin is loaded to pass in the [configuration] section of its plugin info file
:param conf: configuration dictionary
"""
self.__password = conf.get('password')
return
def create(self, factory_ctx):
"""
Automatically called to create the check function's object
:param factory_ctx: (dict) names available for Function instantiation
:return: an object that implements a check function
"""
# load plugins dependencies and store them locally for efficiency
if not self.counter_factory:
self.counter_factory = plugin_manager.get_plugin_obj_by_name('counter', 'Function')
return propartial(
RedisWrapper,
counter=self.counter_factory.create(factory_ctx),
host=factory_ctx['host'],
password=self.__password
)
class RedisWrapper(object):
'''Class to allow only readonly access to underlying redis connection'''
def __init__(self, counter, host, port=6379, db=0, password=None, socket_connect_timeout=1, socket_timeout=5):
if not host:
raise ConfigurationError('Redis wrapper improperly configured. Valid redis host is required!')
self._counter = counter('')
self.__con = redis.StrictRedis(
host,
port,
db,
password,
socket_connect_timeout=socket_connect_timeout,
socket_timeout=socket_timeout
)
def llen(self, key):
return self.__con.llen(key)
def lrange(self, key, start, stop):
return self.__con.lrange(key, start, stop)
def get(self, key):
return self.__con.get(key)
def hget(self, key, field):
return self.__con.hget(key, field)
def hgetall(self, key):
return self.__con.hgetall(key)
def scan(self, cursor, match=None, count=None):
return self.__con.scan(cursor, match=match, count=count)
def ttl(self, key):
return self.__con.ttl(key)
def keys(self, pattern):
return self.__con.keys(pattern)
def smembers(self, key):
return self.__con.smembers(key)
def scard(self, key):
return self.__con.scard(key)
def zcard(self, key):
return self.__con.zcard(key)
def zrange(self, key, start, end, desc=False, withscores=False, score_cast_func=float):
return self.__con.zrange(key, start, end, desc, withscores, score_cast_func)
def statistics(self):
'''
Return general Redis statistics such as operations/s
Example result::
{
"blocked_clients": 2,
"commands_processed_per_sec": 15946.48,
"connected_clients": 162,
"connected_slaves": 0,
"connections_received_per_sec": 0.5,
"dbsize": 27351,
"evicted_keys_per_sec": 0.0,
"expired_keys_per_sec": 0.0,
"instantaneous_ops_per_sec": 29626,
"keyspace_hits_per_sec": 1195.43,
"keyspace_misses_per_sec": 1237.99,
"used_memory": 50781216,
"used_memory_rss": 63475712
}
'''
data = self.__con.info()
stats = {}
for key in STATISTIC_GAUGE_KEYS:
stats[key] = data.get(key)
for key in STATISTIC_COUNTER_KEYS:
stats['{}_per_sec'.format(key).replace('total_', '')] = \
round(self._counter.key(key).per_second(data.get(key, 0)), 2)
stats['dbsize'] = self.__con.dbsize()
return stats
if __name__ == '__main__':
import sys
import json
# init plugin manager and collect plugins, as done by Zmon when worker is starting
plugin_manager.init_plugin_manager()
plugin_manager.collect_plugins(load_builtins=True, load_env=True)
factory_ctx = {
'redis_host': 'localhost',
}
counter = plugin_manager.get_plugin_obj_by_name('counter', 'Function').create(factory_ctx)
wrapper = RedisWrapper(counter, sys.argv[1])
    print(json.dumps(wrapper.statistics(), indent=4, sort_keys=True))
| 30.654971
| 114
| 0.634681
|
0ac690f394bc50dcf6617007c512fe6a7bf82f5f
| 13,236
|
py
|
Python
|
credmark/cmf/model/ledger/__init__.py
|
credmark/credmark-model-framework-py
|
ab449990018dc1cbb1c70cfbb61c71bfc02f1ebe
|
[
"MIT"
] | 7
|
2022-03-10T22:28:23.000Z
|
2022-03-31T17:02:16.000Z
|
credmark/cmf/model/ledger/__init__.py
|
credmark/credmark-model-framework-py
|
ab449990018dc1cbb1c70cfbb61c71bfc02f1ebe
|
[
"MIT"
] | 2
|
2022-03-09T04:11:13.000Z
|
2022-03-24T14:36:14.000Z
|
credmark/cmf/model/ledger/__init__.py
|
credmark/credmark-model-framework-py
|
ab449990018dc1cbb1c70cfbb61c71bfc02f1ebe
|
[
"MIT"
] | 1
|
2022-03-29T22:42:07.000Z
|
2022-03-29T22:42:07.000Z
|
from typing import Type, Union, List
from .errors import (
InvalidColumnException,
InvalidQueryException,
)
from credmark.cmf.types.ledger import (
BlockTable, ContractTable,
LogTable, ReceiptTable, TokenTable, TokenTransferTable,
TraceTable, TransactionTable, LedgerTable,
LedgerAggregate, LedgerModelOutput
)
QUERY_METHOD_DOC_STRING = """
Parameters:
columns: The columns list should be built using ``Ledger.{TABLE}.Columns``
        aggregates: The aggregates list should be built using ``Ledger.Aggregate()``
            calls where the expression contains an SQL function (e.g. MAX, SUM, etc.) and
            column names are from ``Ledger.{TABLE}.Columns``.
        where: The "where" portion of an SQL query (without the word WHERE).
            The column names are from ``Ledger.{TABLE}.Columns``.
            Aggregate column names must be in double-quotes.
        group_by: The "group by" portion of an SQL query (without the words "GROUP BY").
            The column names are from ``Ledger.{TABLE}.Columns``.
            Aggregate column names must be in double-quotes.
        order_by: The "order by" portion of an SQL query (without the words "ORDER BY").
            The column names are from ``Ledger.{TABLE}.Columns``.
            Aggregate column names must be in double-quotes.
        having: The "having" portion of an SQL query (without the word "HAVING").
            The column names are from ``Ledger.{TABLE}.Columns``.
            Aggregate column names must be in double-quotes.
        limit: The "limit" portion of an SQL query (without the word "LIMIT").
            Typically this can be an integer as a string.
        offset: The "offset" portion of an SQL query (without the word "OFFSET").
            Typically this can be an integer as a string.
Returns:
An object with a ``data`` property which is a list
of dicts, each dict holding a row with the keys being the column
names. The column names can be referenced using
``Ledger.{TABLE}.Columns`` and aggregate columns names.
"""
def query_method(table: str):
def _doc(func):
func.__doc__ += QUERY_METHOD_DOC_STRING.replace('{TABLE}', table)
return func
return _doc
class Ledger:
"""
Performs queries on ledger data.
Access an instance of this class from the model context using
``self.context.ledger``.
Run a query using one of the ``get_`` methods, for example
``context.ledger.get_transactions()``. The query parameters are
common to all query methods.
"""
Transaction = TransactionTable
Trace = TraceTable
Block = BlockTable
Contract = ContractTable
Log = LogTable
Receipt = ReceiptTable
Token = TokenTable
TokenTransfer = TokenTransferTable
@classmethod
def Aggregate(cls, expression: str, as_name: str): # pylint: disable=invalid-name
"""
Return a new LedgerAggregate instance that can be used in
an aggregates list.
        For example::
aggregates = [Ledger.Aggregate(f'SUM({Ledger.Block.Columns.GAS_USED})', 'total_gas')]
"""
return LedgerAggregate(expression=expression, asName=as_name)
def __init__(self, context):
# We type the property here to avoid circular ref
self.context = context
def _validate_columns(self, model_slug: str,
columns: List[str],
                          ledger_object_type: Type[LedgerTable]):
column_set = ledger_object_type.columns()
for column in columns:
if column.lower() not in column_set:
raise InvalidColumnException(
model_slug,
column, list(column_set), "invalid column name")
def _send_cwgo_query(self, # pylint: disable=too-many-arguments
model_slug: str,
table_def: Type[LedgerTable],
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
if not columns and not aggregates:
raise InvalidQueryException(
model_slug, f'{model_slug} call must have at least one column or aggregate.')
if columns is None:
columns = []
else:
self._validate_columns(model_slug, columns, table_def)
if where is None and limit is None and not aggregates:
raise InvalidQueryException(
model_slug,
f'{model_slug} call must have a where or limit value for non-aggregate queries.')
return self.context.run_model(model_slug,
{'columns': columns,
'aggregates': aggregates,
'where': where,
'groupBy': group_by,
'having': having,
'orderBy': order_by,
'limit': limit,
'offset': offset},
return_type=LedgerModelOutput)
@query_method('Transaction')
def get_transactions(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Transactions table.
"""
return self._send_cwgo_query('ledger.transaction_data',
TransactionTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Trace')
def get_traces(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Traces table.
"""
return self._send_cwgo_query('ledger.trace_data',
TraceTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Log')
def get_logs(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Logs table.
"""
return self._send_cwgo_query('ledger.log_data',
LogTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Contract')
def get_contracts(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Contracts table.
"""
return self._send_cwgo_query('ledger.contract_data',
ContractTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Block')
def get_blocks(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Blocks table.
"""
return self._send_cwgo_query('ledger.block_data',
BlockTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Receipt')
def get_receipts(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the Receipts table.
"""
return self._send_cwgo_query('ledger.receipt_data',
ReceiptTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('Token')
def get_erc20_tokens(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the ERC20 Tokens table.
"""
return self._send_cwgo_query('ledger.erc20_token_data',
TokenTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
@query_method('TokenTransfer')
def get_erc20_transfers(self, # pylint: disable=too-many-arguments
columns: Union[List[str], None] = None,
where: Union[str, None] = None,
group_by: Union[str, None] = None,
order_by: Union[str, None] = None,
limit: Union[str, None] = None,
offset: Union[str, None] = None,
aggregates: Union[List[LedgerAggregate], None] = None,
having: Union[str, None] = None) -> LedgerModelOutput:
"""
Query data from the ERC20 Token Transfers table.
"""
return self._send_cwgo_query('ledger.erc20_token_transfer_data',
TokenTransferTable,
columns, where, group_by,
order_by, limit, offset,
aggregates, having)
| 44.416107
| 97
| 0.507782
|
cd7a360ae253cdcdf59e641f9adefc4ca87dc299
| 13,300
|
py
|
Python
|
paddlespeech/s2t/decoders/recog_bin.py
|
JiehangXie/PaddleSpeech
|
60090b49ec27437127ab62358026dd5bb95fccc7
|
[
"Apache-2.0"
] | 1,540
|
2017-11-14T13:26:33.000Z
|
2021-11-09T14:05:08.000Z
|
paddlespeech/s2t/decoders/recog_bin.py
|
JiehangXie/PaddleSpeech
|
60090b49ec27437127ab62358026dd5bb95fccc7
|
[
"Apache-2.0"
] | 599
|
2017-11-14T13:19:12.000Z
|
2021-11-09T01:58:26.000Z
|
paddlespeech/s2t/decoders/recog_bin.py
|
JiehangXie/PaddleSpeech
|
60090b49ec27437127ab62358026dd5bb95fccc7
|
[
"Apache-2.0"
] | 449
|
2017-11-14T12:48:46.000Z
|
2021-11-06T09:34:33.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference espnet Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""End-to-end speech recognition model decoding script."""
import logging
import os
import random
import sys
from distutils.util import strtobool
import configargparse
import numpy as np
def get_parser():
"""Get default arguments."""
parser = configargparse.ArgumentParser(
description="Transcribe text from speech using "
"a speech recognition model on one CPU or GPU",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter, )
parser.add(
'--model-name',
type=str,
default='u2_kaldi',
help='model name, e.g: deepspeech2, u2, u2_kaldi, u2_st')
# general configuration
parser.add("--config", is_config_file=True, help="Config file path")
parser.add(
"--config2",
is_config_file=True,
help="Second config file path that overwrites the settings in `--config`",
)
parser.add(
"--config3",
is_config_file=True,
help="Third config file path that overwrites the settings "
"in `--config` and `--config2`", )
parser.add_argument("--ngpu", type=int, default=0, help="Number of GPUs")
parser.add_argument(
"--dtype",
choices=("float16", "float32", "float64"),
default="float32",
help="Float precision (only available in --api v2)", )
parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
parser.add_argument("--seed", type=int, default=1, help="Random seed")
parser.add_argument(
"--verbose", "-V", type=int, default=2, help="Verbose option")
parser.add_argument(
"--batchsize",
type=int,
default=1,
help="Batch size for beam search (0: means no batch processing)", )
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
help="The configuration file for the pre-processing", )
parser.add_argument(
"--api",
default="v2",
choices=["v2"],
help="Beam search APIs "
"v2: Experimental API. It supports any models that implements ScorerInterface.",
)
# task related
parser.add_argument(
"--recog-json", type=str, help="Filename of recognition data (json)")
parser.add_argument(
"--result-label",
type=str,
required=True,
help="Filename of result label data (json)", )
# model (parameter) related
parser.add_argument(
"--model",
type=str,
required=True,
help="Model file parameters to read")
parser.add_argument(
"--model-conf", type=str, default=None, help="Model config file")
parser.add_argument(
"--num-spkrs",
type=int,
default=1,
choices=[1, 2],
help="Number of speakers in the speech", )
parser.add_argument(
"--num-encs",
default=1,
type=int,
help="Number of encoders in the model.")
# search related
parser.add_argument(
"--nbest", type=int, default=1, help="Output N-best hypotheses")
parser.add_argument("--beam-size", type=int, default=1, help="Beam size")
parser.add_argument(
"--penalty", type=float, default=0.0, help="Incertion penalty")
parser.add_argument(
"--maxlenratio",
type=float,
default=0.0,
help="""Input length ratio to obtain max output length.
                        If maxlenratio=0.0 (default), it uses an end-detect function
to automatically find maximum hypothesis lengths.
If maxlenratio<0.0, its absolute value is interpreted
as a constant max output length""", )
parser.add_argument(
"--minlenratio",
type=float,
default=0.0,
help="Input length ratio to obtain min output length", )
parser.add_argument(
"--ctc-weight",
type=float,
default=0.0,
help="CTC weight in joint decoding")
parser.add_argument(
"--weights-ctc-dec",
type=float,
action="append",
help="ctc weight assigned to each encoder during decoding."
"[in multi-encoder mode only]", )
parser.add_argument(
"--ctc-window-margin",
type=int,
default=0,
help="""Use CTC window with margin parameter to accelerate
                        CTC/attention decoding especially on GPU. A smaller margin
makes decoding faster, but may increase search errors.
If margin=0 (default), this function is disabled""", )
# transducer related
parser.add_argument(
"--search-type",
type=str,
default="default",
choices=["default", "nsc", "tsd", "alsd", "maes"],
help="""Type of beam search implementation to use during inference.
Can be either: default beam search ("default"),
N-Step Constrained beam search ("nsc"), Time-Synchronous Decoding ("tsd"),
Alignment-Length Synchronous Decoding ("alsd") or
modified Adaptive Expansion Search ("maes").""", )
parser.add_argument(
"--nstep",
type=int,
default=1,
help="""Number of expansion steps allowed in NSC beam search or mAES
(nstep > 0 for NSC and nstep > 1 for mAES).""", )
parser.add_argument(
"--prefix-alpha",
type=int,
default=2,
help="Length prefix difference allowed in NSC beam search or mAES.", )
parser.add_argument(
"--max-sym-exp",
type=int,
default=2,
help="Number of symbol expansions allowed in TSD.", )
parser.add_argument(
"--u-max",
type=int,
default=400,
help="Length prefix difference allowed in ALSD.", )
parser.add_argument(
"--expansion-gamma",
type=float,
default=2.3,
help="Allowed logp difference for prune-by-value method in mAES.", )
parser.add_argument(
"--expansion-beta",
type=int,
default=2,
help="""Number of additional candidates for expanded hypotheses
selection in mAES.""", )
parser.add_argument(
"--score-norm",
type=strtobool,
nargs="?",
default=True,
help="Normalize final hypotheses' score by length", )
parser.add_argument(
"--softmax-temperature",
type=float,
default=1.0,
help="Penalization term for softmax function.", )
# rnnlm related
parser.add_argument(
"--rnnlm", type=str, default=None, help="RNNLM model file to read")
parser.add_argument(
"--rnnlm-conf",
type=str,
default=None,
help="RNNLM model config file to read")
parser.add_argument(
"--word-rnnlm",
type=str,
default=None,
help="Word RNNLM model file to read")
parser.add_argument(
"--word-rnnlm-conf",
type=str,
default=None,
help="Word RNNLM model config file to read", )
parser.add_argument(
"--word-dict", type=str, default=None, help="Word list to read")
parser.add_argument(
"--lm-weight", type=float, default=0.1, help="RNNLM weight")
# ngram related
parser.add_argument(
"--ngram-model",
type=str,
default=None,
help="ngram model file to read")
parser.add_argument(
"--ngram-weight", type=float, default=0.1, help="ngram weight")
parser.add_argument(
"--ngram-scorer",
type=str,
default="part",
choices=("full", "part"),
help="""if the ngram is set as a part scorer, similar with CTC scorer,
ngram scorer only scores topK hypethesis.
if the ngram is set as full scorer, ngram scorer scores all hypthesis
the decoding speed of part scorer is musch faster than full one""",
)
# streaming related
parser.add_argument(
"--streaming-mode",
type=str,
default=None,
choices=["window", "segment"],
help="""Use streaming recognizer for inference.
`--batchsize` must be set to 0 to enable this mode""", )
parser.add_argument(
"--streaming-window", type=int, default=10, help="Window size")
parser.add_argument(
"--streaming-min-blank-dur",
type=int,
default=10,
help="Minimum blank duration threshold", )
parser.add_argument(
"--streaming-onset-margin", type=int, default=1, help="Onset margin")
parser.add_argument(
"--streaming-offset-margin", type=int, default=1, help="Offset margin")
# non-autoregressive related
# Mask CTC related. See https://arxiv.org/abs/2005.08700 for the detail.
parser.add_argument(
"--maskctc-n-iterations",
type=int,
default=10,
help="Number of decoding iterations."
"For Mask CTC, set 0 to predict 1 mask/iter.", )
parser.add_argument(
"--maskctc-probability-threshold",
type=float,
default=0.999,
help="Threshold probability for CTC output", )
# quantize model related
parser.add_argument(
"--quantize-config",
nargs="*",
help="Quantize config list. E.g.: --quantize-config=[Linear,LSTM,GRU]",
)
parser.add_argument(
"--quantize-dtype",
type=str,
default="qint8",
help="Dtype dynamic quantize")
parser.add_argument(
"--quantize-asr-model",
type=bool,
default=False,
help="Quantize asr model", )
parser.add_argument(
"--quantize-lm-model",
type=bool,
default=False,
help="Quantize lm model", )
return parser
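# Hedged sketch (not part of the original script): a minimal invocation of the
# parser above, using placeholder paths; --model and --result-label are the
# only required options.
#
#     parser = get_parser()
#     args = parser.parse_args(
#         ["--model", "model.pdparams", "--result-label", "result.json"])
#     print(args.beam_size, args.api)  # -> 1 v2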
def main(args):
"""Run the main decoding function."""
parser = get_parser()
parser.add_argument(
"--output", metavar="CKPT_DIR", help="path to save checkpoint.")
parser.add_argument(
"--checkpoint_path", type=str, help="path to load checkpoint")
parser.add_argument("--dict-path", type=str, help="path to load checkpoint")
args = parser.parse_args(args)
if args.ngpu == 0 and args.dtype == "float16":
raise ValueError(
f"--dtype {args.dtype} does not support the CPU backend.")
# logging info
if args.verbose == 1:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose == 2:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
logging.info(args)
# check CUDA_VISIBLE_DEVICES
if args.ngpu > 0:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is None:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
elif args.ngpu != len(cvd.split(",")):
logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
sys.exit(1)
# TODO(mn5k): support of multiple GPUs
if args.ngpu > 1:
logging.error("The program only supports ngpu=1.")
sys.exit(1)
# display PYTHONPATH
logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
# seed setting
random.seed(args.seed)
np.random.seed(args.seed)
logging.info("set random seed = %d" % args.seed)
# validate rnn options
if args.rnnlm is not None and args.word_rnnlm is not None:
logging.error(
"It seems that both --rnnlm and --word-rnnlm are specified. "
"Please use either option.")
sys.exit(1)
# recog
if args.num_spkrs == 1:
if args.num_encs == 1:
# Experimental API that supports custom LMs
if args.api == "v2":
from paddlespeech.s2t.decoders.recog import recog_v2
recog_v2(args)
else:
raise ValueError("Only support --api v2")
else:
if args.api == "v2":
raise NotImplementedError(
f"--num-encs {args.num_encs} > 1 is not supported in --api v2"
)
elif args.num_spkrs == 2:
raise ValueError("asr_mix not supported.")
if __name__ == "__main__":
main(sys.argv[1:])
| 35.37234
| 88
| 0.596842
|
64214edec9794a5dd0e9c02b02a970283cbcdbb2
| 2,114
|
py
|
Python
|
main.basic.py
|
GavinPacini/cas-eth-blockchain
|
9da976ed9a9a1949e311b0277f703300191cfb81
|
[
"MIT"
] | 1
|
2021-02-26T23:47:24.000Z
|
2021-02-26T23:47:24.000Z
|
main.basic.py
|
GavinPacini/cas-eth-blockchain
|
9da976ed9a9a1949e311b0277f703300191cfb81
|
[
"MIT"
] | null | null | null |
main.basic.py
|
GavinPacini/cas-eth-blockchain
|
9da976ed9a9a1949e311b0277f703300191cfb81
|
[
"MIT"
] | null | null | null |
import hashlib
import json
import pprint
from time import time, ctime, sleep
class Blockchain(object):
def __init__(self):
self.chain = []
self.pending_transactions = []
self.new_block(previous_hash="A blockchain made for ETHZ's CAS in Applied Information Technology", nonce=100)
def new_block(self, nonce, previous_hash=None):
block = {
'index': len(self.chain) + 1,
'timestamp': ctime(time()),
'transactions': self.pending_transactions,
'nonce': nonce,
'previous_hash': previous_hash or self.hash(self.chain[-1])
}
self.pending_transactions = []
self.chain.append(block)
return block
@property
def last_block(self):
return self.chain[-1]
def new_transaction(self, sender, recipient, amount):
transaction = {
'sender': sender,
'recipient': recipient,
'amount': amount
}
self.pending_transactions.append(transaction)
return self.last_block['index'] + 1
@staticmethod
def hash(block):
string_object = json.dumps(block, sort_keys=True)
block_string = string_object.encode()
raw_hash = hashlib.sha256(block_string)
hex_hash = raw_hash.hexdigest()
return hex_hash
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
# Setup a "pretty printer" to make our outputs easier to read
pp = pprint.PrettyPrinter(indent=4)
# Create an instance of our blockchain
blockchain = Blockchain()
# Add some transactions
t1 = blockchain.new_transaction("Gavin", "Gino", '10 BTC')
t2 = blockchain.new_transaction("Gavin", "Manu", '20 BTC')
# Wait, then generate a new block
sleep(5)
blockchain.new_block(12345)
t3 = blockchain.new_transaction("Gino", "Gavin", '5 BTC')
t4 = blockchain.new_transaction("Manu", "Gavin", '10 BTC')
sleep(5)
blockchain.new_block(6789)
# Print our complete blockchain
print("Full Blockchain:")
pp.pprint(blockchain.chain)
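    # Hedged extra check (added for illustration, not in the original script):
    # each block's previous_hash should equal the hash of the block before it.
    for previous, current in zip(blockchain.chain, blockchain.chain[1:]):
        assert current['previous_hash'] == blockchain.hash(previous)
    print("Hash links between consecutive blocks verified")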
| 27.815789
| 117
| 0.634342
|
5b5192ef8528fc745e611649db4e8b9eea53e7f5
| 631
|
py
|
Python
|
allennlp/modules/similarity_functions/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 2
|
2021-04-27T19:56:28.000Z
|
2021-08-19T05:34:37.000Z
|
allennlp/modules/similarity_functions/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 5
|
2021-05-03T14:40:33.000Z
|
2021-05-03T14:40:34.000Z
|
allennlp/modules/similarity_functions/__init__.py
|
justindujardin/allennlp
|
c4559f3751775aa8bc018db417edc119d29d8051
|
[
"Apache-2.0"
] | 1
|
2021-02-04T08:42:23.000Z
|
2021-02-04T08:42:23.000Z
|
"""
A `SimilarityFunction` takes a pair of tensors with the same shape, and computes a similarity
function on the vectors in the last dimension.
"""
from allennlp.modules.similarity_functions.bilinear import BilinearSimilarity
from allennlp.modules.similarity_functions.cosine import CosineSimilarity
from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity
from allennlp.modules.similarity_functions.linear import LinearSimilarity
from allennlp.modules.similarity_functions.multiheaded import MultiHeadedSimilarity
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
| 57.363636
| 93
| 0.877971
|
5ce61944edbbd8462f5bc2df82c231292760a4ef
| 25,272
|
py
|
Python
|
tests/unit/test_exam.py
|
agossino/exam2pdf
|
bc396c5dbf030fe7c6eb84723ceafc3a3f3467e1
|
[
"MIT"
] | null | null | null |
tests/unit/test_exam.py
|
agossino/exam2pdf
|
bc396c5dbf030fe7c6eb84723ceafc3a3f3467e1
|
[
"MIT"
] | null | null | null |
tests/unit/test_exam.py
|
agossino/exam2pdf
|
bc396c5dbf030fe7c6eb84723ceafc3a3f3467e1
|
[
"MIT"
] | null | null | null |
import pytest
from pathlib import Path
import random
import exam2pdf
from exam2pdf.exam import SerializeExam
from exam2pdf.utility import ItemLevel, Exam2pdfException
def test_exam():
"""GIVEN an empty Exam
THEN questions attribute is an empty tuple
"""
ex = exam2pdf.Exam()
assert ex.questions == tuple()
def test_exam_init(question1, question2):
"""GIVEN Exam initialized with one/two questions
THEN questions attribute have the given questions
"""
ex1 = exam2pdf.Exam(question1)
ex2 = exam2pdf.Exam(question1, question2)
assert ex1.questions == (question1,)
assert ex2.questions == (question1, question2)
def test_exam_questions_add(question1, question2):
"""GIVEN an Exam
WHEN add_question two questions
THEN added questions are in questions attribute
"""
ex = exam2pdf.Exam()
ex.add_question(question1)
ex.add_question(question2)
assert question1 in ex.questions
assert question2 in ex.questions
def test_exam_questions_add_and_set(question1, question2):
"""GIVEN an Exam
WHEN a question is added
AND a tuple with one question is set
THEN attribute assignment override the added questions
"""
ex = exam2pdf.Exam()
ex.add_question(question1)
ex.questions = (question2,)
assert question1 not in ex.questions
assert question2 in ex.questions
def test_exam_load_sequence1():
    """GIVEN an Exam
    THEN Exam.load_sequence is found set to default.
"""
ex = exam2pdf.Exam()
assert ex.load_sequence == ()
def test_exam_load_sequence2():
"""GIVEN an Exam
AND Exam.load_sequence is set
THEN Exam.load_sequence is found set
"""
ex = exam2pdf.Exam()
expected = ("hello", "2", "times")
ex.load_sequence = (expected[0], int(expected[1]), expected[2])
assert ex.load_sequence == expected
def test_exam_show_up1():
"""GIVEN an Exam
THEN Exam.show_up is found set to default.
"""
ex = exam2pdf.Exam()
expected = exam2pdf.exam.ATTRIBUTE_SHOWED_UP
assert ex.show_up == expected
def test_exam_show_up2():
"""GIVEN an Exam
AND Exam.show_up is set
THEN Exam.show_up matches.
"""
text = "one two three"
ex = exam2pdf.Exam()
ex.show_up = text.split()
assert ex.show_up == tuple(text.split())
def test_exam_add_path_parent1(tmp_path):
"""test with a file path
"""
image = Path("images/image.png")
file_path = tmp_path / "A.txt"
file_path.touch()
q1 = exam2pdf.Question("q1 text", "")
q1.answers = (
exam2pdf.Answer("a1 text", image),
exam2pdf.Answer("a2 text", image),
)
q2 = exam2pdf.Question("q2 text", "", image)
q2.add_answer(exam2pdf.Answer("a3 text"))
ex = exam2pdf.Exam(q1, q2)
ex.add_path_parent(file_path)
assert ex.questions[0].image == Path()
assert ex.questions[0].answers[0].image == file_path.parent / image
assert ex.questions[0].answers[1].image == file_path.parent / image
assert ex.questions[1].image == file_path.parent / image
assert ex.questions[1].answers[0].image == Path()
def test_exam_add_path_parent2(tmp_path):
image = Path("images/image.png")
folder_path = tmp_path
q1 = exam2pdf.Question("q1 text", "")
q1.answers = (
exam2pdf.Answer("a1 text", image),
exam2pdf.Answer("a2 text", image),
)
q2 = exam2pdf.Question("q2 text", "", image)
q2.add_answer(exam2pdf.Answer("a3 text"))
ex = exam2pdf.Exam(q1, q2)
ex.add_path_parent(folder_path)
assert ex.questions[0].image == Path()
assert ex.questions[0].answers[0].image == folder_path / image
assert ex.questions[0].answers[1].image == folder_path / image
assert ex.questions[1].image == folder_path / image
assert ex.questions[1].answers[0].image == Path()
def test_exam_load0():
"""test empty iterable
"""
ex = exam2pdf.Exam()
ex.load(iter(()))
assert ex.questions == tuple()
def test_exam_load1():
"""test without setting _attribute_selector
2 rows -> 2 questions with 2 answers each but second answer image is not provided
"""
data = (
dict(
[
("text", "ab"),
("subject", "ac"),
("image", "ad"),
("level", "1"),
("a0 text", "ae"),
("a0 image", "af"),
("a1 text", "ag"),
]
),
dict(
[
("text", "ba"),
("subject", "bc"),
("image", "bd"),
("level", "2"),
("a0 text", "be"),
("a0 image", "bf"),
("a1 text", "bg"),
]
),
)
ex = exam2pdf.Exam()
ex.load(data)
for i in (0, 1):
assert ex.questions[i].text == data[i]["text"]
assert ex.questions[i].subject == data[i]["subject"]
assert ex.questions[i].image == Path(data[i]["image"])
assert ex.questions[i].level == int(data[i]["level"])
assert ex.questions[i].answers[0].text == data[i]["a0 text"]
assert ex.questions[i].answers[0].image == Path(data[i]["a0 image"])
assert ex.questions[i].answers[1].text == data[i]["a1 text"]
assert ex.questions[i].answers[1].image == Path() # default value
# third answer of second question is not provided
with pytest.raises(IndexError):
_ = ex.questions[1].answers[2]
# third question is not provided
with pytest.raises(IndexError):
_ = ex.questions[2]
def test_exam_load2():
"""test without setting _attribute_selector
and missing row
"""
ex = exam2pdf.Exam()
reader = (dict([]), dict([("A", "What?"), ("B", "topic")]))
ex.load(reader)
print(ex)
assert ex.questions[0].text == "What?"
assert ex.questions[0].subject == "topic"
def test_exam_load3():
"""test setting _attribute_selector
"""
data = (
dict(
[
("A text", "A"),
("B text", "B"),
("text", "T"),
("C text", "A3"),
("D text", "A4"),
("subject", "S"),
("level", 2),
("void", ""),
]
),
)
ex = exam2pdf.Exam()
ex.load_sequence = (
"text",
"subject",
"void",
"level",
"A text",
"void",
"B text",
"void",
"C text",
)
ex.load(data)
assert ex.questions[0].text == data[0]["text"]
assert ex.questions[0].subject == data[0]["subject"]
assert ex.questions[0].image == Path()
assert ex.questions[0].level == data[0]["level"]
assert ex.questions[0].answers[0].text == data[0]["A text"]
assert ex.questions[0].answers[0].image == Path()
assert ex.questions[0].answers[1].text == data[0]["B text"]
assert ex.questions[0].answers[1].image == Path()
assert ex.questions[0].answers[2].text == data[0]["C text"]
assert ex.questions[0].answers[2].image == Path()
# no further elements loaded
with pytest.raises(IndexError):
_ = ex.questions[0].answers[3]
with pytest.raises(IndexError):
_ = ex.questions[1].answers[2]
def test_exam_load4():
"""test setting _attribute_selector
"""
data = (
dict([("text", "T"), ("subject", "S"), ("XXX level", 2), ("void", "")]),
)
ex = exam2pdf.Exam()
ex.load_sequence = ("text", "subject", "void", "level")
with pytest.raises(exam2pdf.Exam2pdfException):
ex.load(data)
def test_shuffle():
data = (
dict(
[
("question", " Q1"),
("A", "A1"),
("B", "B1"),
("C", "C1"),
("D", "D1"),
("E", "E1"),
("void", ""),
]
),
dict(
[
("question", "Q2"),
("A", "A2"),
("B", "B2"),
("C", "C2"),
("D", "D2"),
("E", "E2"),
("void", ""),
]
),
)
correct_values = ("D", "A")
ex = exam2pdf.Exam()
ex.load_sequence = (
"question",
"void",
"void",
"void",
"A",
"void",
"B",
"void",
"C",
"void",
"D",
"void",
"E",
)
ex.load(data)
random.seed(1)
ex.answers_shuffle()
for question, value in zip(ex.questions, correct_values):
assert question.correct_option == value
def test_questions_shuffle(dummy_exam):
"""GIVEN exam with five questions
WHEN questions_shuffle is called (questions order is mixed)
THEN questions order is changed
"""
expected_text = ("q3 text", "q4 text", "q5 text", "q1 text", "q2 text")
ex = dummy_exam
random.seed(1)
ex.questions_shuffle()
for i, question in enumerate(ex.questions):
assert question.text == expected_text[i]
def test_exam_print():
data = (
dict(
[
("field A", "A1"),
("field B", "A2"),
("field C", "T"),
("field D", "A3"),
("field E", "A4"),
("field F", "S"),
("field G", 2),
("void", ""),
]
),
)
text, q_image, level, a_image = (
f"text: A1",
f"image: .",
f"level: 2",
f"image: S",
)
ex = exam2pdf.Exam()
ex.load_sequence = ("field A", "void", "void", "field G", "void", "field F")
ex.load(data)
assert text in ex.__str__()
assert q_image in ex.__str__()
assert level in ex.__str__()
assert a_image in ex.__str__()
def test_exam_question():
question1 = exam2pdf.Question("mc quest1 text", "subject")
question1.answers = (
exam2pdf.Answer("Q1 A1"),
exam2pdf.Answer("Q1 A2"),
exam2pdf.Answer("Q1 A3"),
)
question2 = exam2pdf.Question("mc quest2 text", "subject")
question2.answers = (
exam2pdf.Answer("Q2 A1"),
exam2pdf.Answer("Q2 A2"),
exam2pdf.Answer("Q2 A3"),
)
ex = exam2pdf.Exam(question1, question2)
assert ex.questions[0].answers[1].image == Path()
assert ex.questions[0].correct_answer.text == "Q1 A1"
assert ex.questions[1].text == "mc quest2 text"
def test_exam_truefalse_question():
question1 = exam2pdf.TrueFalseQuest("mc quest1 text", "subject")
question1.answers = (
exam2pdf.TrueFalseAnswer(True),
exam2pdf.TrueFalseAnswer(False),
)
question2 = exam2pdf.Question("mc quest2 text", "subject")
question2.answers = (
exam2pdf.TrueFalseAnswer(False),
exam2pdf.TrueFalseAnswer(True),
)
ex = exam2pdf.Exam(question1, question2)
assert ex.questions[0].answers[1].image == Path()
assert ex.questions[0].correct_answer.boolean is True
assert ex.questions[1].text == "mc quest2 text"
assert ex.questions[1].correct_answer.text == "Falso"
def test_exam_mix_question():
question = exam2pdf.Question("mc quest1 text", "subject")
question.answers = (
exam2pdf.Answer("Q1 A1"),
exam2pdf.Answer("Q1 A2"),
exam2pdf.Answer("Q1 A3"),
)
truefalse_quest = exam2pdf.TrueFalseQuest("mc quest2 text", "subject")
truefalse_quest.answers = (
exam2pdf.TrueFalseAnswer(False),
exam2pdf.TrueFalseAnswer(True),
)
ex = exam2pdf.Exam(question, truefalse_quest)
assert ex.questions[0].answers[1].image == Path()
assert ex.questions[0].correct_option == "A"
assert ex.questions[1].text == "mc quest2 text"
assert ex.questions[1].correct_answer.text == "Falso"
def test_from_csv_empty_file(empty_file):
"""GIVEN an empty csv file
WHEN it tries to read
THEN exception is raised
"""
ex = exam2pdf.Exam()
with pytest.raises(exam2pdf.Exam2pdfException):
ex.from_csv(empty_file)
def test_from_csv_no_question(no_question_file):
"""GIVEN a csv file without question
WHEN it tries to read
THEN exception is raised
"""
ex = exam2pdf.Exam()
with pytest.raises(exam2pdf.Exam2pdfException):
ex.from_csv(no_question_file)
def test_from_csv_different_encodings(files_with_different_encoding):
"""GIVEN csv files with different encodings
WHEN they are read
THEN it does not fail
"""
ex = exam2pdf.Exam()
for file_path in files_with_different_encoding:
ex.from_csv(file_path)
def test_from_csv_one_question(tmp_path, question_data_file):
"""GIVEN a csv file with one multi choice
question and three answers with images
WHEN it is read
THEN a sample of correct information are found"""
ex = exam2pdf.Exam()
ex.from_csv(question_data_file)
assert len(ex.questions) == 1
assert ex.questions[0].text == "Q"
assert len(ex.questions[0].answers) == 3
assert ex.questions[0].answers[2].image == tmp_path / "ci"
def test_from_csv_one_truefalse_question(truefalse_question_file):
"""GIVEN a csv file with one truefalse question
WHEN it is read
    THEN it is recognized as truefalse because a False option is found"""
ex = exam2pdf.Exam()
ex.load_sequence = ("question", "void", "void", "void", "A", "void", "B")
ex.from_csv(truefalse_question_file)
assert ex.questions[0].correct_option == "Falso"
def test_from_csv_kwargs(weired_csv_file):
"""GIVEN a csv file
    WHEN a legitimate keyword argument for DictReader is passed to from_csv
THEN keyword argument is correctly applied"""
fieldnames = (
"question",
"subject",
"image",
"level",
"A",
"Ai",
"B",
"Bi",
"C",
"Ci",
)
ex = exam2pdf.Exam()
ex.from_csv(weired_csv_file, fieldnames=fieldnames, delimiter=";")
assert ex.questions[0].text == "Q"
assert ex.questions[0].level == 1
def test_copy_exam(dummy_exam):
"""GIVEN an exam
WHEN a copy is made
THEN the new one is identical"""
ex = dummy_exam
new_ex = ex.copy()
for ex_question, new_ex_question in zip(ex.questions, new_ex.questions):
assert ex_question.text == new_ex_question.text
assert ex_question.level == new_ex_question.level
assert ex_question.correct_index == new_ex_question.correct_index
assert ex_question.correct_option == new_ex_question.correct_option
for ex_answer, new_ex_answer in zip(
ex_question.answers, new_ex_question.answers
):
assert ex_answer.text == new_ex_answer.text
assert ex_answer.image == new_ex_answer.image
def test_copy_exam_add_question(dummy_exam):
"""GIVEN a exam copy
WHEN a question is added to the copy
THEN the original number of questions does not change"""
ex = dummy_exam
ex_questions_len = len(ex.questions)
new_ex = ex.copy()
new_ex.add_question(exam2pdf.Question("new"))
assert len(ex.questions) == ex_questions_len
def test_copy_mix_exam_add_question(mix_dummy_exam):
ex = mix_dummy_exam
ex_questions_len = len(ex.questions)
new_ex = ex.copy()
new_ex.add_question(exam2pdf.Question("new"))
assert len(ex.questions) == ex_questions_len
def test_copy_exam_add_answer(dummy_exam):
ex = dummy_exam
question_1_answers_len = len(ex.questions[0].answers)
new_ex = ex.copy()
new_ex.questions[0].add_answer(exam2pdf.Answer("q1 a3"))
assert len(ex.questions[0].answers) == question_1_answers_len
def test_copy_exam_set_correct_answer(dummy_exam):
ex = dummy_exam
question_1_correct_index = ex.questions[1].correct_index
new_ex = ex.copy()
new_ex.questions[1].correct_index = question_1_correct_index + 1
assert ex.questions[1].correct_index == question_1_correct_index
def test_copy_exam_shuffle_answers(dummy_exam):
ex = dummy_exam
ex_correct_answers = tuple(
question.correct_index for question in ex.questions
)
new_ex = ex.copy()
new_ex.answers_shuffle()
assert (
tuple(question.correct_index for question in ex.questions)
== ex_correct_answers
)
def test_copy_exam_shuffle_questions(dummy_exam):
ex = dummy_exam
ex_questions = tuple(question.text for question in ex.questions)
new_ex = ex.copy()
new_ex.questions_shuffle()
assert tuple(question.text for question in ex.questions) == ex_questions
# TODO: is printing an empty pdf the right behaviour?
def test_print_exam(tmp_path):
"""GIVEN an empty Exam instance
WHEN print_exam is called
THEN an empty pdf file is saved"""
pdf_magic_no = b"PDF"
file_path = tmp_path / "Exam"
ex = exam2pdf.Exam()
ex.print_exam(file_path)
try:
data = file_path.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert data.find(pdf_magic_no) == 1
def test_print_one_exam(tmp_path, dummy_exam_with_img):
"""GIVEN an Exam instance with images
WHEN print_exam is called
THEN a pdf file is saved"""
pdf_magic_no = b"PDF"
file_path = tmp_path / "Exam.pdf"
ex = dummy_exam_with_img
ex.print_exam(file_path)
try:
data = file_path.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert data.find(pdf_magic_no) == 1
def test_print_exam_without_img_questions(
tmp_path, dummy_exam_questions_without_img
):
"""GIVEN an Exam which question images are not found
WHER print_exam is called
THEN Exception is risen
"""
file_path = tmp_path / "Exam.pdf"
ex = dummy_exam_questions_without_img
with pytest.raises(Exam2pdfException):
ex.print_exam(file_path)
def test_print_exam_without_img_answers(
tmp_path, dummy_exam_answers_without_img
):
"""GIVEN an Exam which answer images are not found
WHEN print_exam is called
THEN Exception is risen
"""
file_path = tmp_path / "Exam.pdf"
ex = dummy_exam_answers_without_img
with pytest.raises(Exam2pdfException):
ex.print_exam(file_path)
def test_print_exam_without_permission(tmp_path, no_write_permission_dir):
"""GIVEN an Exam
    WHEN the user has no permission to write in the directory
    AND print_exam is called
    THEN an exception is raised
"""
file_path = no_write_permission_dir / "exam.pdf"
ex = exam2pdf.Exam()
with pytest.raises(Exam2pdfException):
ex.print_exam(file_path)
def test_print_checker_before_exam(tmp_path, dummy_exam_with_img):
"""GIVEN an Exam
WHEN print_checker is called before any print_exam call
    THEN an exception is raised
"""
file_path = tmp_path / "Checker.pdf"
ex = dummy_exam_with_img
with pytest.raises(Exam2pdfException):
ex.print_checker(file_path)
def test_print_checker_2calls(tmp_path, dummy_exam_with_img):
"""GIVEN an Exam instance with images
WHEN print_exam is first called, then print_checker
THEN a checker pdf file is saved"""
pdf_magic_no = b"PDF"
exam_file_path = tmp_path / "Exam.pdf"
checker_file_path = tmp_path / "Checker.pdf"
ex = dummy_exam_with_img
ex.print_exam(exam_file_path)
ex.print_checker(checker_file_path)
try:
data = checker_file_path.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert data.find(pdf_magic_no) == 1
def test_print_checker_1call(tmp_path, dummy_exam_with_img):
"""GIVEN an Exam instance with images
WHEN print is called
THEN exam and checker pdf file are saved"""
pdf_magic_no = b"PDF"
exam_file_path = tmp_path / "Exam.pdf"
checker_file_path = tmp_path / "Checker.pdf"
ex = dummy_exam_with_img
ex.print(exam_file_path, checker_file_path)
try:
exam_data = exam_file_path.read_bytes()
checker_data = checker_file_path.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert exam_data.find(pdf_magic_no) == 1
assert checker_data.find(pdf_magic_no) == 1
def test_print_two_exams(tmp_path, dummy_exam_with_img):
pdf_magic_no = b"PDF"
file_path = tmp_path / "Exam.pdf"
ex = dummy_exam_with_img
n_copies = 2
ex.print_exam(file_path, n_copies=n_copies)
for num in range(1, n_copies + 1):
out_file = (
tmp_path / f"{file_path.stem}_{num}_{n_copies}{file_path.suffix}"
)
try:
data = out_file.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert data.find(pdf_magic_no) == 1
def test_print_top_item_style(tmp_path, dummy_exam_with_img):
pdf_magic_no = b"PDF"
file_path = tmp_path / "Exam.pdf"
ex = dummy_exam_with_img
ex.top_item_style = {
"top_item_style": {"fontName": "Helvetica", "fontSize": 14}
}
ex.print_exam(file_path)
try:
data = file_path.read_bytes()
except FileNotFoundError:
assert False, "File not found"
assert data.find(pdf_magic_no) == 1
@pytest.mark.interactive
def test_have_a_look(have_a_look, is_correct):
"""GIVEN a pdf file with some not shuffled question and a correction file
WHEN they are displayed
THEN is the layout correct?
"""
answer = is_correct
assert answer == "y\n"
def test_serialize_empty():
ex = exam2pdf.Exam()
serial = SerializeExam(ex)
assert list(serial.assignment()) == []
assert list(serial.checker()) == []
def test_serialize_assignment(dummy_exam):
ex = dummy_exam
serial = SerializeExam(ex)
expected_sequence = [
"q1 text",
"q1 a1",
"q1 a2",
"q2 text",
"q2 a1",
"q2 a2",
"q3 text",
"q4 text",
"q5 text",
]
expected_sequence.reverse()
for item in serial.assignment():
assert item.text == expected_sequence.pop()
def test_serialize_assignment_shuffle_sub(mix_dummy_exam):
"""GIVEN an Exam with mixed questions
WHEN exam is serialized with answers shuffled (sub item), but questions not
THEN answers are found shuffled and questions in original sequence"""
ex = mix_dummy_exam
serial = SerializeExam(ex, shuffle_sub=True)
random.seed(0)
expected_top_sequence = ["1", "2", "3", "4", "5", "6"]
expected_top_sequence.reverse()
expected_sub_sequence = [
"1",
"3",
"2",
"3",
"2",
"1",
"Vero",
"Falso",
"Vero",
"Falso",
"1",
"3",
"2",
"4",
]
expected_sub_sequence.reverse()
for item in serial.assignment():
if item.item_level == ItemLevel.top:
assert expected_top_sequence.pop() in item.text
if item.item_level == ItemLevel.sub:
assert expected_sub_sequence.pop() in item.text
def test_serialize_assignment_shuffle_top(mix_dummy_exam):
"""GIVEN an Exam with mixed questions
WHEN exam is serialized with questions shuffled (top item), but answers not
THEN questions are found shuffled and answers, in the shuffled questions, in original sequence"""
ex = mix_dummy_exam
serial = SerializeExam(ex, shuffle_item=True)
random.seed(0)
expected_top_sequence = ["5", "3", "2", "1", "6", "4"]
expected_top_sequence.reverse()
expected_sub_sequence = [
"Falso",
"Vero",
"Vero",
"Falso",
"1",
"2",
"3",
"1",
"2",
"3",
"1",
"2",
"3",
"4",
]
expected_sub_sequence.reverse()
for item in serial.assignment():
if item.item_level == ItemLevel.top:
assert expected_top_sequence.pop() in item.text
if item.item_level == ItemLevel.sub:
assert expected_sub_sequence.pop() in item.text
def test_serialize_assignment_shuffle_top_n_copies(dummy_exam):
ex = dummy_exam
n_copies = 3
serial = SerializeExam(ex, shuffle_item=True)
random.seed(0)
expected_top_sequence = [
"3",
"2",
"1",
"5",
"4",
"1",
"3",
"2",
"4",
"5",
"2",
"1",
"5",
"3",
"4",
]
expected_top_sequence.reverse()
for _ in range(n_copies):
for item in serial.assignment():
if item.item_level == ItemLevel.top:
assert expected_top_sequence.pop() in item.text
def test_serialize_correction_one_copy(dummy_exam):
ex = dummy_exam
serial = SerializeExam(ex)
for _1 in serial.assignment():
pass
correction = serial.checker()
item = next(correction)
assert item.item_level == ItemLevel.top
assert "correttore" in item.text
assert "1/1" in item.text
def test_serialize_correction_n_copies(dummy_exam):
ex = dummy_exam
n_copies = 4
expected_num_sequence = list(range(n_copies, 0, -1))
serial = SerializeExam(ex)
for _ in range(n_copies):
for _ in serial.assignment():
pass
for item in serial.checker():
if item.item_level == ItemLevel.top:
assert f"{expected_num_sequence.pop()}/{n_copies}" in item.text
| 27.559433
| 101
| 0.61301
|
38cc6c6c0e652518c4d8e74b11553929111f6b42
| 836
|
py
|
Python
|
assetfactory/images/2021/08/10/results-js-and-go-speedup.py
|
reinhrst/reinhrst.github.io
|
3e9dce26c923fca54589ffd1d19d56af0dd27910
|
[
"CC0-1.0"
] | null | null | null |
assetfactory/images/2021/08/10/results-js-and-go-speedup.py
|
reinhrst/reinhrst.github.io
|
3e9dce26c923fca54589ffd1d19d56af0dd27910
|
[
"CC0-1.0"
] | 6
|
2021-07-01T19:35:47.000Z
|
2022-02-06T10:30:35.000Z
|
assetfactory/images/2021/08/10/results-js-and-go-speedup.py
|
reinhrst/reinhrst.github.io
|
3e9dce26c923fca54589ffd1d19d56af0dd27910
|
[
"CC0-1.0"
] | 1
|
2021-08-11T22:46:47.000Z
|
2021-08-11T22:46:47.000Z
|
DATA = [
("Load JS/WebAssembly", 2, 2, 2, 2, 2),
("Load /tmp/lines.txt", 225, 222, 218, 209, 202),
("From JS new Fzf() until ready to ....",
7825, 8548, 1579, 2592, 15069),
("Calling fzf-lib's fzf.New()", 1255, 3121, 963, 612, 899),
("return from fzfNew() function", 358, 7, 0, 18, 1),
("search() until library has result", 4235, 1394, 12132, 4069, 11805),
("Returning search result to JS callback", 1908, 1378, 416, 1173, 6400),
]
def create_plot(ax):
labels = ["Go", "TinyGo", "GopherJS", "Go with JSON", "GopherJS with JSON"]
bottoms = [0, 0, 0, 0, 0]
for row in DATA:
ax.bar(labels, row[1:], label=row[0], bottom=bottoms)
bottoms = [bottoms[i] + row[1:][i] for i in range(len(bottoms))]
ax.set_ylabel("Time (ms)")
ax.set_ylim([0, 45000])
ax.legend(ncol=2)
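# Hedged usage sketch (not part of the original asset script; the figure size
# and output filename are assumptions):
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(10, 6))
    create_plot(ax)
    fig.savefig("results-js-and-go-speedup.png", bbox_inches="tight")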
| 36.347826
| 79
| 0.578947
|
4a8c6a2bfc7916a443312b39b6d4edb62fa41ca0
| 1,670
|
py
|
Python
|
example_usage_script.py
|
defra-data/EODS-API
|
5017e8c6080e84f1aa0d286eda96394fadd13015
|
[
"MIT"
] | 3
|
2021-06-14T09:06:18.000Z
|
2021-08-19T13:50:49.000Z
|
example_usage_script.py
|
defra-data/EODS-API
|
5017e8c6080e84f1aa0d286eda96394fadd13015
|
[
"MIT"
] | 7
|
2021-07-30T13:24:36.000Z
|
2021-11-16T14:02:20.000Z
|
example_usage_script.py
|
defra-data/EODS-API
|
5017e8c6080e84f1aa0d286eda96394fadd13015
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import eodslib
from datetime import datetime
from pathlib import Path
from dotenv import load_dotenv
import os
if __name__ == "__main__":
start_time = datetime.utcnow()
# USER MUST EDIT THE ENVIRONMENT FILE REFERENCED BELOW, OR CREATE THEIR OWN FILE AND REFERENCE IT
load_dotenv('sample.env')
# set configuration based on contents of the ENVIRONMENT FILE.
conn = {
'domain': os.getenv("HOST"),
'username': os.getenv("API_USER"),
'access_token': os.getenv("API_TOKEN"),
}
# use default path to local "output" directory
output_dir = eodslib.make_output_dir(Path.cwd() / 'output')
# specify a particular ARD to download using 'title' keyword
eods_params = {
'output_dir':output_dir,
'find_least_cloud': True,
'sat_id': 2
}
list_of_layers, df = eodslib.query_catalog(conn, **eods_params)
# list_of_results = []
"""for lyr in list_of_layers:
config_wpsprocess = {'template_xml':'gsdownload_template.xml',
'xml_config':{
'template_layer_name':lyr,
'template_outputformat':'image/tiff',
'template_mimetype':'application/zip'
},
'dl_bool':True
}
execution_dict = eodslib.run_wps(conn, config_wpsprocess, output_dir=output_dir)"""
#list_of_results.append(execution_dict)
#eodslib.output_log(list_of_results)
time_diff_mins = round((datetime.utcnow() - start_time).total_seconds() / 60,2)
print('\n\t### Total processing time (mins) = ' + str(time_diff_mins))
print('\t### Script finished')
| 29.821429
| 101
| 0.64012
|
8da6b4d04399947b1c0a0a0b4bc48d8fafc68ae4
| 1,715
|
py
|
Python
|
shop/views.py
|
FrankCasanova/onlineshop
|
1a9011ce3d49976e2584cdadc33893d04947a73b
|
[
"MIT"
] | null | null | null |
shop/views.py
|
FrankCasanova/onlineshop
|
1a9011ce3d49976e2584cdadc33893d04947a73b
|
[
"MIT"
] | null | null | null |
shop/views.py
|
FrankCasanova/onlineshop
|
1a9011ce3d49976e2584cdadc33893d04947a73b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import get_object_or_404, render
from .models import Category, Product
from cart.forms import CartAddProductForm
from .recommender import Recommender
# Create your views here.
def product_list(request, category_slug=None):
category = None
categories = Category.objects.all()
products = Product.objects.filter(available=True)
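    # when a slug is given, narrow the queryset to that category, matched via the translated slug for the active language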
if category_slug:
language = request.LANGUAGE_CODE
category = get_object_or_404(Category,
translations__language_code=language,
translations__slug=category_slug)
products = products.filter(category=category)
return render(request,
template_name='shop/product/list.html',
context={
'category': category,
'products': products,
'categories': categories,
})
def product_detail(request, id, slug):
language = request.LANGUAGE_CODE
product = get_object_or_404(Product,
id=id,
translations__language_code=language,
translations__slug=slug,
available=True)
cart_product_form = CartAddProductForm()
r = Recommender()
recommended_products = r.suggest_products_for([product], 4)
return render(request,
template_name='shop/product/detail.html',
context={
'product': product,
'cart_product_form': cart_product_form,
'recommended_products': recommended_products
})
| 36.489362
| 74
| 0.573761
|
45b4abde4550d67ec7c610ad77a34e5a4e90ce01
| 113
|
py
|
Python
|
tests/update_test_files.py
|
JNDanielson/mplstereonet
|
6196e3fd8fff5b2868f50dbcc96eef804024f62e
|
[
"MIT"
] | 120
|
2015-07-09T21:18:39.000Z
|
2022-03-10T14:29:02.000Z
|
tests/update_test_files.py
|
JNDanielson/mplstereonet
|
6196e3fd8fff5b2868f50dbcc96eef804024f62e
|
[
"MIT"
] | 32
|
2015-01-09T21:52:30.000Z
|
2021-12-15T20:53:37.000Z
|
tests/update_test_files.py
|
JNDanielson/mplstereonet
|
6196e3fd8fff5b2868f50dbcc96eef804024f62e
|
[
"MIT"
] | 49
|
2015-02-21T21:55:05.000Z
|
2021-09-27T12:13:29.000Z
|
#! /usr/bin/python
import sys
import examples
for filename in sys.argv[1:]:
examples.save_output(filename)
| 14.125
| 34
| 0.734513
|
36196486e81a607bef1beedaf94fa7bc7ab5ff6b
| 9,853
|
py
|
Python
|
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_utils.py
|
henrypan/azure-cli
|
8de0ab5216ed3dc700546ae9a3c485710322376b
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_utils.py
|
henrypan/azure-cli
|
8de0ab5216ed3dc700546ae9a3c485710322376b
|
[
"MIT"
] | 2
|
2021-03-25T21:38:56.000Z
|
2021-11-15T17:46:45.000Z
|
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_utils.py
|
Visual-Studio-China/azure-cli-int
|
48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.util import CLIError
from azure.cli.core.commands.parameters import get_resources_in_subscription
from ._constants import (
ACR_RESOURCE_PROVIDER,
ACR_RESOURCE_TYPE,
STORAGE_RESOURCE_TYPE
)
from ._factory import (
get_arm_service_client,
get_storage_service_client,
get_acr_service_client,
get_acr_api_version
)
def _arm_get_resource_by_name(resource_name, resource_type):
'''Returns the ARM resource in the current subscription with resource_name.
:param str resource_name: The name of resource
:param str resource_type: The type of resource
'''
result = get_resources_in_subscription(resource_type)
elements = [item for item in result if item.name.lower() == resource_name.lower()]
if len(elements) == 0:
raise CLIError(
'No resource with type {} can be found with name: {}'.format(
resource_type, resource_name))
elif len(elements) == 1:
return elements[0]
else:
raise CLIError(
            'More than one resource with type {} is found with name: {}'.format(
resource_type, resource_name))
def get_resource_group_name_by_resource_id(resource_id):
'''Returns the resource group name from parsing the resource id.
:param str resource_id: The resource id
'''
resource_id = resource_id.lower()
resource_group_keyword = '/resourcegroups/'
return resource_id[resource_id.index(resource_group_keyword) + len(resource_group_keyword):
resource_id.index('/providers/')]
def get_resource_group_name_by_registry_name(registry_name):
'''Returns the resource group name for the container registry.
:param str registry_name: The name of container registry
'''
arm_resource = _arm_get_resource_by_name(registry_name, ACR_RESOURCE_TYPE)
return get_resource_group_name_by_resource_id(arm_resource.id)
def get_resource_group_name_by_storage_account_name(storage_account_name):
'''Returns the resource group name for the storage account.
:param str storage_account_name: The name of storage account
'''
arm_resource = _arm_get_resource_by_name(storage_account_name, STORAGE_RESOURCE_TYPE)
return get_resource_group_name_by_resource_id(arm_resource.id)
def get_registry_by_name(registry_name, resource_group_name=None):
'''Returns a tuple of Registry object and resource group name.
:param str registry_name: The name of container registry
:param str resource_group_name: The name of resource group
'''
if resource_group_name is None:
resource_group_name = get_resource_group_name_by_registry_name(registry_name)
client = get_acr_service_client().registries
return client.get(resource_group_name, registry_name), resource_group_name
def get_access_key_by_storage_account_name(storage_account_name, resource_group_name=None):
'''Returns access key for the storage account.
:param str storage_account_name: The name of storage account
:param str resource_group_name: The name of resource group
'''
if resource_group_name is None:
resource_group_name = get_resource_group_name_by_storage_account_name(storage_account_name)
client = get_storage_service_client().storage_accounts
return client.list_keys(resource_group_name, storage_account_name).keys[0].value #pylint: disable=no-member
def arm_deploy_template_new_storage(resource_group_name, #pylint: disable=too-many-arguments
registry_name,
location,
sku,
storage_account_name,
admin_user_enabled,
deployment_name=None):
'''Deploys ARM template to create a container registry with a new storage account.
:param str resource_group_name: The name of resource group
:param str registry_name: The name of container registry
:param str location: The name of location
:param str sku: The SKU of the container registry
:param str storage_account_name: The name of storage account
:param bool admin_user_enabled: Enable admin user
:param str deployment_name: The name of the deployment
'''
from azure.mgmt.resource.resources.models import DeploymentProperties
from azure.cli.core.util import get_file_json
import os
parameters = _parameters(
registry_name=registry_name,
location=location,
sku=sku,
admin_user_enabled=admin_user_enabled,
storage_account_name=storage_account_name)
file_path = os.path.join(os.path.dirname(__file__), 'template.json')
template = get_file_json(file_path)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
return _arm_deploy_template(
get_arm_service_client().deployments, resource_group_name, deployment_name, properties)
def arm_deploy_template_existing_storage(resource_group_name, #pylint: disable=too-many-arguments
registry_name,
location,
sku,
storage_account_name,
admin_user_enabled,
deployment_name=None):
'''Deploys ARM template to create a container registry with an existing storage account.
:param str resource_group_name: The name of resource group
:param str registry_name: The name of container registry
:param str location: The name of location
:param str sku: The SKU of the container registry
:param str storage_account_name: The name of storage account
:param bool admin_user_enabled: Enable admin user
:param str deployment_name: The name of the deployment
'''
from azure.mgmt.resource.resources.models import DeploymentProperties
from azure.cli.core.util import get_file_json
import os
storage_account_resource_group = \
get_resource_group_name_by_storage_account_name(storage_account_name)
parameters = _parameters(
registry_name=registry_name,
location=location,
sku=sku,
admin_user_enabled=admin_user_enabled,
storage_account_name=storage_account_name,
storage_account_resource_group=storage_account_resource_group)
file_path = os.path.join(os.path.dirname(__file__), 'template_existing_storage.json')
template = get_file_json(file_path)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
return _arm_deploy_template(
get_arm_service_client().deployments, resource_group_name, deployment_name, properties)
def _arm_deploy_template(deployments_client,
resource_group_name,
deployment_name,
properties):
'''Deploys ARM template to create a container registry.
:param obj deployments_client: ARM deployments service client
:param str resource_group_name: The name of resource group
:param str deployment_name: The name of the deployment
:param DeploymentProperties properties: The properties of a deployment
'''
if deployment_name is None:
import random
deployment_name = '{0}_{1}'.format(ACR_RESOURCE_PROVIDER, random.randint(100, 800))
return deployments_client.create_or_update(resource_group_name, deployment_name, properties)
def _parameters(registry_name, #pylint: disable=too-many-arguments
location,
sku,
admin_user_enabled,
storage_account_name,
storage_account_resource_group=None):
'''Returns a dict of deployment parameters.
:param str registry_name: The name of container registry
:param str location: The name of location
:param str sku: The SKU of the container registry
:param bool admin_user_enabled: Enable admin user
:param str storage_account_name: The name of storage account
:param str storage_account_resource_group: The resource group of storage account
'''
parameters = {
'registryName': {'value': registry_name},
'registryLocation': {'value': location},
'registrySku': {'value': sku},
'adminUserEnabled': {'value': admin_user_enabled},
'storageAccountName': {'value': storage_account_name}
}
customized_api_version = get_acr_api_version()
if customized_api_version:
parameters['registryApiVersion'] = {'value': customized_api_version}
if storage_account_resource_group:
parameters['storageAccountResourceGroup'] = {'value': storage_account_resource_group}
return parameters
def random_storage_account_name(registry_name):
from datetime import datetime
client = get_storage_service_client().storage_accounts
prefix = registry_name[:18].lower()
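    # append an HHMMSS timestamp and loop until the generated name (max 24 chars) is reported as available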
while True:
time_stamp_suffix = datetime.utcnow().strftime('%H%M%S')
storage_account_name = ''.join([prefix, time_stamp_suffix])[:24]
if client.check_name_availability(storage_account_name).name_available: #pylint: disable=no-member
return storage_account_name
def get_location_from_resource_group(resource_group_name):
group = get_arm_service_client().resource_groups.get(resource_group_name)
return group.location #pylint: disable=no-member
| 45.197248
| 111
| 0.699076
|
b91308d534ae636e348d3253a1c51d864b34e3ad
| 895
|
py
|
Python
|
user_post/migrations/0002_post.py
|
anirvansen/graphql_in_python
|
f7ec3709123ce481719147cafac70070c0eb0628
|
[
"MIT"
] | null | null | null |
user_post/migrations/0002_post.py
|
anirvansen/graphql_in_python
|
f7ec3709123ce481719147cafac70070c0eb0628
|
[
"MIT"
] | null | null | null |
user_post/migrations/0002_post.py
|
anirvansen/graphql_in_python
|
f7ec3709123ce481719147cafac70070c0eb0628
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-22 12:24
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('user_post', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('last_updated', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='user_post.user')),
],
),
]
| 33.148148
| 117
| 0.610056
|
56aafe048891eb00a525d23f707fe2a206da75f1
| 2,003
|
py
|
Python
|
sarathi.py
|
filius-fall/sarathi
|
126a693d91d9bd70872a723f6f67bf445519d707
|
[
"MIT"
] | 3
|
2020-11-28T21:51:10.000Z
|
2021-01-26T09:04:57.000Z
|
sarathi.py
|
filius-fall/sarathi
|
126a693d91d9bd70872a723f6f67bf445519d707
|
[
"MIT"
] | 12
|
2021-04-10T13:04:40.000Z
|
2021-04-18T13:25:54.000Z
|
sarathi.py
|
filius-fall/sarathi
|
126a693d91d9bd70872a723f6f67bf445519d707
|
[
"MIT"
] | 2
|
2021-04-10T13:09:32.000Z
|
2021-04-13T15:56:31.000Z
|
"""Sarathi - A discord bot to steer through the battlefield of knowledge"""
import sys
import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
import til
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
GUILD = os.getenv("DISCORD_GUILD")
bot = commands.Bot(
command_prefix="/",
description="A small bot to help me manage my knowledge base on my blog.",
case_insensitive=True,
)
@bot.event
async def on_ready():
"""Behaviour when ready"""
guild = discord.utils.find(lambda g: g.name == GUILD, bot.guilds)
sys.stdout.write(
f'{bot.user} is connected to the following guild:\n'
f'{guild.name}(id: {guild.id})\n'
)
members = '\n - '.join([member.name for member in guild.members])
sys.stdout.write(f'Guild Members:\n - {members}\n')
@bot.event
async def on_member_join(member):
await member.create_dm()
await member.dm_channel.send(
f'Hi {member.name}, welcome to my Discord server!'
)
@bot.command(
name="til",
help=(
"A command to help manage the today-i-learned database of my blog. "
"Use as `/til add <input>` or, `/til find <topic>` or `/til <input>`."
))
async def today_i_learned(ctx, *query):
"""Today I Learned"""
await ctx.send("Processing...")
response = til.process_query(*query)
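    # handle both a single string response and a list of strings / discord.Embed objects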
if isinstance(response, str):
await ctx.send(response)
elif isinstance(response, list):
for item in response:
if isinstance(item, discord.Embed):
await ctx.send(embed=item)
else:
await ctx.send(item)
@bot.event
async def on_error(event, *args, **kwargs):
with open('err.log', 'a') as f:
if event == 'on_message':
f.write(f'Unhandled message: {args[0]}\n')
else:
raise Exception(
"Error encountered: {} x {} x {}".format(event, args, kwargs))
def main():
bot.run(TOKEN)
if __name__ == "__main__":
main()
| 25.0375
| 78
| 0.620569
|
6012024e22aa4e05e7ace771b30a40d8a6fe12d6
| 1,377
|
py
|
Python
|
adminmgr/media/code/A3/task1/BD_85_130_185_279_XNvO6Z1.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/A3/task1/BD_85_130_185_279_XNvO6Z1.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/A3/task1/BD_85_130_185_279_9ab4wge.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.types import StructType
spark = SparkSession \
.builder \
.appName("StructuredNetworkWordCount") \
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to localhost:9999
#("ID","language","Date","source","len","likes","RTs","Hashtags","Usernames","Userid","name","Place","followers","friends")
userSchema = StructType().add("ID", "string").add("language", "string").add("Date", "string").add("source", "string").add("len", "string").add("likes", "string").add("RTs", "string").add("Hashtags", "string").add("Usernames", "string").add("Userid", "string").add("name", "string").add("Place", "string").add("followers", "string").add("friends", "string")
csvDF = spark \
.readStream \
.option("sep", ";") \
.schema(userSchema) \
.csv('Stream')
#hCounts = csvDF.groupBy("Hashtags").count().orderBy("count", ascending=0)
csvDF.createOrReplaceTempView("updates")
hCounts=spark.sql("select Hashtags,count(*) as count from updates group by Hashtags order by count desc LIMIT 5")
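# stream the running top-5 hashtag counts to the console; complete mode re-emits the full result table on every trigger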
query = hCounts \
.writeStream \
.outputMode("complete") \
.format("console") \
.option("numRows",'5') \
.start()
query.awaitTermination(100)
query.stop()
| 44.419355
| 356
| 0.691358
|
5ba1f4371b7ea2632dcfd691bc4683cd15cdf4b1
| 11,114
|
py
|
Python
|
src/oci/service_catalog/models/application_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/service_catalog/models/application_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/service_catalog/models/application_summary.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ApplicationSummary(object):
"""
The model for summary of an application in service catalog.
"""
#: A constant which can be used with the pricing_type property of a ApplicationSummary.
#: This constant has a value of "FREE"
PRICING_TYPE_FREE = "FREE"
#: A constant which can be used with the pricing_type property of a ApplicationSummary.
#: This constant has a value of "BYOL"
PRICING_TYPE_BYOL = "BYOL"
#: A constant which can be used with the pricing_type property of a ApplicationSummary.
#: This constant has a value of "PAYGO"
PRICING_TYPE_PAYGO = "PAYGO"
#: A constant which can be used with the package_type property of a ApplicationSummary.
#: This constant has a value of "STACK"
PACKAGE_TYPE_STACK = "STACK"
def __init__(self, **kwargs):
"""
Initializes a new ApplicationSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param entity_id:
The value to assign to the entity_id property of this ApplicationSummary.
:type entity_id: str
:param entity_type:
The value to assign to the entity_type property of this ApplicationSummary.
:type entity_type: str
:param display_name:
The value to assign to the display_name property of this ApplicationSummary.
:type display_name: str
:param is_featured:
The value to assign to the is_featured property of this ApplicationSummary.
:type is_featured: bool
:param publisher:
The value to assign to the publisher property of this ApplicationSummary.
:type publisher: oci.service_catalog.models.PublisherSummary
:param short_description:
The value to assign to the short_description property of this ApplicationSummary.
:type short_description: str
:param logo:
The value to assign to the logo property of this ApplicationSummary.
:type logo: oci.service_catalog.models.UploadData
:param pricing_type:
The value to assign to the pricing_type property of this ApplicationSummary.
Allowed values for this property are: "FREE", "BYOL", "PAYGO", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type pricing_type: str
:param package_type:
The value to assign to the package_type property of this ApplicationSummary.
Allowed values for this property are: "STACK", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type package_type: str
"""
self.swagger_types = {
'entity_id': 'str',
'entity_type': 'str',
'display_name': 'str',
'is_featured': 'bool',
'publisher': 'PublisherSummary',
'short_description': 'str',
'logo': 'UploadData',
'pricing_type': 'str',
'package_type': 'str'
}
self.attribute_map = {
'entity_id': 'entityId',
'entity_type': 'entityType',
'display_name': 'displayName',
'is_featured': 'isFeatured',
'publisher': 'publisher',
'short_description': 'shortDescription',
'logo': 'logo',
'pricing_type': 'pricingType',
'package_type': 'packageType'
}
self._entity_id = None
self._entity_type = None
self._display_name = None
self._is_featured = None
self._publisher = None
self._short_description = None
self._logo = None
self._pricing_type = None
self._package_type = None
@property
def entity_id(self):
"""
**[Required]** Gets the entity_id of this ApplicationSummary.
Identifier of the application from a service catalog.
:return: The entity_id of this ApplicationSummary.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this ApplicationSummary.
Identifier of the application from a service catalog.
:param entity_id: The entity_id of this ApplicationSummary.
:type: str
"""
self._entity_id = entity_id
@property
def entity_type(self):
"""
**[Required]** Gets the entity_type of this ApplicationSummary.
The type of an application in the service catalog.
:return: The entity_type of this ApplicationSummary.
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""
Sets the entity_type of this ApplicationSummary.
The type of an application in the service catalog.
:param entity_type: The entity_type of this ApplicationSummary.
:type: str
"""
self._entity_type = entity_type
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this ApplicationSummary.
The name that service catalog should use to display this application.
:return: The display_name of this ApplicationSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this ApplicationSummary.
The name that service catalog should use to display this application.
:param display_name: The display_name of this ApplicationSummary.
:type: str
"""
self._display_name = display_name
@property
def is_featured(self):
"""
Gets the is_featured of this ApplicationSummary.
Indicates whether the application is featured.
:return: The is_featured of this ApplicationSummary.
:rtype: bool
"""
return self._is_featured
@is_featured.setter
def is_featured(self, is_featured):
"""
Sets the is_featured of this ApplicationSummary.
Indicates whether the application is featured.
:param is_featured: The is_featured of this ApplicationSummary.
:type: bool
"""
self._is_featured = is_featured
@property
def publisher(self):
"""
Gets the publisher of this ApplicationSummary.
:return: The publisher of this ApplicationSummary.
:rtype: oci.service_catalog.models.PublisherSummary
"""
return self._publisher
@publisher.setter
def publisher(self, publisher):
"""
Sets the publisher of this ApplicationSummary.
:param publisher: The publisher of this ApplicationSummary.
:type: oci.service_catalog.models.PublisherSummary
"""
self._publisher = publisher
@property
def short_description(self):
"""
Gets the short_description of this ApplicationSummary.
A short description of the application.
:return: The short_description of this ApplicationSummary.
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""
Sets the short_description of this ApplicationSummary.
A short description of the application.
:param short_description: The short_description of this ApplicationSummary.
:type: str
"""
self._short_description = short_description
@property
def logo(self):
"""
Gets the logo of this ApplicationSummary.
:return: The logo of this ApplicationSummary.
:rtype: oci.service_catalog.models.UploadData
"""
return self._logo
@logo.setter
def logo(self, logo):
"""
Sets the logo of this ApplicationSummary.
:param logo: The logo of this ApplicationSummary.
:type: oci.service_catalog.models.UploadData
"""
self._logo = logo
@property
def pricing_type(self):
"""
Gets the pricing_type of this ApplicationSummary.
Summary of the pricing types available across all packages in the application.
Allowed values for this property are: "FREE", "BYOL", "PAYGO", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The pricing_type of this ApplicationSummary.
:rtype: str
"""
return self._pricing_type
@pricing_type.setter
def pricing_type(self, pricing_type):
"""
Sets the pricing_type of this ApplicationSummary.
Summary of the pricing types available across all packages in the application.
:param pricing_type: The pricing_type of this ApplicationSummary.
:type: str
"""
allowed_values = ["FREE", "BYOL", "PAYGO"]
if not value_allowed_none_or_none_sentinel(pricing_type, allowed_values):
pricing_type = 'UNKNOWN_ENUM_VALUE'
self._pricing_type = pricing_type
@property
def package_type(self):
"""
Gets the package_type of this ApplicationSummary.
        The type of the packages within the application.
Allowed values for this property are: "STACK", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The package_type of this ApplicationSummary.
:rtype: str
"""
return self._package_type
@package_type.setter
def package_type(self, package_type):
"""
Sets the package_type of this ApplicationSummary.
        The type of the packages within the application.
:param package_type: The package_type of this ApplicationSummary.
:type: str
"""
allowed_values = ["STACK"]
if not value_allowed_none_or_none_sentinel(package_type, allowed_values):
package_type = 'UNKNOWN_ENUM_VALUE'
self._package_type = package_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32.402332
| 245
| 0.648551
|
7519b24a485668504b93db4186a6b1a778025604
| 18,459
|
py
|
Python
|
pandas/tests/io/test_common.py
|
jordanrmerrick/pandas
|
e18415e64c66a5c125a6e6a9e9aa9fa97eb01403
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 4
|
2020-03-31T23:31:24.000Z
|
2021-08-06T13:47:39.000Z
|
pandas/tests/io/test_common.py
|
jordanrmerrick/pandas
|
e18415e64c66a5c125a6e6a9e9aa9fa97eb01403
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 6
|
2021-05-31T01:10:55.000Z
|
2021-07-19T00:37:03.000Z
|
pandas/tests/io/test_common.py
|
jordanrmerrick/pandas
|
e18415e64c66a5c125a6e6a9e9aa9fa97eb01403
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-06T10:33:40.000Z
|
2021-03-06T10:33:40.000Z
|
"""
Tests for the pandas.io.common functionalities
"""
import codecs
import errno
from functools import partial
from io import (
BytesIO,
StringIO,
)
import mmap
import os
from pathlib import Path
import tempfile
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
import pandas.io.common as icom
class CustomFSPath:
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
# Functions that consume a string path and return a string or path-like object
path_types = [str, CustomFSPath, Path]
try:
from py.path import local as LocalPath
path_types.append(LocalPath)
except ImportError:
pass
HERE = os.path.abspath(os.path.dirname(__file__))
# https://github.com/cython/cython/issues/1720
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestCommonIOCapabilities:
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = "~/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name != filename
assert os.path.isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = "/somefolder/sometest"
expanded_name = icom._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
def test_stringify_path_pathlib(self):
rel_path = icom.stringify_path(Path("."))
assert rel_path == "."
redundant_path = icom.stringify_path(Path("foo//bar"))
assert redundant_path == os.path.join("foo", "bar")
@td.skip_if_no("py.path")
def test_stringify_path_localpath(self):
path = os.path.join("foo", "bar")
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert icom.stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath("foo/bar.csv")
result = icom.stringify_path(p)
assert result == "foo/bar.csv"
def test_stringify_file_and_path_like(self):
# GH 38125: do not stringify file objects that are also path-like
fsspec = pytest.importorskip("fsspec")
with tm.ensure_clean() as path:
with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
assert fsspec_obj == icom.stringify_path(fsspec_obj)
@pytest.mark.parametrize(
"extension,expected",
[
("", None),
(".gz", "gzip"),
(".bz2", "bz2"),
(".zip", "zip"),
(".xz", "xz"),
(".GZ", "gzip"),
(".BZ2", "bz2"),
(".ZIP", "zip"),
(".XZ", "xz"),
],
)
@pytest.mark.parametrize("path_type", path_types)
def test_infer_compression_from_path(self, extension, expected, path_type):
path = path_type("foo/bar.csv" + extension)
compression = icom.infer_compression(path, compression="infer")
assert compression == expected
@pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
def test_get_handle_with_path(self, path_type):
# ignore LocalPath: it creates strange paths: /absolute/~/sometest
with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:
filename = path_type("~/" + Path(tmp).name + "/sometest")
with icom.get_handle(filename, "w") as handles:
assert Path(handles.handle.name).is_absolute()
assert os.path.expanduser(filename) == handles.handle.name
def test_get_handle_with_buffer(self):
input_buffer = StringIO()
with icom.get_handle(input_buffer, "r") as handles:
assert handles.handle == input_buffer
assert not input_buffer.closed
input_buffer.close()
def test_iterator(self):
with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:
result = pd.concat(reader, ignore_index=True)
expected = pd.read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
with pd.read_csv(StringIO(self.data1), chunksize=1) as it:
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", IOError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", ValueError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_non_existent(self, reader, module, error_class, fn_ext):
pytest.importorskip(module)
path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Expected object or value"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"reader, module, error_class, fn_ext",
[
(pd.read_csv, "os", FileNotFoundError, "csv"),
(pd.read_table, "os", FileNotFoundError, "csv"),
(pd.read_fwf, "os", FileNotFoundError, "txt"),
(pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
(pd.read_feather, "pyarrow", IOError, "feather"),
(pd.read_hdf, "tables", FileNotFoundError, "h5"),
(pd.read_stata, "os", FileNotFoundError, "dta"),
(pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
(pd.read_json, "os", ValueError, "json"),
(pd.read_pickle, "os", FileNotFoundError, "pickle"),
],
)
def test_read_expands_user_home_dir(
self, reader, module, error_class, fn_ext, monkeypatch
):
pytest.importorskip(module)
path = os.path.join("~", "does_not_exist." + fn_ext)
monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))
msg1 = fr"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
msg2 = fr"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
msg3 = "Unexpected character found when decoding 'false'"
msg4 = "path_or_buf needs to be a string file path or file-like"
msg5 = (
fr"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
fr"'.+does_not_exist\.{fn_ext}'"
)
msg6 = fr"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
msg7 = (
fr"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
)
msg8 = fr"Failed to open local file.+does_not_exist\.{fn_ext}"
with pytest.raises(
error_class,
match=fr"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
):
reader(path)
@pytest.mark.parametrize(
"reader, module, path",
[
(pd.read_csv, "os", ("io", "data", "csv", "iris.csv")),
(pd.read_table, "os", ("io", "data", "csv", "iris.csv")),
(
pd.read_fwf,
"os",
("io", "data", "fixed_width", "fixed_width_format.txt"),
),
(pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")),
(
pd.read_feather,
"pyarrow",
("io", "data", "feather", "feather-0_3_1.feather"),
),
(
pd.read_hdf,
"tables",
("io", "data", "legacy_hdf", "datetimetz_object.h5"),
),
(pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")),
(pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
(pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
(
pd.read_pickle,
"os",
("io", "data", "pickle", "categorical.0.25.0.pickle"),
),
],
)
@pytest.mark.filterwarnings(
"ignore:CategoricalBlock is deprecated:DeprecationWarning"
)
def test_read_fspath_all(self, reader, module, path, datapath):
pytest.importorskip(module)
path = datapath(*path)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith(".pickle"):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"writer_name, writer_kwargs, module",
[
("to_csv", {}, "os"),
("to_excel", {"engine": "xlwt"}, "xlwt"),
("to_feather", {}, "pyarrow"),
("to_html", {}, "os"),
("to_json", {}, "os"),
("to_latex", {}, "os"),
("to_pickle", {}, "os"),
("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"),
],
)
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
with open(string, "rb") as f:
expected = f.read()
writer(mypath, **writer_kwargs)
with open(fspath, "rb") as f:
result = f.read()
assert result == expected
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) IO HDF5
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip("tables")
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean("string")
p2 = tm.ensure_clean("fspath")
with p1 as string, p2 as fspath:
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key="bar")
df.to_hdf(string, key="bar")
result = pd.read_hdf(fspath, key="bar")
expected = pd.read_hdf(string, key="bar")
tm.assert_frame_equal(result, expected)
@pytest.fixture
def mmap_file(datapath):
return datapath("io", "data", "csv", "test_mmap.csv")
class TestMMapWrapper:
def test_constructor_bad_file(self, mmap_file):
non_file = StringIO("I am not a file")
non_file.fileno = lambda: -1
# the error raised is different on Windows
if is_platform_windows():
msg = "The parameter is incorrect"
err = OSError
else:
msg = "[Errno 22]"
err = mmap.error
with pytest.raises(err, match=msg):
icom._MMapWrapper(non_file)
target = open(mmap_file)
target.close()
msg = "I/O operation on closed file"
with pytest.raises(ValueError, match=msg):
icom._MMapWrapper(target)
def test_get_attr(self, mmap_file):
with open(mmap_file) as target:
wrapper = icom._MMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs if not attr.startswith("__")]
attrs.append("__next__")
for attr in attrs:
assert hasattr(wrapper, attr)
assert not hasattr(wrapper, "foo")
def test_next(self, mmap_file):
with open(mmap_file) as target:
wrapper = icom._MMapWrapper(target)
lines = target.readlines()
for line in lines:
next_line = next(wrapper)
assert next_line.strip() == line.strip()
with pytest.raises(StopIteration, match=r"^$"):
next(wrapper)
def test_unknown_engine(self):
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path)
with pytest.raises(ValueError, match="Unknown engine"):
pd.read_csv(path, engine="pyt")
def test_binary_mode(self):
"""
'encoding' shouldn't be passed to 'open' in binary mode.
GH 35058
"""
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path, mode="w+b")
tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
@pytest.mark.parametrize("encoding", ["utf-16", "utf-32"])
@pytest.mark.parametrize("compression_", ["bz2", "xz"])
def test_warning_missing_utf_bom(self, encoding, compression_):
"""
bz2 and xz do not write the byte order mark (BOM) for utf-16/32.
https://stackoverflow.com/questions/55171439
GH 35681
"""
df = tm.makeDataFrame()
with tm.ensure_clean() as path:
with tm.assert_produces_warning(UnicodeWarning):
df.to_csv(path, compression=compression_, encoding=encoding)
# reading should fail (otherwise we wouldn't need the warning)
msg = r"UTF-\d+ stream does not start with BOM"
with pytest.raises(UnicodeError, match=msg):
pd.read_csv(path, compression=compression_, encoding=encoding)
def test_is_fsspec_url():
assert icom.is_fsspec_url("gcs://pandas/somethingelse.com")
assert icom.is_fsspec_url("gs://pandas/somethingelse.com")
# the following is the only remote URL that is handled without fsspec
assert not icom.is_fsspec_url("http://pandas/somethingelse.com")
assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
assert not icom.is_fsspec_url("/local/path")
assert not icom.is_fsspec_url("relative/local/path")
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_codecs_encoding(encoding, format):
# GH39247
expected = tm.makeDataFrame()
with tm.ensure_clean() as path:
with codecs.open(path, mode="w", encoding=encoding) as handle:
getattr(expected, f"to_{format}")(handle)
with codecs.open(path, mode="r", encoding=encoding) as handle:
if format == "csv":
df = pd.read_csv(handle, index_col=0)
else:
df = pd.read_json(handle)
tm.assert_frame_equal(expected, df)
def test_codecs_get_writer_reader():
# GH39247
expected = tm.makeDataFrame()
with tm.ensure_clean() as path:
with open(path, "wb") as handle:
with codecs.getwriter("utf-8")(handle) as encoded:
expected.to_csv(encoded)
with open(path, "rb") as handle:
with codecs.getreader("utf-8")(handle) as encoded:
df = pd.read_csv(encoded, index_col=0)
tm.assert_frame_equal(expected, df)
@pytest.mark.parametrize(
"io_class,mode,msg",
[
(BytesIO, "t", "a bytes-like object is required, not 'str'"),
(StringIO, "b", "string argument expected, got 'bytes'"),
],
)
def test_explicit_encoding(io_class, mode, msg):
# GH39247; this test makes sure that if a user provides mode="*t" or "*b",
# it is used. In the case of this test it leads to an error as intentionally the
# wrong mode is requested
expected = tm.makeDataFrame()
with io_class() as buffer:
with pytest.raises(TypeError, match=msg):
expected.to_csv(buffer, mode=f"w{mode}")
@pytest.mark.parametrize("encoding_errors", [None, "strict", "replace"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_encoding_errors(encoding_errors, format):
# GH39450
msg = "'utf-8' codec can't decode byte"
bad_encoding = b"\xe4"
if format == "csv":
content = bad_encoding + b"\n" + bad_encoding
reader = pd.read_csv
else:
content = (
b'{"'
+ bad_encoding * 2
+ b'": {"'
+ bad_encoding
+ b'":"'
+ bad_encoding
+ b'"}}'
)
reader = partial(pd.read_json, orient="index")
with tm.ensure_clean() as path:
file = Path(path)
file.write_bytes(content)
if encoding_errors != "replace":
with pytest.raises(UnicodeDecodeError, match=msg):
reader(path, encoding_errors=encoding_errors)
else:
df = reader(path, encoding_errors=encoding_errors)
decoded = bad_encoding.decode(errors=encoding_errors)
expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
tm.assert_frame_equal(df, expected)
def test_bad_encdoing_errors():
# GH 39777
with tm.ensure_clean() as path:
with pytest.raises(ValueError, match="Invalid value for `encoding_errors`"):
icom.get_handle(path, "w", errors="bad")
def test_errno_attribute():
# GH 13872
with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err:
pd.read_csv("doesnt_exist")
assert err.errno == errno.ENOENT
| 34.828302
| 88
| 0.588439
|
756e9baadc6e56f605f2fb80b6ef32b2c8aa40d2
| 11,715
|
py
|
Python
|
src/wechaty/user/contact.py
|
PIG208/python-wechaty
|
f6a3a6765d9265905e1ff39142d45eacf87180fd
|
[
"Apache-2.0"
] | 640
|
2020-02-10T06:39:20.000Z
|
2022-03-31T07:56:45.000Z
|
src/wechaty/user/contact.py
|
RuoChen-ing/python-wechaty
|
d915823660ef5de6f1f599bdcc6e45f4a4122581
|
[
"Apache-2.0"
] | 245
|
2020-02-28T18:58:50.000Z
|
2022-03-28T04:10:24.000Z
|
src/wechaty/user/contact.py
|
RuoChen-ing/python-wechaty
|
d915823660ef5de6f1f599bdcc6e45f4a4122581
|
[
"Apache-2.0"
] | 140
|
2019-12-17T02:40:06.000Z
|
2022-03-29T02:23:15.000Z
|
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import asyncio
import dataclasses
import json
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Type,
Union,
)
from pyee import AsyncIOEventEmitter # type: ignore
from wechaty.exceptions import WechatyPayloadError, WechatyOperationError
from wechaty_puppet import (
ContactGender,
ContactPayload,
ContactQueryFilter,
ContactType,
get_logger,
FileBox
)
# from wechaty.utils import type_check
from ..accessory import Accessory
if TYPE_CHECKING:
# pytype: disable=pyi-error
from .tag import Tag
# pytype: disable=pyi-error
from .message import Message
# pytype: disable=pyi-error
from .url_link import UrlLink
log = get_logger('Contact')
# pylint:disable=R0904
class Contact(Accessory[ContactPayload], AsyncIOEventEmitter):
"""
contact object
"""
_pool: Dict[str, 'Contact'] = {}
def __init__(self, contact_id: str):
"""
initialization
"""
super().__init__()
self.contact_id: str = contact_id
def get_id(self) -> str:
"""
get contact_id
:return:
"""
return self.contact_id
@classmethod
def load(cls: Type[Contact], contact_id: str) -> Contact:
"""
load contact by contact_id
:param contact_id:
:return: created contact instance
"""
# create new contact and set to pool
if contact_id in cls._pool:
return cls._pool[contact_id]
# create new contact object
new_contact = cls(contact_id) # , *args, **kwargs)
cls._pool[contact_id] = new_contact
return new_contact
@classmethod
async def find(cls: Type[Contact], query: Union[str, ContactQueryFilter]) \
-> Optional[Contact]:
"""
find a single target contact
:param query:
:return:
"""
log.info('find() <%s, %s>', cls, query)
contact_list = await cls.find_all(query)
if len(contact_list) == 0:
return None
return contact_list[0]
@classmethod
async def find_all(cls: Type[Contact],
query: Optional[Union[str, ContactQueryFilter]] = None
) -> List[Contact]:
"""
find all contact friends
:param query:
:return:
"""
log.info('find_all() <%s, %s>', cls, query)
contact_ids = await cls.get_puppet().contact_list()
        # contact ids could be filtered here to keep only valid ones, e.g. those starting with 'wxid_'
contacts: List[Contact] = [cls.load(contact_id) for contact_id in contact_ids]
# load contact parallel using asyncio.gather method
# async load
await asyncio.gather(*[contact.ready() for contact in contacts])
if query is not None:
if isinstance(query, str):
contacts = list(
filter(
lambda x: False if not x.payload else
(x.payload.alias.__contains__(query)) or
(x.payload.id.__contains__(query)) or
(x.payload.name.__contains__(query)) or
(x.payload.weixin.__contains__(query)),
contacts
)
)
if isinstance(query, ContactQueryFilter):
new_query: Dict = dataclasses.asdict(query)
contacts = list(
filter(
lambda x: x.payload and (
(x.payload.alias == new_query.get('alias') or not new_query.get('alias')) and
(x.payload.id == new_query.get('id') or not new_query.get('id')) and
(x.payload.name == new_query.get('name') or not new_query.get('name')) and
(x.payload.weixin == new_query.get('weixin') or not new_query.get('weixin'))
),
contacts
)
)
return contacts
async def ready(self, force_sync: bool = False) -> None:
"""
load contact object from puppet
:return:
"""
if force_sync or not self.is_ready():
try:
self.payload = await self.puppet.contact_payload(
self.contact_id)
log.info('load contact <%s>', self)
except IOError as e:
                log.info("can't load contact %s payload, message: %s",
                         self.name,
                         str(e.args))
                raise WechatyPayloadError("can't load contact payload")
def __str__(self) -> str:
"""
get contact string representation
"""
if not self.is_ready():
return 'Contact <{}>'.format(self.contact_id)
if self.payload.alias.strip() != '':
identity = self.payload.alias
elif self.payload.name.strip() != '':
identity = self.payload.name
elif self.contact_id.strip() != '':
identity = self.contact_id
else:
identity = 'loading ...'
return 'Contact <%s> <%s>' % (self.contact_id, identity)
async def say(self, message: Union[str, Message, FileBox, Contact, UrlLink]
) -> Optional[Message]:
"""
say something
:param message: message content
"""
if not message:
            log.error("can't say nothing")
return None
if not self.is_ready():
await self.ready()
# import some class because circular dependency
from wechaty.user.url_link import UrlLink
if isinstance(message, str):
# say text
msg_id = await self.puppet.message_send_text(
conversation_id=self.contact_id,
message=message
)
elif isinstance(message, Contact):
msg_id = await self.puppet.message_send_contact(
contact_id=message.contact_id,
conversation_id=self.contact_id
)
elif isinstance(message, FileBox):
msg_id = await self.puppet.message_send_file(
conversation_id=self.contact_id,
file=message
)
elif isinstance(message, UrlLink):
# use this way to resolve circulation dependency import
msg_id = await self.puppet.message_send_url(
conversation_id=self.contact_id,
url=json.dumps(dataclasses.asdict(message.payload))
)
# elif isinstance(message, MiniProgram):
# msg_id = await self.puppet.message_send_mini_program(
# self.contact_id, message.payload)
else:
            log.info('unsupported message type %s', message)
            raise WechatyOperationError('unsupported message type')
if msg_id is not None:
msg = self.wechaty.Message.load(msg_id)
await msg.ready()
return msg
return None
@property
def name(self) -> str:
"""
get contact name
"""
return '' if not self.is_ready() else self.payload.name
async def alias(self,
new_alias: Optional[str] = None
) -> Union[None, str]:
"""
get/set alias
"""
log.info('Contact alias <%s>', new_alias)
if not self.is_ready():
await self.ready()
if self.payload is None:
            raise WechatyPayloadError("can't load contact payload <%s>" % self)
try:
alias = await self.puppet.contact_alias(self.contact_id, new_alias)
# reload the contact payload
await self.ready(force_sync=True)
return alias
# pylint:disable=W0703
except Exception as exception:
log.info(
'Contact alias(%s) rejected: %s',
new_alias, str(exception.args))
return None
def is_friend(self) -> Optional[bool]:
"""
Check if contact is friend
        False if the contact is not a friend of the bot, None if unknown.
"""
if not self.payload or not self.payload.friend:
return None
return self.payload.friend
def is_offical(self) -> bool:
"""
        Check if it's an official account
:params:
:return:
"""
if self.payload is None:
return False
return self.payload.type == ContactType.CONTACT_TYPE_OFFICIAL
def is_personal(self) -> bool:
"""
Check if it's a personal account
"""
if self.payload is None:
return False
return self.payload.type == ContactType.CONTACT_TYPE_PERSONAL
def type(self) -> ContactType:
"""
get contact type
"""
if self.payload is None:
raise WechatyPayloadError('contact payload not found')
return self.payload.type
def star(self) -> Optional[bool]:
"""
check if it's a star account
"""
if self.payload is None:
return None
return self.payload.star
def gender(self) -> ContactGender:
"""
get contact gender info
"""
if self.payload is not None:
return self.payload.gender
return ContactGender.CONTACT_GENDER_UNSPECIFIED
def province(self) -> Optional[str]:
"""
get the province of the account
"""
if self.payload is None:
return None
return self.payload.province
def city(self) -> Optional[str]:
"""
get the city of the account
"""
if self.payload is None:
return None
return self.payload.city
async def avatar(self, file_box: Optional[FileBox] = None) -> FileBox:
"""
get the avatar of the account
"""
avatar = await self.puppet.contact_avatar(
contact_id=self.contact_id, file_box=file_box)
return avatar
async def tags(self) -> List[Tag]:
"""
Get all tags of contact
"""
log.info('load contact tags for %s', self)
tag_ids = await self.puppet.tag_contact_list(self.contact_id)
tags = [self.wechaty.Tag.load(tag_id)
for tag_id in tag_ids]
return tags
async def sync(self) -> None:
"""
sync the contact data
"""
await self.ready()
def is_self(self) -> bool:
"""
check if it's the self account
"""
return self.wechaty.contact_id == self.contact_id
def weixin(self) -> Optional[str]:
"""
Get the weixin number from a contact.
"""
if self.payload is None:
return None
return self.payload.weixin
| 29.885204
| 105
| 0.559624
|
2ca99431123082b7de02f26e13012749f9a00dc1
| 2,477
|
py
|
Python
|
buildingspy/tests/test_development_error_dictionary.py
|
wanaylor/NewBuildingsPy
|
a80ea41600c80569dfb381ed9629161a5f17224e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-11-17T12:36:21.000Z
|
2019-11-17T12:36:21.000Z
|
buildingspy/tests/test_development_error_dictionary.py
|
wanaylor/NewBuildingsPy
|
a80ea41600c80569dfb381ed9629161a5f17224e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
buildingspy/tests/test_development_error_dictionary.py
|
wanaylor/NewBuildingsPy
|
a80ea41600c80569dfb381ed9629161a5f17224e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python
import unittest
class Test_development_error_dictionary(unittest.TestCase):
"""
This class contains the unit tests for
:mod:`buildingspy.development.error_dictionary.ErrorDictionary`.
"""
def test_keys(self):
import buildingspy.development.error_dictionary as e
err_dic = e.ErrorDictionary()
k = err_dic.keys()
k_expected = ['differentiated if',
'experiment annotation',
'file not found',
'invalid connect',
'numerical Jacobians',
'parameter with start value only',
'redeclare non-replaceable',
'redundant consistent initial conditions',
'type incompatibility',
'type inconsistent definition equations',
'unspecified initial conditions',
'unused connector']
self.assertEqual(len(k), len(k_expected), "Wrong number of keys.")
for i in range(len(k)):
self.assertEqual(k[i], k_expected[i], "Wrong key, expected \"{}\".".format(k_expected[i]))
def test_tool_messages(self):
import buildingspy.development.error_dictionary as e
err_dic = e.ErrorDictionary()
k = err_dic.tool_messages()
k_expected = ['Differentiating (if',
'Warning: Failed to interpret experiment annotation',
'which was not found',
'The model contained invalid connect statements.',
'Number of numerical Jacobians:',
"Warning: The following parameters don't have any value, only a start value",
'Warning: Redeclaration of non-replaceable requires type equivalence',
'Redundant consistent initial conditions:',
'but they must be compatible',
'Type inconsistent definition equation',
'Dymola has selected default initial condition',
'Warning: The following connector variables are not used in the model']
self.assertEqual(len(k), len(k_expected), "Wrong number of tool messages.")
for i in range(len(k)):
self.assertEqual(k[i], k_expected[i], "Wrong tool message, expected \"{}\".".format(k_expected[i]))
if __name__ == '__main__':
unittest.main()
| 44.232143
| 111
| 0.572063
|
14bca1dc75a95b3c4c32fd5b6699ddc3de9060f0
| 1,064
|
py
|
Python
|
adat/telepules/parse_precincts.py
|
korenmiklos/106
|
9925a2bda18915eb43d7bdd2b54d4d7aa113bc66
|
[
"MIT"
] | 1
|
2018-04-07T20:44:45.000Z
|
2018-04-07T20:44:45.000Z
|
adat/telepules/parse_precincts.py
|
korenmiklos/106
|
9925a2bda18915eb43d7bdd2b54d4d7aa113bc66
|
[
"MIT"
] | null | null | null |
adat/telepules/parse_precincts.py
|
korenmiklos/106
|
9925a2bda18915eb43d7bdd2b54d4d7aa113bc66
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, csv, re
import lxml.html
import glob
'''
Candidate id: 2014 / OEVK # / candidate serial number
'''
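# matches the settlement name inside the <h2> heading of each precinct page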
TELEPULES_RE = re.compile(r'<h2>(.*?) szavazókörei</h2>', re.UNICODE | re.DOTALL)
def find_city(html, regex):
matches = regex.search(html)
return matches.group(1)
def parse_file(filename, datastore):
html = open(filename,'r', encoding='latin2').read()
telepules_id = re.search('M\d{2}/T\d{3}', filename).group(0)
telepules_nev = find_city(html, TELEPULES_RE)
datastore.append(dict(telepules_id=telepules_id, telepules_nev=telepules_nev))
def write_csv(list_name, datastore):
writer = csv.DictWriter(open('{}.csv'.format(list_name), 'w', encoding='utf-8'), fieldnames=datastore[0].keys())
writer.writeheader()
for row in datastore:
writer.writerow(row)
if __name__ == '__main__':
datastore = []
file_list = glob.glob('html/M??/T???/v21.html')
for filename in file_list:
parse_file(filename, datastore)
write_csv('telepules_kodok', datastore)
| 28
| 116
| 0.677632
|
6832aba33cca9c297c1a642810345ed066eadc07
| 3,708
|
py
|
Python
|
selfdrive/test/process_replay/test_processes.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 114
|
2020-02-24T14:18:01.000Z
|
2022-03-19T03:42:00.000Z
|
selfdrive/test/process_replay/test_processes.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 15
|
2020-02-25T03:37:44.000Z
|
2021-09-08T01:51:15.000Z
|
selfdrive/test/process_replay/test_processes.py
|
StingrayCharles/openpilot
|
6a48212422ef05792dde058e36c5c3099f17f619
|
[
"MIT"
] | 55
|
2020-02-24T09:43:04.000Z
|
2022-02-15T04:52:00.000Z
|
#!/usr/bin/env python3
import os
import requests
import sys
import tempfile
from selfdrive.test.process_replay.compare_logs import compare_logs
from selfdrive.test.process_replay.process_replay import replay_process, CONFIGS
from tools.lib.logreader import LogReader
segments = [
"0375fdf7b1ce594d|2019-06-13--08-32-25--3", # HONDA.ACCORD
"99c94dc769b5d96e|2019-08-03--14-19-59--2", # HONDA.CIVIC
"cce908f7eb8db67d|2019-08-02--15-09-51--3", # TOYOTA.COROLLA_TSS2
"7ad88f53d406b787|2019-07-09--10-18-56--8", # GM.VOLT
"704b2230eb5190d6|2019-07-06--19-29-10--0", # HYUNDAI.KIA_SORENTO
"b6e1317e1bfbefa6|2019-07-06--04-05-26--5", # CHRYSLER.JEEP_CHEROKEE
"7873afaf022d36e2|2019-07-03--18-46-44--0", # SUBARU.IMPREZA
]
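# For illustration (derived from the entries above): a segment name such as
# "0375fdf7b1ce594d|2019-06-13--08-32-25--3" is split by get_segment() into the
# route "0375fdf7b1ce594d|2019-06-13--08-32-25" and segment number "3".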
def get_segment(segment_name):
route_name, segment_num = segment_name.rsplit("--", 1)
rlog_url = "https://commadataci.blob.core.windows.net/openpilotci/%s/%s/rlog.bz2" \
% (route_name.replace("|", "/"), segment_num)
r = requests.get(rlog_url)
if r.status_code != 200:
return None
with tempfile.NamedTemporaryFile(delete=False, suffix=".bz2") as f:
f.write(r.content)
return f.name
if __name__ == "__main__":
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
ref_commit_fn = os.path.join(process_replay_dir, "ref_commit")
if not os.path.isfile(ref_commit_fn):
print("couldn't find reference commit")
sys.exit(1)
ref_commit = open(ref_commit_fn).read().strip()
print("***** testing against commit %s *****" % ref_commit)
results = {}
for segment in segments:
print("***** testing route segment %s *****\n" % segment)
results[segment] = {}
rlog_fn = get_segment(segment)
if rlog_fn is None:
print("failed to get segment %s" % segment)
sys.exit(1)
lr = LogReader(rlog_fn)
for cfg in CONFIGS:
log_msgs = replay_process(cfg, lr)
log_fn = os.path.join(process_replay_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
if not os.path.isfile(log_fn):
url = "https://commadataci.blob.core.windows.net/openpilotci/"
req = requests.get(url + os.path.basename(log_fn))
if req.status_code != 200:
results[segment][cfg.proc_name] = "failed to download comparison log"
continue
with tempfile.NamedTemporaryFile(suffix=".bz2") as f:
f.write(req.content)
f.flush()
f.seek(0)
cmp_log_msgs = list(LogReader(f.name))
else:
cmp_log_msgs = list(LogReader(log_fn))
diff = compare_logs(cmp_log_msgs, log_msgs, cfg.ignore)
results[segment][cfg.proc_name] = diff
os.remove(rlog_fn)
failed = False
with open(os.path.join(process_replay_dir, "diff.txt"), "w") as f:
f.write("***** tested against commit %s *****\n" % ref_commit)
for segment, result in list(results.items()):
f.write("***** differences for segment %s *****\n" % segment)
print("***** results for segment %s *****" % segment)
for proc, diff in list(result.items()):
f.write("*** process: %s ***\n" % proc)
print("\t%s" % proc)
if isinstance(diff, str):
print("\t\t%s" % diff)
failed = True
elif len(diff):
cnt = {}
for d in diff:
f.write("\t%s\n" % str(d))
k = str(d[1])
cnt[k] = 1 if k not in cnt else cnt[k] + 1
for k, v in sorted(cnt.items()):
print("\t\t%s: %s" % (k, v))
failed = True
if failed:
print("TEST FAILED")
else:
print("TEST SUCCEEDED")
print("\n\nTo update the reference logs for this test run:")
print("./update_refs.py")
sys.exit(int(failed))
| 31.159664
| 102
| 0.626214
|
f84247220f6053653cf009996af977f08a9c24fb
| 827
|
bzl
|
Python
|
third_party/tf_runtime/workspace.bzl
|
erik-888/tensorflow
|
d207f1fccd696966312d0b2b3c9a84b53ca64ca7
|
[
"Apache-2.0"
] | 1
|
2020-03-23T07:42:17.000Z
|
2020-03-23T07:42:17.000Z
|
third_party/tf_runtime/workspace.bzl
|
a5204662/tensorflow
|
d207f1fccd696966312d0b2b3c9a84b53ca64ca7
|
[
"Apache-2.0"
] | null | null | null |
third_party/tf_runtime/workspace.bzl
|
a5204662/tensorflow
|
d207f1fccd696966312d0b2b3c9a84b53ca64ca7
|
[
"Apache-2.0"
] | null | null | null |
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "e48f4cd1e8c2de3dacfac21835e1b6b070c0e00c"
TFRT_SHA256 = "e4d8cda2f6e10c85dee5ec3d133b4f662200fa01a9c1f69043eab8614b3039a3"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| 39.380952
| 124
| 0.698912
|
70cf156e129213cf22f39a3e5b2b7630b3ae176a
| 640
|
py
|
Python
|
kora/install/pg10.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 91
|
2020-05-26T05:54:51.000Z
|
2022-03-09T07:33:44.000Z
|
kora/install/pg10.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 12
|
2020-10-03T10:09:11.000Z
|
2021-03-06T23:12:21.000Z
|
kora/install/pg10.py
|
wannaphong/kora
|
8a9034097d07b14094e077769c02a0b4857d179b
|
[
"MIT"
] | 16
|
2020-07-07T18:39:29.000Z
|
2021-03-06T03:46:49.000Z
|
import os
# install PostgreSQL 10
os.system("apt install postgresql postgresql-contrib")
os.system("service postgresql start")
os.system("sudo -u postgres psql -c 'CREATE USER root WITH SUPERUSER'")
# update %%sql and add pg special commands
os.system('pip install -U ipython-sql')
os.system('pip install pgspecial')
os.system('pip install psycopg2-binary') # avoid warning
# config for %%sql
magic = get_ipython().run_line_magic
magic('load_ext', 'sql')
magic('config', 'SqlMagic.displaycon=False')
magic('config', 'SqlMagic.feedback=False')
magic('config', 'SqlMagic.autopandas=True')
magic('sql', 'postgresql+psycopg2://@/postgres')
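# Illustrative follow-up (assumed, not part of the original installer): with the
# %sql magic configured above, a notebook cell can now run, e.g.
#   %sql SELECT version();
# or start a cell with %%sql for multi-line PostgreSQL statements.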
| 32
| 71
| 0.746875
|
5237ce1ced16a55f1aefd8017007786f891bb90e
| 2,813
|
py
|
Python
|
notebook_item.py
|
kevin-funderburg/alfred-microsoft-onenote-navigator
|
90453c5f9f72b502b95520a2e425e06a8eea0708
|
[
"MIT"
] | 57
|
2019-07-15T14:52:20.000Z
|
2022-02-21T13:48:49.000Z
|
notebook_item.py
|
kevin-funderburg/alfred-microsoft-onenote-navigator
|
90453c5f9f72b502b95520a2e425e06a8eea0708
|
[
"MIT"
] | 19
|
2019-06-14T20:14:51.000Z
|
2022-03-27T21:53:13.000Z
|
notebook_item.py
|
kevin-funderburg/alfred-microsoft-onenote-navigator
|
90453c5f9f72b502b95520a2e425e06a8eea0708
|
[
"MIT"
] | 6
|
2019-06-12T09:19:00.000Z
|
2021-06-13T18:45:00.000Z
|
import re
import os
ONENOTE_USER_INFO_CACHE = "~/Library/Containers/com.microsoft.onenote.mac/" \
"Data/Library/Application Support/Microsoft/UserInfoCache/"
ONENOTE_USER_UID = None
ICON_PAGE = 'icons/page.png'
ICON_SECTION = 'icons/section.png'
ICON_NOTEBOOK = 'icons/notebook.png'
ICON_SECTION_GROUP = 'icons/sectiongroup.png'
class NotebookItem:
def __init__(self, row):
self.Type = row[str('Type')]
self.GOID = row[str('GOID')]
self.GUID = row[str('GUID')]
self.GOSID = row[str('GOSID')]
self.ParentGOID = row[str('ParentGOID')]
self.GrandparentGOIDs = row[str('GrandparentGOIDs')]
self.ContentRID = row[str('ContentRID')]
self.RootRevGenCount = row[str('RootRevGenCount')]
self.LastModifiedTime = row[str('LastModifiedTime')]
self.RecentTime = row[str('RecentTime')]
self.PinTime = row[str('PinTime')]
self.Color = row[str('Color')]
self.Title = row[str('Title')]
self.last_grandparent = self.GrandparentGOIDs
self.path = None
self.icon = None
self.url = None
self.set_last_grandparent()
self.set_url()
self.set_icon()
def has_parent(self):
return self.ParentGOID is not None
def has_grandparent(self):
return self.GrandparentGOIDs is not None
def set_last_grandparent(self):
if self.has_grandparent():
if len(self.GrandparentGOIDs) > 50:
grandparents = self.split_grandparents()
self.last_grandparent = grandparents[-1]
def split_grandparents(self):
new_ids = []
items = self.GrandparentGOIDs.split('}')
        for i in range(0, len(items) - 1, 2):
            new_ids.append("{0}}}{1}}}".format(items[i], items[i + 1]))
return new_ids
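    # Illustrative example (assumed GOID format, not from the original): a
    # GrandparentGOIDs string such as "{A}{1}{B}{2}" splits on '}' into
    # ['{A', '{1', '{B', '{2', ''] and is reassembled pairwise by
    # split_grandparents into ['{A}{1}', '{B}{2}'].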
def set_path(self, path):
self.path = path.replace('.one#', '/')
def set_icon(self):
if self.Type == 4:
self.icon = ICON_NOTEBOOK
elif self.Type == 3:
self.icon = ICON_SECTION_GROUP
elif self.Type == 2:
self.icon = ICON_SECTION
else:
self.icon = ICON_PAGE
def set_url(self):
if self.Type == 4:
self.url = 'onenote:https://d.docs.live.net/{0}/Documents/{1}'.format(get_user_uid(), self.Title)
else:
self.url = 'onenote:#page-id={0}&end'.format(self.GUID)
def get_user_uid():
global ONENOTE_USER_UID
if ONENOTE_USER_UID is None:
files = os.listdir(os.path.expanduser(ONENOTE_USER_INFO_CACHE))
for f in files:
if 'LiveId.db' in f:
ONENOTE_USER_UID = re.search('(.*)_LiveId\\.db', f).group(1)
return ONENOTE_USER_UID
| 31.965909
| 109
| 0.594028
|
d412461aa79f0b0eaab193b2ed84835c995e9b15
| 814
|
py
|
Python
|
test/test_time_stamp.py
|
r7l/python-gitea-api
|
31d3dba27ea7e551e2048a1230c4ab4d73365006
|
[
"MIT"
] | 1
|
2022-02-09T23:43:26.000Z
|
2022-02-09T23:43:26.000Z
|
test/test_time_stamp.py
|
r7l/python-gitea-api
|
31d3dba27ea7e551e2048a1230c4ab4d73365006
|
[
"MIT"
] | null | null | null |
test/test_time_stamp.py
|
r7l/python-gitea-api
|
31d3dba27ea7e551e2048a1230c4ab4d73365006
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import gitea_api
from gitea_api.models.time_stamp import TimeStamp # noqa: E501
from gitea_api.rest import ApiException
class TestTimeStamp(unittest.TestCase):
"""TimeStamp unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTimeStamp(self):
"""Test TimeStamp"""
# FIXME: construct object with mandatory attributes with example values
# model = gitea_api.models.time_stamp.TimeStamp() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 20.35
| 79
| 0.679361
|
467238fbcb2a2f29a57b1e402661fb855d477d07
| 5,167
|
py
|
Python
|
src/main.py
|
TomasGB/Voice-Assistant
|
96e08a3c1ad2081f8559949bcd7833a8b2be405d
|
[
"MIT"
] | 2
|
2021-01-08T19:26:57.000Z
|
2021-09-12T03:45:32.000Z
|
src/main.py
|
TomasGB/Voice-Assistant
|
96e08a3c1ad2081f8559949bcd7833a8b2be405d
|
[
"MIT"
] | 1
|
2021-04-09T09:00:35.000Z
|
2021-04-09T13:51:37.000Z
|
src/main.py
|
TomasGB/Voice-Assistant
|
96e08a3c1ad2081f8559949bcd7833a8b2be405d
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import os
import datetime
import time
import pyaudio
from speak import speak, takeCommand
import subprocess
from apiCredentials import weather_Key
import functionalities as func
import triggers as trig
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import threading
os.system('cls')
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
pathChromeDriver = "C:/Program Files (x86)/chromedriver.exe"
def auth_googleCalendar():
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
if __name__ == '__main__':
service = auth_googleCalendar()
api = func.AuthTwitter()
driver = webdriver.Chrome(pathChromeDriver)
driver.minimize_window()
while True:
query = takeCommand().lower()
if query in trig.WAKE_TRIGGERS:
print("Hanna: Hola , ¿en que te puedo ayudar?")
speak("Hola, ¿en que te puedo ayudar?")
query = takeCommand().lower()
if query in trig.NOTE_TAKING_TRIGGERS:
print('Hanna: ¿Que querés que escriba?')
speak('¿Que querés que escriba?')
text = takeCommand().lower()
print('Hanna: ¿Que nombre le pongo?')
func.takeNote(text)
speak('Listo!')
elif query in trig.TIME_TRIGGERS:
currentTime = datetime.datetime.now().strftime("%H:%M")
print(f"Hanna: Son las, {currentTime}, horas")
speak(f"Son las, {currentTime}, horas")
elif query in trig.DAY_TRIGGERS:
currentDate = datetime.datetime.now().strftime("%d, del ,%m")
print(f"Hanna: Hoy es el, {currentDate}")
speak(f"Hoy es el, {currentDate}")
elif query in trig.WHEATHER_TRIGGERS:
print('Hanna: ¿En que ciudad?')
speak('¿En que ciudad?')
city = takeCommand().lower()
func.weatherRequest(city, weather_Key)
elif query in trig.YOUTUBE_TRIGGERS:
youtubeURL = "https://www.youtube.com/"
func.openWebsite(youtubeURL, driver)
elif query in trig.TWITCH_TRIGGERS:
twitchURL = "https://www.twitch.tv/"
func.openWebsite(twitchURL, driver)
elif query in trig.WIKIPEDIA_TRIGGERS:
print('Hanna: Buscando...')
speak("buscando...")
func.getInformation(query)
elif query in trig.SONG_TRIGGERS:
print('Hanna: ¿que canción busco?')
speak('¿que canción busco?')
song = takeCommand().lower()
print(f"Hanna: Buscando la cancion, {song}")
speak(f"Buscando la cancion, {song}")
t = threading.Thread(
target=func.playVideoOnYoutube, args=(song, driver,))
t.start()
#func.playVideoOnYoutube(song, driver)
elif query in trig.VIDEO_TRIGGERS:
print('Hanna: ¿Que video busco?')
speak('¿que video busco?')
video = takeCommand().lower()
print(f"Hanna: Buscando el video, {video}")
speak(f"buscando la video, {video}")
t = threading.Thread(
target=func.playVideoOnYoutube, args=(video, driver,))
t.start()
#func.playVideoOnYoutube(video, driver)
elif query in trig.GOOGLE_CALENDAR_TRIGGERS:
print("Hanna: Buscando eventos...")
speak("Buscando eventos")
func.getEvents(10, service)
elif query in trig.CHECK_STREAMERS_TRIGGERS:
func.checkStreamers()
elif query in trig.READ_TWEETS_TRIGGERS:
func.getLatestTweets(api)
elif query in trig.READ_TRENDS_TRIGGERS:
func.getTrendsOnTwitter(api)
elif query in trig.PUBLISH_TWEET_TRIGGERS:
func.publishTweet(api)
elif query in trig.SLEEP_TRIGGERS:
print('Hanna: Hasta luego!')
speak('Hasta luego!')
break
else:
pass
| 35.14966
| 77
| 0.587188
|
33612fe36d862a36a6e5c3786db8ff3797cd1b70
| 2,547
|
py
|
Python
|
apps/crop_img.py
|
VladimirYugay/PIFu
|
8f80e7ee539098e53c419a518f6f180dbdec97c5
|
[
"MIT"
] | null | null | null |
apps/crop_img.py
|
VladimirYugay/PIFu
|
8f80e7ee539098e53c419a518f6f180dbdec97c5
|
[
"MIT"
] | null | null | null |
apps/crop_img.py
|
VladimirYugay/PIFu
|
8f80e7ee539098e53c419a518f6f180dbdec97c5
|
[
"MIT"
] | null | null | null |
import os
import cv2
import numpy as np
from pathlib import Path
import argparse
def get_bbox(msk):
rows = np.any(msk, axis=1)
cols = np.any(msk, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
return rmin, rmax, cmin, cmax
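# Illustrative example (not in the original): for a mask whose nonzero pixels
# span rows 10..20 and columns 30..40, get_bbox returns (10, 20, 30, 40).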
def process_img(img, msk, bbox=None):
if bbox is None:
bbox = get_bbox(msk > 100)
cx = (bbox[3] + bbox[2]) // 2
cy = (bbox[1] + bbox[0]) // 2
w = img.shape[1]
h = img.shape[0]
height = int(1.138 * (bbox[1] - bbox[0]))
hh = height // 2
# crop
dw = min(cx, w - cx, hh)
if cy - hh < 0:
img = cv2.copyMakeBorder(img, hh - cy, 0, 0, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])
msk = cv2.copyMakeBorder(msk, hh - cy, 0, 0, 0, cv2.BORDER_CONSTANT, value=0)
cy = hh
if cy + hh > h:
img = cv2.copyMakeBorder(img, 0, cy + hh - h, 0, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])
msk = cv2.copyMakeBorder(msk, 0, cy + hh - h, 0, 0, cv2.BORDER_CONSTANT, value=0)
img = img[cy - hh:(cy + hh), cx - dw:cx + dw, :]
msk = msk[cy - hh:(cy + hh), cx - dw:cx + dw]
dw = img.shape[0] - img.shape[1]
if dw != 0:
img = cv2.copyMakeBorder(img, 0, 0, dw // 2, dw // 2, cv2.BORDER_CONSTANT, value=[0, 0, 0])
msk = cv2.copyMakeBorder(msk, 0, 0, dw // 2, dw // 2, cv2.BORDER_CONSTANT, value=0)
img = cv2.resize(img, (512, 512))
msk = cv2.resize(msk, (512, 512))
kernel = np.ones((3, 3), np.uint8)
msk = cv2.erode((255 * (msk > 100)).astype(np.uint8), kernel, iterations=1)
return img, msk
def main():
'''
    Given a foreground mask, this script crops and resizes an input image and mask for processing.
'''
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_image', type=str, help='if the image has alpha channel, it will be used as mask')
parser.add_argument('-m', '--input_mask', type=str)
parser.add_argument('-o', '--out_path', type=str, default='./sample_images')
args = parser.parse_args()
img = cv2.imread(args.input_image, cv2.IMREAD_UNCHANGED)
if img.shape[2] == 4:
msk = img[:, :, 3:]
img = img[:, :, :3]
else:
msk = cv2.imread(args.input_mask, cv2.IMREAD_GRAYSCALE)
img_new, msk_new = process_img(img, msk)
img_name = Path(args.input_image).stem
cv2.imwrite(os.path.join(args.out_path, img_name + '.png'), img_new)
cv2.imwrite(os.path.join(args.out_path, img_name + '_mask.png'), msk_new)
if __name__ == "__main__":
main()
| 31.8375
| 120
| 0.586572
|
42a0244af2fc5c6a4f401badc90ae192a5504a04
| 2,546
|
py
|
Python
|
cc/api/tests/test_support.py
|
creativecommons/cc.api
|
11ad601463b16a77066069ca646d3df284092230
|
[
"MIT"
] | 2
|
2017-12-25T14:11:48.000Z
|
2020-04-04T23:02:10.000Z
|
cc/api/tests/test_support.py
|
creativecommons/cc.api
|
11ad601463b16a77066069ca646d3df284092230
|
[
"MIT"
] | 4
|
2019-02-12T17:23:14.000Z
|
2021-01-04T16:16:26.000Z
|
cc/api/tests/test_support.py
|
creativecommons/cc.api
|
11ad601463b16a77066069ca646d3df284092230
|
[
"MIT"
] | 7
|
2015-06-08T11:17:55.000Z
|
2021-04-12T13:16:09.000Z
|
import os
import lxml.html
from cc.api.tests.test_common import *
####################
## Path constants ##
####################
RELAX_OPTIONS = os.path.join(RELAX_PATH, 'options.relax.xml')
RELAX_SELECT = os.path.join(RELAX_PATH, 'select.relax.xml')
##################
## Test classes ##
##################
class TestSupport(TestApi):
def test_support_jurisdictions(self):
"""/support/jurisdictions served properly."""
res = self.app.get('/support/jurisdictions')
body = self.makexml(res.body)
assert relax_validate(RELAX_OPTIONS, body)
def test_javascript(self):
"""Test javascript wrapper over /support/jurisdictions."""
res = self.app.get('/support/jurisdictions')
jsres = self.app.get('/support/jurisdictions.js')
opts = lxml.html.fromstring(res.body)
jsopts = jsres.body.strip().split('\n')
assert len(opts) == len(jsopts)
for i,opt in enumerate(opts):
assert "document.write('%s');" % lxml.html.tostring(opt) == jsopts[i]
# attempt with select tag...
res = self.app.get('/support/jurisdictions?select=testing')
jsres = self.app.get('/support/jurisdictions.js?select=testing')
opts = lxml.html.fromstring(res.body)
jsopts = jsres.body.strip().split('\n')
# <select> <options> </select>
assert (1 + len(opts) + 1) == len(jsopts)
assert jsopts[0] == "document.write('<select name=\"testing\">');"
assert opts.attrib['name'] == 'testing'
jsopts = jsopts[1:-1] # strip off select tags
for i,opt in enumerate(opts):
assert "document.write('%s');" % lxml.html.tostring(opt) == jsopts[i]
def test_ignore_extra_args(self):
"""Extra arguments are ignored."""
res = self.app.get('/support/jurisdictions?foo=bar')
body = self.makexml(res.body)
assert relax_validate(RELAX_OPTIONS, body)
''' NOTE: locale el causes server error; fix in next implementation
def test_locale(self):
"""Test locale parameter."""
for locale in self.data.locales():
res = self.app.get('/support/jurisdictions?locale=%s' % locale)
body = self.makexml(res.body)
assert relax_validate(RELAX_OPTIONS, body)
'''
def test_select(self):
"""Test select parameter."""
res = self.app.get('/support/jurisdictions?select=foo')
body = res.body.replace('&', '&')
assert relax_validate(RELAX_SELECT, body)
| 33.946667
| 81
| 0.599372
|
c3e9af5dc1da2d8bc1a441dd1cb07ed1c62eed86
| 1,112
|
py
|
Python
|
nanome/_internal/_ui/_serialization/_mesh_serializer.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 3
|
2020-07-02T13:08:27.000Z
|
2021-11-24T14:32:53.000Z
|
nanome/_internal/_ui/_serialization/_mesh_serializer.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 11
|
2020-09-14T17:01:47.000Z
|
2022-02-18T04:00:52.000Z
|
nanome/_internal/_ui/_serialization/_mesh_serializer.py
|
nanome-ai/nanome-plugin-api
|
f2ce6a5e3123ee7449a90c2659f3891124289f4a
|
[
"MIT"
] | 5
|
2020-08-12T16:30:03.000Z
|
2021-12-06T18:04:23.000Z
|
from . import _UIBaseSerializer
from .. import _Mesh
from nanome.util import IntEnum
from nanome._internal._util._serializers import _ColorSerializer, _TypeSerializer
class _MeshSerializer(_TypeSerializer):
def __init__(self):
self.color = _ColorSerializer()
def version(self):
return 1
def name(self):
return "Mesh"
def serialize(self, version, value, context):
if (version == 0):
safe_id = (context._plugin_id << 24) & 0x7FFFFFFF
safe_id |= value._content_id
else:
safe_id = value._content_id
context.write_int(safe_id)
context.write_using_serializer(self.color, value._mesh_color)
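    # Note (an assumption inferred from the code, not stated in the original):
    # for protocol version 0 the content id occupies the low 24 bits and the
    # plugin id the higher bits of a 31-bit integer, which is why deserialize
    # below masks the id with 0x00FFFFFF.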
def deserialize(self, version, context):
value = _Mesh._create()
value._content_id = context.read_int()
if (version == 0):
id_mask = 0x00FFFFFF
value._content_id &= id_mask
value._mesh_color = context.read_using_serializer(self.color)
return value
_UIBaseSerializer.register_type("Mesh", _UIBaseSerializer.ContentType.emesh, _MeshSerializer())
| 30.054054
| 95
| 0.668165
|
228c9892273265b3ecf817a877d8c3edaa5e58d4
| 2,694
|
py
|
Python
|
ambramelin/util/credentials.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
ambramelin/util/credentials.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
ambramelin/util/credentials.py
|
Palisand/ambramelin
|
264da5c3592dc9287bdda3c1383a04420439d07b
|
[
"MIT"
] | null | null | null |
import subprocess
from abc import ABC, abstractmethod
from typing import Optional
from ambramelin.util.errors import AmbramelinError
class CredentialManagerError(AmbramelinError):
pass
class CredentialManager(ABC):
@abstractmethod
def get_password(self, account: str) -> Optional[str]:
pass
@abstractmethod
def set_password(self, account: str, password: str) -> None:
pass
@abstractmethod
def del_password(self, account: str) -> None:
pass
def password_exists(self, account: str) -> bool:
if self.get_password(account) is None:
return False
return True
class KeychainManager(CredentialManager):
def get_password(self, account: str) -> Optional[str]:
try:
res = subprocess.run(
[
"security",
"find-generic-password",
"-a",
account,
"-s",
"ambramelin",
"-w",
],
check=True,
capture_output=True,
)
except subprocess.CalledProcessError:
return None
else:
return res.stdout.decode().strip()
def set_password(self, account: str, password: str) -> None:
try:
subprocess.run(
[
"security",
"add-generic-password",
"-a",
account,
"-s",
"ambramelin",
"-w",
password,
],
check=True,
capture_output=True,
)
except subprocess.CalledProcessError as error:
raise CredentialManagerError("Failed to set password.") from error
print(f"Password for '{account}' added to keychain.")
def del_password(self, account: str) -> None:
try:
subprocess.run(
[
"security",
"delete-generic-password",
"-a",
account,
"-s",
"ambramelin",
],
check=True,
capture_output=True,
)
except subprocess.CalledProcessError as error:
raise CredentialManagerError("Failed to delete password.") from error
print(f"Password for '{account}' deleted from keychain.")
# TODO: add other managers
# https://docs.docker.com/engine/reference/commandline/login/#credentials-store
managers = {
"keychain": KeychainManager(),
}
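# Illustrative usage (not part of the original module; the account name and
# password are made up):
#   manager = managers["keychain"]
#   manager.set_password("alice", "s3cret")
#   assert manager.password_exists("alice")
#   manager.del_password("alice")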
| 27.212121
| 81
| 0.5
|
c57b1f7fca9e23fd0350fd7984fe1eb1e1a23a73
| 4,627
|
py
|
Python
|
data/Resources/Scripts/bleUartCommands.py
|
robbitay/ConstPort
|
d948ceb5f0e22504640578e3ef31e3823b29c1c3
|
[
"Unlicense"
] | null | null | null |
data/Resources/Scripts/bleUartCommands.py
|
robbitay/ConstPort
|
d948ceb5f0e22504640578e3ef31e3823b29c1c3
|
[
"Unlicense"
] | null | null | null |
data/Resources/Scripts/bleUartCommands.py
|
robbitay/ConstPort
|
d948ceb5f0e22504640578e3ef31e3823b29c1c3
|
[
"Unlicense"
] | null | null | null |
import sys, os, re
BleModCmd_GetBridgeInfo = 0x01
BleModCmd_GetStatus = 0x02
BleModCmd_Pair = 0x03
BleModCmd_Unpair = 0x04
BleModCmd_UpdateStart = 0x05
BleModCmd_FlashWriteRow = 0x06
BleModCmd_UpdateComplete = 0x07
BleModCmd_ButtonHeldDone = 0x08
BleModCmd_SetRadioSettings = 0x09
BleModCmd_GetRadioSettings = 0x0A
BleModCmd_SetQosConfig = 0x0B
BleModCmd_GetQosConfig = 0x0C
BleModCmd_RadioUpdateStart = 0x0D
BleModCmd_RadioFlashWriteRow = 0x0E
BleModCmd_RadioUpdateComplete = 0x0F
BleModCmd_BleConnected = 0x10
BleModCmd_BleDisconnected = 0x11
BleModCmd_Register = 0x12
BleModCmd_Reset = 0x13
BleModCmd_GetAllState = 0x14
BleModCmd_GetRegistrationInfo = 0x15
BleModCmd_GetVoltageLevels = 0x16
BleModCmd_Deploy = 0x17
BleModCmd_ForceNextPair = 0x18
BleModCmd_SetFailSafeOption = 0x19
BleModCmd_GetFailSafeOption = 0x1A
BleModCmd_GetOperatingValues = 0x1B
BleModCmd_GetResetCauses = 0x1C
BleModCmd_ClearResetCauses = 0x1D
BleModCmd_SetDebugModeEnabled = 0x1E
BleModCmd_GetDebugModeEnabled = 0x1F
BleModCmd_GetLastPacketTime = 0x20
BleModCmd_GetAllVersions = 0x21
BleModCmd_GetRadioUpdateStatus = 0x22
BleModCmd_GetHoppingTable = 0x23
BleModCmd_SendPacket = 0x24
BleModCmd_GetAppPicVersion = 0x25
BleModCmd_GetRadioPicVersion = 0x26
BleModCmd_SetCriticalBluetooth = 0x27
BleModCmd_SetWiegandLedMode = 0x28
BleModCmd_GetWiegandLedMode = 0x29
BleModCmd_DebugOutput = 0x2A
BleModCmd_BootloaderStart = 0x2B
ATTN_CHAR = 0x7E
CMD_HEADER_SIZE = 4
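# Assumed frame layout, inferred from the parser in the main loop below (the
# original file does not document it):
#   [ATTN_CHAR][command byte][length low byte][length high byte][payload ...]
# where the payload is `length` bytes long.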
debugWriteHex = True
debugPrefix = str(chr(0x01))
infoPrefix = str(chr(0x02))
alertPrefix = str(chr(0x03))
importantPrefix = str(chr(0x04))
wroteNewLine = True
def WriteCharacter(c):
#
sys.stdout.write(c)
#
def WriteString(prefix, line):
#
global wroteNewLine
for c in line:
#
if (wroteNewLine):
#
for p in prefix:
#
WriteCharacter(p)
#
wroteNewLine = False
#
WriteCharacter(c)
if (c == '\n'): wroteNewLine = True
#
#
def DEBUG_Write(line):
#
WriteString(debugPrefix, line)
sys.stdout.flush()
#
def DEBUG_WriteLine(line):
#
WriteString(debugPrefix, line + "\n")
sys.stdout.flush()
#
def INFO_Write(line):
#
WriteString(infoPrefix, line)
sys.stdout.flush()
#
def INFO_WriteLine(line):
#
WriteString(infoPrefix, line + "\n")
sys.stdout.flush()
#
def ALERT_Write(line):
#
WriteString(alertPrefix, line)
sys.stdout.flush()
#
def ALERT_WriteLine(line):
#
WriteString(alertPrefix, line + "\n")
sys.stdout.flush()
#
def IMPORTANT_Write(line):
#
WriteString(importantPrefix, line)
sys.stdout.flush()
#
def IMPORTANT_WriteLine(line):
#
WriteString(importantPrefix, line + "\n")
sys.stdout.flush()
#
print("bleUartCommands.py started!")
sys.stdout.flush()
counter = 0
justWroteNewLine = True
dataBuffer = []
while(True):
#
newCharacters = sys.stdin.read(1)
# print("Char \'%s\'" % (newCharacter))
for newCharacter in newCharacters:
#
newCharacter = ord(newCharacter)
if (len(dataBuffer) == 0):
#
if (newCharacter == ATTN_CHAR):
#
dataBuffer.append(newCharacter)
#
else:
#
ALERT_WriteLine("Dropped 0x%02X" % (newCharacter))
#
#
else:
#
dataBuffer.append(newCharacter)
if (len(dataBuffer) >= CMD_HEADER_SIZE):
#
attn = dataBuffer[0]
cmd = dataBuffer[1]
				length = dataBuffer[2] + (dataBuffer[3] << 8)
payload = dataBuffer[4:]
if (len(dataBuffer) == CMD_HEADER_SIZE + length):
#
if (debugWriteHex):
#
DEBUG_Write("CMD %02X %u byte(s): { " % (cmd, length))
for bIndex in range(len(dataBuffer)):
#
b = dataBuffer[bIndex]
if (bIndex == 0): DEBUG_Write("ATTN ")
elif (bIndex == 1): DEBUG_Write("[%02X] " % b)
elif (bIndex == 2): DEBUG_Write("[%02X" % b)
elif (bIndex == 3): DEBUG_Write("%02X] " % b)
else: DEBUG_Write("%02X " % b)
#
DEBUG_WriteLine("}")
#
# if (cmd == BleModCmd_GetBridgeInfo):
# #
# INFO_WriteLine("GetBridgeInfo")
# #
# elif (cmd == BleModCmd_GetStatus):
# #
# INFO_WriteLine("GetStatus")
# #
# elif (cmd == BleModCmd_DebugOutput):
# #
# INFO_Write("b-")
# for p in payload[1:]:
# #
# INFO_Write("%c" % p)
# #
# INFO_WriteLine("")
# #
# else:
# #
# ALERT_WriteLine("Unknown %u byte CMD %02X!" % (length, cmd))
# #
dataBuffer = []
#
#
#
#
#
| 22.352657
| 68
| 0.646855
|
c5846495420841c3067e90128668f1ccdc4b1a3f
| 7,887
|
py
|
Python
|
maml/apps/symbolic/_sis.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 161
|
2020-01-26T08:24:41.000Z
|
2022-03-29T06:42:42.000Z
|
maml/apps/symbolic/_sis.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 195
|
2020-01-25T19:35:20.000Z
|
2022-03-28T13:14:30.000Z
|
maml/apps/symbolic/_sis.py
|
anooptp/maml
|
fdd95f3d60c9281d871d89b25b073e87b6ba4e52
|
[
"BSD-3-Clause"
] | 46
|
2020-03-30T12:56:39.000Z
|
2022-03-27T12:53:23.000Z
|
"""
Sure Independence Screening
https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
"""
import logging
from itertools import combinations
from typing import Optional, Dict, List
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import get_scorer
from maml.apps.symbolic._selectors import BaseSelector, DantzigSelector
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def _get_coeff(x, y):
coeff, _, _, _ = np.linalg.lstsq(x, y, rcond=-1)
return coeff
def _eval(x, y, coeff, metric):
metric_func = get_scorer(metric)
lr = LinearRegression(fit_intercept=False)
lr.coef_ = coeff # type: ignore
lr.intercept_ = 0
return metric_func(lr, x, y)
def _best_combination(x, y, find_sel, find_sel_new, metric: str = "neg_mean_absolute_error"):
if len(find_sel_new) == 1:
comb_best = np.append(find_sel, find_sel_new)
coeff_best = _get_coeff(x[:, comb_best], y)
score_best = _eval(x[:, comb_best], y, coeff_best, metric)
return comb_best, coeff_best, score_best
combs = combinations(np.append(find_sel, find_sel_new), len(find_sel) + 1)
coeff_best = _get_coeff(x[:, find_sel], y)
score_best = _eval(x[:, find_sel], y, coeff_best, metric)
comb_best = find_sel
for ind_comb in combs:
d = x[:, ind_comb]
coeff = _get_coeff(d, y)
score = _eval(d, y, coeff, metric)
if score > score_best:
score_best = score
comb_best = ind_comb
coeff_best = coeff
return comb_best, coeff_best, score_best
class SIS:
"""
Sure independence screening method.
The method consists of two steps:
1. Screen
2. Select
"""
def __init__(self, gamma=0.1, selector: Optional[BaseSelector] = None, verbose: bool = True):
"""
Sure independence screening
Args:
gamma (float): ratio between selected features and original feature sizes
selector (BaseSelector): selector after the screening
verbose (bool): whether to output information along the way
"""
self.gamma = gamma
self.selector = selector
self.verbose = verbose
def run(self, x, y, select_options=None):
"""
Run the SIS with selector
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
select_options (dict): options in the optimizations provided
to scipy.optimize.minimize. If the selector is using cvxpy
optimization package, this option is fed into cp.Problem.solve
Returns: selected feature indices
"""
screened_indices = self.screen(x, y)
if self.verbose:
logger.info(f"After the screening step, {len(screened_indices)}/{x.shape[1]} features remains")
x_screen = x[:, screened_indices]
final_selected = self.select(x_screen, y, select_options)
if self.verbose:
logger.info(f"After the selection step, {len(final_selected)}/{x.shape[1]} features remains")
return screened_indices[final_selected]
def screen(self, x, y):
"""
Simple screening method by comparing the correlation between features
and the target
Args:
x (np.ndarray): input array
y (np.ndarray): target array
Returns: top indices
"""
n = x.shape[1]
omega = x.T.dot(y)
sorted_omega = np.argsort(omega)[::-1]
d = int(n * self.gamma)
top_indices = sorted_omega[:d]
return top_indices
def select(self, x, y, options=None):
"""
Select features using selectors
Args:
x (np.ndarray): input array
y (np.ndarray): target array
options (dict): options for the optimization
Returns:
"""
return self.selector.select(x, y, options)
def compute_residual(self, x, y):
"""
Compute residual
Args:
x (np.ndarray): input array
y (np.ndarray): target array
Returns: residual vector
"""
return self.selector.compute_residual(x, y)
def set_selector(self, selector: BaseSelector):
"""
Set new selector
Args:
selector (BaseSelector): a feature selector
Returns:
"""
self.selector = selector
def set_gamma(self, gamma):
"""
Set gamma
Args:
gamma(float): new gamma value
"""
self.gamma = gamma
def update_gamma(self, step: float = 0.5):
"""
Update the sis object so that sis.select
        returns at least one feature
Args:
step(float): ratio to update the parameters
"""
self.set_gamma(self.gamma * (1 + step))
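# Minimal usage sketch (illustrative only; `x` and `y` are assumed to be an
# already-prepared feature matrix and target vector as numpy arrays):
#   sis = SIS(gamma=0.1, selector=DantzigSelector(0.1))
#   selected_indices = sis.run(x, y)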
class ISIS:
"""Iterative SIS"""
def __init__(self, sis: SIS = SIS(gamma=0.1, selector=DantzigSelector(0.1)), l0_regulate: bool = True):
"""
Args:
sis(SIS): sis object
l0_regulate(bool): Whether to regulate features in each iteration, default True
"""
self.sis = sis
self.selector = sis.selector
self.l0_regulate = l0_regulate
self.coeff = [] # type: ignore
self.find_sel = [] # type: ignore
def run(
self,
x,
y,
max_p: int = 10,
metric: str = "neg_mean_absolute_error",
options: Optional[Dict] = None,
step: float = 0.5,
):
"""
Run the ISIS
Args:
x:
y:
            max_p(int): Number of features desired
metric (str): scorer function, used with
sklearn.metrics.get_scorer
options:
step(float): step to update gamma with
Returns:
find_sel(np.array): np.array of index of selected features
coeff(np.array): np.array of coeff of selected features
"""
assert max_p <= x.shape[1]
findex = np.array(np.arange(0, x.shape[1]))
find_sel = self.sis.select(x, y, options)
self.coeff = _get_coeff(x[:, find_sel], y)
if len(find_sel) >= max_p:
self.coeff = _get_coeff(x[:, find_sel[:max_p]], y)
return find_sel[:max_p]
new_findex = np.array(list(set(findex) - set(find_sel)))
new_y = self.sis.compute_residual(x, y)
new_x = x[:, new_findex]
while len(find_sel) < max_p:
find_sel_new: List[int] = []
try:
find_sel_new = self.sis.run(new_x, new_y, options)
except ValueError:
while len(find_sel_new) == 0:
self.sis.update_gamma(step)
find_sel_new = self.sis.run(new_x, new_y)
if self.l0_regulate:
find_sel, _, _ = _best_combination(x, y, find_sel, new_findex[find_sel_new], metric)
else:
find_sel = np.append(find_sel, new_findex[find_sel_new])
new_findex = np.array(list(set(findex) - set(find_sel)))
new_y = self.sis.compute_residual(new_x, new_y)
new_x = x[:, new_findex]
self.coeff = _get_coeff(x[:, find_sel], y)
self.find_sel = find_sel
return find_sel
def evaluate(self, x: np.ndarray, y: np.ndarray, metric: str = "neg_mean_absolute_error") -> float:
"""
Evaluate the linear models using x, and y test data
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
metric (str): scorer function, used with
sklearn.metrics.get_scorer
Returns:
"""
return _eval(x[:, self.find_sel], y, self.coeff, metric)
| 30.218391
| 107
| 0.583745
|
193d26bafaa03ad1b0899292ab445312dac89d24
| 24,307
|
py
|
Python
|
deepvariant/realigner/realigner.py
|
blackwer/deepvariant
|
4a6f09ba69839ae211aab3c02d13ab9edd5620dd
|
[
"BSD-3-Clause"
] | null | null | null |
deepvariant/realigner/realigner.py
|
blackwer/deepvariant
|
4a6f09ba69839ae211aab3c02d13ab9edd5620dd
|
[
"BSD-3-Clause"
] | null | null | null |
deepvariant/realigner/realigner.py
|
blackwer/deepvariant
|
4a6f09ba69839ae211aab3c02d13ab9edd5620dd
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Correct read alignment by realigning the read to its most likely haplotype.
This is achieved by constructing de-Bruijn graphs in candidate regions with
potential variations, and determining the most likely X haplotypes (where X is
the ploidy).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import os.path
from absl import flags
import tensorflow as tf
from third_party.nucleus.io import sam
from third_party.nucleus.util import ranges
from third_party.nucleus.util import utils
from deepvariant.protos import realigner_pb2
from deepvariant.realigner import window_selector
from deepvariant.realigner.python import debruijn_graph
from deepvariant.realigner.python import fast_pass_aligner
from deepvariant.vendor import timer
from google.protobuf import text_format
_UNSET_WS_INT_FLAG = -1
flags.DEFINE_bool('ws_use_window_selector_model', False,
'Activate the use of window selector models.')
flags.DEFINE_string(
'ws_window_selector_model', None,
'Path to a text format proto of the window selector model to use.')
flags.DEFINE_integer(
'ws_min_num_supporting_reads', _UNSET_WS_INT_FLAG,
'Minimum number of supporting reads to call a reference position for local '
'assembly.')
flags.DEFINE_integer(
'ws_max_num_supporting_reads', _UNSET_WS_INT_FLAG,
'Maximum number of supporting reads to call a reference position for local '
'assembly.')
flags.DEFINE_integer(
'ws_min_mapq', 20,
'Minimum read alignment quality to consider in calling a reference '
'position for local assembly.')
flags.DEFINE_integer(
'ws_min_base_quality', 20,
'Minimum base quality to consider in calling a reference position for '
'local assembly.')
flags.DEFINE_integer(
'ws_min_windows_distance', 80,
'Minimum distance between candidate windows for local assembly.')
flags.DEFINE_integer(
'ws_max_window_size', 1000,
'Maximum window size to consider for local assembly. Large noisy regions '
'are skipped for realignment.')
flags.DEFINE_integer(
'ws_region_expansion_in_bp', 20,
'Number of bases to expand the region when calculating windows; larger '
'values add overhead but allow larger nearby events to contribute evidence '
'for assembling an region even if they are not contained by the region.')
flags.DEFINE_integer('dbg_min_k', 10, 'Initial k-mer size to build the graph.')
flags.DEFINE_integer(
'dbg_max_k', 101,
'Maximum k-mer size. Larger k-mer size is used to resolve graph cycles.')
flags.DEFINE_integer('dbg_step_k', 1,
'Increment size for k to try in resolving graph cycles.')
flags.DEFINE_integer(
'dbg_min_mapq', 14,
'Minimum read alignment quality to consider in building the graph.')
flags.DEFINE_integer(
'dbg_min_base_quality', 15,
'Minimum base quality in a k-mer sequence to consider in building the '
'graph.')
flags.DEFINE_integer('dbg_min_edge_weight', 2,
'Minimum number of supporting reads to keep an edge.')
flags.DEFINE_integer(
'dbg_max_num_paths', 256,
'Maximum number of paths within a graph to consider for realignment. '
'Set max_num_paths to 0 to have unlimited number of paths.')
flags.DEFINE_integer('aln_match', 4,
'Match score (expected to be a non-negative score).')
flags.DEFINE_integer('aln_mismatch', 6,
'Mismatch score (expected to be a non-negative score).')
flags.DEFINE_integer(
'aln_gap_open', 8, 'Gap open score (expected to be a non-negative score). '
'Score for a gap of length g is -(gap_open + (g - 1) * gap_extend).')
flags.DEFINE_integer(
'aln_gap_extend', 2,
'Gap extend score (expected to be a non-negative score). '
'Score for a gap of length g is -(gap_open + (g - 1) * gap_extend).')
flags.DEFINE_integer('aln_k', 23, 'k-mer size used to index target sequence.')
flags.DEFINE_float('aln_error_rate', .01, 'Estimated sequencing error rate.')
flags.DEFINE_string(
'realigner_diagnostics', '',
'Root directory where the realigner should place diagnostic output (such as'
' a dump of the DeBruijn graph, and a log of metrics reflecting the graph '
'and realignment to the haplotypes). If empty, no diagnostics are output.'
)
flags.DEFINE_bool(
'emit_realigned_reads', False,
'If True, we will emit realigned reads if our realigner_diagnostics are '
'also enabled.')
flags.DEFINE_bool(
'use_fast_pass_aligner', True,
'If True, fast_pass_aligner (improved performance) implementation is used ')
flags.DEFINE_integer(
'max_num_mismatches', 2,
'Num of maximum allowed mismatches for quick read to '
'haplotype alignment.')
flags.DEFINE_float(
'realignment_similarity_threshold', 0.16934,
'Similarity threshold used in realigner in Smith-Waterman'
'alignment.')
flags.DEFINE_integer('kmer_size', 32,
'K-mer size for fast pass alinger reads index.')
# Margin added to the reference sequence for the aligner module.
_REF_ALIGN_MARGIN = 20
_DEFAULT_MIN_SUPPORTING_READS = 2
_DEFAULT_MAX_SUPPORTING_READS = 300
_ALLELE_COUNT_LINEAR_MODEL_DEFAULT = realigner_pb2.WindowSelectorModel(
model_type=realigner_pb2.WindowSelectorModel.ALLELE_COUNT_LINEAR,
allele_count_linear_model=realigner_pb2.WindowSelectorModel
.AlleleCountLinearModel(
bias=-0.683379,
coeff_soft_clip=2.997000,
coeff_substitution=-0.086644,
coeff_insertion=2.493585,
coeff_deletion=1.795914,
coeff_reference=-0.059787,
decision_boundary=3))
# ---------------------------------------------------------------------------
# Set configuration settings.
# ---------------------------------------------------------------------------
def window_selector_config(flags_obj):
"""Creates a WindowSelectorOptions proto based on input and default settings.
Args:
flags_obj: configuration FLAGS.
Returns:
realigner_pb2.WindowSelector protobuf.
Raises:
ValueError: If either ws_{min,max}_supporting_reads are set and
ws_use_window_selector_model is True.
Or if ws_window_selector_model > ws_max_num_supporting_reads.
Or if ws_use_window_selector_model is False and
ws_window_selector_model is not None.
"""
if not flags_obj.ws_use_window_selector_model:
if flags_obj.ws_window_selector_model is not None:
raise ValueError('Cannot specify a ws_window_selector_model '
'if ws_use_window_selector_model is False.')
min_num_supporting_reads = (
_DEFAULT_MIN_SUPPORTING_READS
if flags_obj.ws_min_num_supporting_reads == _UNSET_WS_INT_FLAG else
flags_obj.ws_min_num_supporting_reads)
max_num_supporting_reads = (
_DEFAULT_MAX_SUPPORTING_READS
if flags_obj.ws_max_num_supporting_reads == _UNSET_WS_INT_FLAG else
flags_obj.ws_max_num_supporting_reads)
window_selector_model = realigner_pb2.WindowSelectorModel(
model_type=realigner_pb2.WindowSelectorModel.VARIANT_READS,
variant_reads_model=realigner_pb2.WindowSelectorModel
.VariantReadsThresholdModel(
min_num_supporting_reads=min_num_supporting_reads,
max_num_supporting_reads=max_num_supporting_reads))
else:
if flags_obj.ws_min_num_supporting_reads != _UNSET_WS_INT_FLAG:
raise ValueError('Cannot use both ws_min_num_supporting_reads and '
'ws_use_window_selector_model flags.')
if flags_obj.ws_max_num_supporting_reads != _UNSET_WS_INT_FLAG:
raise ValueError('Cannot use both ws_max_num_supporting_reads and '
'ws_use_window_selector_model flags.')
if flags_obj.ws_window_selector_model is None:
window_selector_model = _ALLELE_COUNT_LINEAR_MODEL_DEFAULT
else:
with tf.io.gfile.GFile(flags_obj.ws_window_selector_model) as f:
window_selector_model = text_format.Parse(
f.read(), realigner_pb2.WindowSelectorModel())
if (window_selector_model.model_type ==
realigner_pb2.WindowSelectorModel.VARIANT_READS):
model = window_selector_model.variant_reads_model
if model.max_num_supporting_reads < model.min_num_supporting_reads:
raise ValueError('ws_min_supporting_reads should be smaller than '
'ws_max_supporting_reads.')
ws_config = realigner_pb2.WindowSelectorOptions(
min_mapq=flags_obj.ws_min_mapq,
min_base_quality=flags_obj.ws_min_base_quality,
min_windows_distance=flags_obj.ws_min_windows_distance,
max_window_size=flags_obj.ws_max_window_size,
region_expansion_in_bp=flags_obj.ws_region_expansion_in_bp,
window_selector_model=window_selector_model)
return ws_config
def realigner_config(flags_obj):
"""Creates a RealignerOptions proto based on input and default settings.
Args:
flags_obj: configuration FLAGS.
Returns:
realigner_pb2.RealignerOptions protobuf.
Raises:
ValueError: If we observe invalid flag values.
"""
ws_config = window_selector_config(flags_obj)
dbg_config = realigner_pb2.DeBruijnGraphOptions(
min_k=flags_obj.dbg_min_k,
max_k=flags_obj.dbg_max_k,
step_k=flags_obj.dbg_step_k,
min_mapq=flags_obj.dbg_min_mapq,
min_base_quality=flags_obj.dbg_min_base_quality,
min_edge_weight=flags_obj.dbg_min_edge_weight,
max_num_paths=flags_obj.dbg_max_num_paths)
aln_config = realigner_pb2.AlignerOptions(
match=flags_obj.aln_match,
mismatch=flags_obj.aln_mismatch,
gap_open=flags_obj.aln_gap_open,
gap_extend=flags_obj.aln_gap_extend,
k=flags_obj.aln_k,
error_rate=flags_obj.aln_error_rate,
max_num_of_mismatches=flags_obj.max_num_mismatches,
realignment_similarity_threshold=flags_obj
.realignment_similarity_threshold,
kmer_size=flags_obj.kmer_size)
diagnostics = realigner_pb2.Diagnostics(
enabled=bool(flags_obj.realigner_diagnostics),
output_root=flags_obj.realigner_diagnostics,
emit_realigned_reads=flags_obj.emit_realigned_reads)
return realigner_pb2.RealignerOptions(
ws_config=ws_config,
dbg_config=dbg_config,
aln_config=aln_config,
diagnostics=diagnostics)
class DiagnosticLogger(object):
"""Writes diagnostic information about the assembler."""
def __init__(self,
config,
graph_filename='graph.dot',
metrics_filename='realigner_metrics.csv',
realigned_reads_filename='realigned_reads.bam'):
self.config = config
self.graph_filename = graph_filename
self.metrics_filename = metrics_filename
self.realigned_reads_filename = realigned_reads_filename
# Setup diagnostics outputs if requested.
if self.enabled:
self._csv_file = open(self._root_join(self.metrics_filename), 'w')
self._csv_writer = csv.writer(self._csv_file)
self._write_csv_line('window', 'k', 'n_haplotypes', 'time')
else:
self._csv_file = None
self._csv_writer = None
def close(self):
if self.enabled:
self._csv_file.close()
@property
def enabled(self):
return self.config and self.config.enabled
def _root_join(self, path, makedirs=True):
fullpath = os.path.join(self.config.output_root, path)
subdir = os.path.dirname(fullpath)
if makedirs and subdir:
tf.io.gfile.makedirs(subdir)
return fullpath
def _write_csv_line(self, *args):
assert self.enabled, 'only callable when diagnostics are on'
self._csv_writer.writerow(args)
def _file_for_region(self, region, basename):
"""Returns the path to a file in a region-specific subdirectory."""
assert self.enabled, 'only callable when diagnostics are on'
return self._root_join(os.path.join(ranges.to_literal(region), basename))
def log_realigned_reads(self, region, reads, shared_header=None):
"""Logs, if enabled, the realigned reads for region."""
if self.enabled and self.config.emit_realigned_reads and shared_header is not None:
path = self._file_for_region(region, self.realigned_reads_filename)
with sam.SamWriter(path, header=shared_header) as writer:
for read in reads:
writer.write(read)
def log_graph_metrics(self, region, graph, candidate_haplotypes,
graph_building_time):
"""Logs, if enabled, graph construction information for region."""
if self.enabled:
if graph:
dest_file = self._file_for_region(region, self.graph_filename)
with tf.io.gfile.GFile(dest_file, 'w') as f:
f.write(graph.graphviz())
self._write_csv_line(
ranges.to_literal(region), graph.kmer_size if graph else 'NA',
len(candidate_haplotypes), graph_building_time)
class AssemblyRegion(object):
"""A region to assemble, holding the region Range and the reads.
It is not safe to directly modify any of the attributes here. Use the accessor
functions to add a read to the reads.
Attributes:
candidate_haplotypes: realigner.CandidateHaplotypes for this region.
reads: list[reads_pb2.Read]. Reads for this region.
region: range_pb2.Range. This is the span of the assembled region on the
genome.
read_span: range_pb2.Range. This is the span of reads added to this region.
The read_span in general is expected to be wider than the region itself,
since we often include all reads that overlap the region at all. It is
possible that read_span will be smaller than region, which can happen, for
example, when we only have reads starts in the middle of the region.
Here's a picture of when this can happen:
ref : acgtACGTACgtgt
region : ------
read1 : GGa
read_span: ---
"""
def __init__(self, candidate_haplotypes):
self.candidate_haplotypes = candidate_haplotypes
self.reads = []
self._read_span = None
def __str__(self):
return ('AssemblyRegion(region={}, span={}) with {} haplotypes and {} '
'reads').format(
ranges.to_literal(self.region),
ranges.to_literal(self.read_span), len(self.haplotypes),
len(self.reads))
@property
def haplotypes(self):
"""Returns the haplotypes list[str] of our candidate_haplotypes."""
return self.candidate_haplotypes.haplotypes
@property
def region(self):
return self.candidate_haplotypes.span
@property
def read_span(self):
if self._read_span is None and self.reads:
spans = [utils.read_range(r) for r in self.reads]
self._read_span = ranges.make_range(spans[0].reference_name,
min(s.start for s in spans),
max(s.end for s in spans))
return self._read_span
def add_read(self, read):
self.reads.append(read)
self._read_span = None # Adding a read invalidates our _read_span cache.
def assign_reads_to_assembled_regions(assembled_regions, reads):
"""Assign each read to the maximally overlapped window.
Args:
assembled_regions: list[AssemblyRegion], list of AssemblyRegion to assign
reads to. Does not assume AssemblyRegion are sorted.
reads: iterable[learning.genomics.genomics.Read], to be processed. Does not
assume the reads are sorted.
Returns:
[AssemblyRegion], information on assigned reads for each assembled region.
list[learning.genomics.genomics.Read], the list of unassigned reads.
"""
regions = [ar.region for ar in assembled_regions]
unassigned_reads = []
for read in reads:
read_range = utils.read_range(read)
window_i = ranges.find_max_overlapping(read_range, regions)
if window_i is not None:
assembled_regions[window_i].add_read(read)
else:
unassigned_reads.append(read)
return unassigned_reads
class Realigner(object):
"""Realign reads in regions to assembled haplotypes.
This class helps us to realign reads in regions by:
(1) Create smaller windows in which to operate over the region. These windows
are created by finding evidence of genetic variation surrounded by stretches
of reference-matching seqence.
(2) Build a de-Bruijn assembly graph of the window. Edges are pruned if they
don't meet the required weight threshold. Every remaining haplotype is listed
by traversing the graph.
(3) Realign reads using a Smith-Waterman algorithm to the best candidate
haplotype and then realign that haplotype to the reference sequence to modify
the read's alignment.
"""
def __init__(self, config, ref_reader, shared_header=None):
"""Creates a new Realigner.
Args:
config: realigner_pb2.RealignerOptions protobuf.
ref_reader: GenomeReferenceFai, indexed reference genome to query bases.
shared_header: header info from the input bam file
"""
self.config = config
self.ref_reader = ref_reader
self.diagnostic_logger = DiagnosticLogger(self.config.diagnostics)
self.shared_header = shared_header
def call_debruijn_graph(self, windows, reads):
"""Helper function to call debruijn_graph module."""
windows_haplotypes = []
# Build and process de-Bruijn graph for each window.
sam_reader = sam.InMemorySamReader(reads)
for window in windows:
if window.end - window.start > self.config.ws_config.max_window_size:
continue
if not self.ref_reader.is_valid(window):
continue
ref = self.ref_reader.query(window)
window_reads = list(sam_reader.query(window))
with timer.Timer() as t:
graph = debruijn_graph.build(ref, window_reads, self.config.dbg_config)
graph_building_time = t.GetDuration()
if not graph:
candidate_haplotypes = [ref]
else:
candidate_haplotypes = graph.candidate_haplotypes()
if candidate_haplotypes and candidate_haplotypes != [ref]:
candidate_haplotypes_info = realigner_pb2.CandidateHaplotypes(
span=window, haplotypes=candidate_haplotypes)
windows_haplotypes.append(candidate_haplotypes_info)
self.diagnostic_logger.log_graph_metrics(window, graph,
candidate_haplotypes,
graph_building_time)
return windows_haplotypes
def call_fast_pass_aligner(self, assembled_region):
"""Helper function to call fast pass aligner module."""
if not assembled_region.reads:
return []
contig = assembled_region.region.reference_name
ref_start = max(
0,
min(assembled_region.read_span.start, assembled_region.region.start) -
_REF_ALIGN_MARGIN)
ref_end = min(
self.ref_reader.contig(contig).n_bases,
max(assembled_region.read_span.end, assembled_region.region.end) +
_REF_ALIGN_MARGIN)
ref_prefix = self.ref_reader.query(
ranges.make_range(contig, ref_start, assembled_region.region.start))
ref = self.ref_reader.query(assembled_region.region)
# If we can't create the ref suffix then return the original alignments.
if ref_end <= assembled_region.region.end:
return assembled_region.reads
else:
ref_suffix = self.ref_reader.query(
ranges.make_range(contig, assembled_region.region.end, ref_end))
ref_seq = ref_prefix + ref + ref_suffix
fast_pass_realigner = fast_pass_aligner.FastPassAligner()
# Read sizes may vary. We need this for realigner initialization and sanity
# checks.
self.config.aln_config.read_size = len(
assembled_region.reads[0].aligned_sequence)
fast_pass_realigner.set_options(self.config.aln_config)
fast_pass_realigner.set_reference(ref_seq)
fast_pass_realigner.set_ref_start(contig, ref_start)
fast_pass_realigner.set_ref_prefix_len(len(ref_prefix))
fast_pass_realigner.set_ref_suffix_len(len(ref_suffix))
fast_pass_realigner.set_haplotypes([
ref_prefix + target + ref_suffix
for target in assembled_region.haplotypes
])
return fast_pass_realigner.realign_reads(assembled_region.reads)
def realign_reads(self, reads, region):
"""Run realigner.
This is the main function that
- parses the input reads and reference sequence.
- select candidate windows for local assembly (WindowSelector (ws)
module).
- Windows larger than max_window_size are skipped.
- build pruned De-Bruijn graph for each candidate window (DeBruijnGraph
(dbg) module).
- Graphs with more than max_num_paths candidate haplotypes or
with reference sequence as the only candidate are skipped.
- Align reads based on candidate haplotypes (Aligner (aln) module).
- Output all input reads (whether they required realignment or not).
Args:
reads: [`third_party.nucleus.protos.Read` protos]. The list of input reads
to realign.
region: A `third_party.nucleus.protos.Range` proto. Specifies the region
on the genome we should process.
Returns:
[realigner_pb2.CandidateHaplotypes]. Information on the list of candidate
haplotypes.
[`third_party.nucleus.protos.Read` protos]. The realigned
reads for the region. NOTE THESE READS MAY NO LONGER BE IN THE SAME
ORDER AS BEFORE.
"""
# Compute the windows where we need to assemble in the region.
candidate_windows = window_selector.select_windows(self.config.ws_config,
self.ref_reader, reads,
region)
# Assemble each of those regions.
candidate_haplotypes = self.call_debruijn_graph(candidate_windows, reads)
# Create our simple container to store candidate / read mappings.
assembled_regions = [AssemblyRegion(ch) for ch in candidate_haplotypes]
# Our realigned_reads start off with all of the unassigned reads.
realigned_reads = assign_reads_to_assembled_regions(assembled_regions,
reads)
# Walk over each region and align the reads in that region, adding them to
# our realigned_reads.
for assembled_region in assembled_regions:
if flags.FLAGS.use_fast_pass_aligner:
realigned_reads_copy = self.call_fast_pass_aligner(assembled_region)
else:
raise ValueError('--use_fast_pass_aligner is always true. '
'The older implementation is deprecated and removed.')
realigned_reads.extend(realigned_reads_copy)
self.diagnostic_logger.log_realigned_reads(region, realigned_reads,
self.shared_header)
return candidate_haplotypes, realigned_reads
| 40.511667
| 87
| 0.719957
|
edfb0f7195b7eb33c9f3539f6cf4cea688aaac10
| 279
|
py
|
Python
|
LeetCode/Python/1748. Sum of Unique Elements.py
|
rayvantsahni/Competitive-Programming-Codes
|
39ba91b69ad8ce7dce554f7817c2f0d5545ef471
|
[
"MIT"
] | 1
|
2021-07-05T14:01:36.000Z
|
2021-07-05T14:01:36.000Z
|
LeetCode/Python/1748. Sum of Unique Elements.py
|
rayvantsahni/Competitive-Programming-and-Interview-Prep
|
39ba91b69ad8ce7dce554f7817c2f0d5545ef471
|
[
"MIT"
] | null | null | null |
LeetCode/Python/1748. Sum of Unique Elements.py
|
rayvantsahni/Competitive-Programming-and-Interview-Prep
|
39ba91b69ad8ce7dce554f7817c2f0d5545ef471
|
[
"MIT"
] | null | null | null |
class Solution:
def sumOfUnique(self, nums: List[int]) -> int:
from collections import Counter
c = Counter(nums)
_sum = 0
for key in c:
if c.get(key) == 1:
_sum += key
return _sum
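# A quick self-check, not part of the original submission. On LeetCode the
# judge injects `List` from typing; running this file standalone would also
# need `from typing import List` above the class.
def _sum_of_unique_demo():
    # 1 and 3 occur exactly once in [1, 2, 3, 2], so the expected answer is 4.
    assert Solution().sumOfUnique([1, 2, 3, 2]) == 4
    # Every value repeats, so nothing is unique and the sum is 0.
    assert Solution().sumOfUnique([1, 1, 1, 1, 1]) == 0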
| 21.461538
| 50
| 0.451613
|
27b72b67cdb490c3a40ff3b1c192020f3aa8cee3
| 4,908
|
py
|
Python
|
DPGAnalysis/SiStripTools/test/seedmultiplicity_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 1
|
2020-10-08T06:48:26.000Z
|
2020-10-08T06:48:26.000Z
|
DPGAnalysis/SiStripTools/test/seedmultiplicity_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | null | null | null |
DPGAnalysis/SiStripTools/test/seedmultiplicity_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("SeedMultiplicity")
#prepare options
options = VarParsing.VarParsing("analysis")
options.register ('globalTag',
"DONOTEXIST",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"GlobalTag")
#options.globalTag = "DONOTEXIST::All"
options.parseArguments()
#
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
fileMode = cms.untracked.string("FULLMERGE")
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cout.placeholder = cms.untracked.bool(False)
process.MessageLogger.cout.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cout.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cout.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000)
)
process.MessageLogger.cerr.placeholder = cms.untracked.bool(False)
process.MessageLogger.cerr.threshold = cms.untracked.string("WARNING")
process.MessageLogger.cerr.default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(100000)
)
#----Remove too verbose PrimaryVertexProducer
process.MessageLogger.suppressInfo.append("pixelVerticesAdaptive")
process.MessageLogger.suppressInfo.append("pixelVerticesAdaptiveNoBS")
#----Remove too verbose BeamSpotOnlineProducer
process.MessageLogger.suppressInfo.append("testBeamSpot")
process.MessageLogger.suppressInfo.append("onlineBeamSpot")
process.MessageLogger.suppressWarning.append("testBeamSpot")
process.MessageLogger.suppressWarning.append("onlineBeamSpot")
#----Remove too verbose TrackRefitter
process.MessageLogger.suppressInfo.append("newTracksFromV0")
process.MessageLogger.suppressInfo.append("newTracksFromOtobV0")
#------------------------------------------------------------------
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
# skipBadFiles = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
from Configuration.GlobalRuns.reco_TLR_41X import customisePPData
process=customisePPData(process)
process.load("DPGAnalysis.SiStripTools.sipixelclustermultiplicityprod_cfi")
process.load("DPGAnalysis.SiStripTools.sistripclustermultiplicityprod_cfi")
process.seqMultProd = cms.Sequence(process.spclustermultprod+process.ssclustermultprod)
process.load("DPGAnalysis.SiStripTools.multiplicitycorr_cfi")
process.multiplicitycorr.correlationConfigurations = cms.VPSet(
cms.PSet(xMultiplicityMap = cms.InputTag("ssclustermultprod"),
xDetSelection = cms.uint32(0), xDetLabel = cms.string("TK"), xBins = cms.uint32(1000), xMax=cms.double(50000),
yMultiplicityMap = cms.InputTag("spclustermultprod"),
yDetSelection = cms.uint32(0), yDetLabel = cms.string("Pixel"), yBins = cms.uint32(1000), yMax=cms.double(20000),
rBins = cms.uint32(200), scaleFactor =cms.untracked.double(5.))
)
process.load("DPGAnalysis.SiStripTools.seedmultiplicitymonitor_cfi")
process.seedmultiplicitymonitor.multiplicityCorrelations = cms.VPSet(
cms.PSet(multiplicityMap = cms.InputTag("ssclustermultprod"),
detSelection = cms.uint32(0), detLabel = cms.string("TK"), nBins = cms.uint32(1000), nBinsEta = cms.uint32(100), maxValue=cms.double(100000)
),
cms.PSet(multiplicityMap = cms.InputTag("spclustermultprod"),
detSelection = cms.uint32(0), detLabel = cms.string("Pixel"), nBins = cms.uint32(1000), nBinsEta = cms.uint32(100), maxValue=cms.double(20000)
)
)
process.p0 = cms.Path(process.siPixelRecHits + process.ckftracks + process.seqMultProd + process.multiplicitycorr + process.seedmultiplicitymonitor )
#----GlobalTag ------------------------
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.TFileService = cms.Service('TFileService',
fileName = cms.string('seedmultiplicity.root')
)
#print process.dumpPython()
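# Usage note (added for illustration, not part of the original configuration):
# with VarParsing("analysis"), registered options are passed as key=value
# arguments on the cmsRun command line, for example:
#
#   cmsRun seedmultiplicity_cfg.py globalTag=<your_GT> inputFiles=file:input.root
#
# <your_GT> stands for a global tag appropriate to the release and dataset;
# the default "DONOTEXIST" above is a deliberate placeholder.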
| 41.243697
| 156
| 0.726773
|
98ef88a4b5b9e8bc8f23fb160413c50b4837e709
| 9,485
|
py
|
Python
|
midonet/neutron/tests/unit/test_extension_tunnelzone.py
|
midokura/python-neutron-plugin-midonet
|
a123b0f769c4a0d218bcd6764383ab6c5c9351df
|
[
"Apache-2.0"
] | null | null | null |
midonet/neutron/tests/unit/test_extension_tunnelzone.py
|
midokura/python-neutron-plugin-midonet
|
a123b0f769c4a0d218bcd6764383ab6c5c9351df
|
[
"Apache-2.0"
] | null | null | null |
midonet/neutron/tests/unit/test_extension_tunnelzone.py
|
midokura/python-neutron-plugin-midonet
|
a123b0f769c4a0d218bcd6764383ab6c5c9351df
|
[
"Apache-2.0"
] | 1
|
2015-01-14T16:55:34.000Z
|
2015-01-14T16:55:34.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from webob import exc
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_api_v2_extension
from midonet.neutron.extensions import tunnelzone
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class TunnelzoneTestCase(test_api_v2_extension.ExtensionTestCase):
"""Test the endpoints for the tunnel zones and tunnel zone hosts."""
fmt = 'json'
def setUp(self):
super(TunnelzoneTestCase, self).setUp()
plural_mappings = {'tunnelzone': 'tunnelzones',
'tunnelzonehost': 'tunnelzonehosts'}
self._setUpExtension(
'midonet.neutron.plugin.MidonetPluginV2',
tunnelzone.TUNNELZONE, tunnelzone.RESOURCE_ATTRIBUTE_MAP,
tunnelzone.Tunnelzone, '', plural_mappings=plural_mappings)
def test_get_tunnelzones(self):
return_value = [{'id': _uuid(),
'name': 'example_name',
'type': 'GRE',
'tenant_id': _uuid()}]
instance = self.plugin.return_value
instance.get_tunnelzones.return_value = return_value
res = self.api.get(_get_path('tunnelzones', fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzones.assert_called_once_with(
mock.ANY, fields=mock.ANY, filters=mock.ANY)
res = self.deserialize(res)
self.assertIn('tunnelzones', res)
def test_get_tunnelzone(self):
tz_id = _uuid()
return_value = {'id': tz_id,
'name': 'example_name',
'type': 'GRE',
'tenant_id': _uuid()}
instance = self.plugin.return_value
instance.get_tunnelzone.return_value = return_value
res = self.api.get(_get_path('tunnelzones/%s' % tz_id, fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzone.assert_called_once_with(
mock.ANY, str(tz_id), fields=mock.ANY)
res = self.deserialize(res)
self.assertIn('tunnelzone', res)
def test_create_tunnelzone(self):
tz_id = _uuid()
data = {'tunnelzone': {'name': 'example_name',
'type': 'GRE',
'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.create_tunnelzone.return_value = {}
res = self.api.post(_get_path('tunnelzones', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
instance.create_tunnelzone.assert_called_once_with(
mock.ANY, tunnelzone=data)
return_value = copy.deepcopy(data['tunnelzone'])
return_value['id'] = tz_id
instance.get_tunnelzone.return_value = return_value
res = self.api.get(_get_path('tunnelzones/%s' % tz_id, fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzone.assert_called_once_with(
mock.ANY, str(tz_id), fields=mock.ANY)
def test_update_tunnelzone(self):
tz_id = _uuid()
data = {'tunnelzone': {'name': 'example_name',
'type': 'GRE'}}
return_value = copy.deepcopy(data['tunnelzone'])
return_value['id'] = tz_id
instance = self.plugin.return_value
instance.update_tunnelzone.return_value = {}
res = self.api.put(_get_path('tunnelzones/%s' % tz_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.update_tunnelzone.assert_called_once_with(
mock.ANY, str(tz_id), tunnelzone=data)
def test_delete_tunnelzone(self):
tz_id = _uuid()
instance = self.plugin.return_value
instance.delete_tunnelzone.return_value = {}
res = self.api.delete(_get_path('tunnelzones/%s' % tz_id))
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
instance.delete_tunnelzone.assert_called_once_with(
mock.ANY, str(tz_id))
# Tunnelzone Host
    def test_get_tunnelzonehosts(self):
tz_id = _uuid()
return_value = [{'id': _uuid(),
'host_id': _uuid(),
'ip_address': '10.0.1.1',
'tenant_id': _uuid()}]
instance = self.plugin.return_value
instance.get_tunnelzone_tunnelzonehosts.return_value = return_value
res = self.api.get(_get_path(
'tunnelzones/%s/tunnelzonehosts' % tz_id, fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzone_tunnelzonehosts.assert_called_once_with(
mock.ANY, filters=mock.ANY, fields=mock.ANY,
tunnelzone_id=str(tz_id))
res = self.deserialize(res)
self.assertIn('tunnelzonehosts', res)
    def test_get_tunnelzonehost(self):
tz_id = _uuid()
tz_host_id = _uuid()
return_value = {'id': _uuid(),
'host_id': _uuid(),
'ip_address': '10.0.1.1',
'tenant_id': _uuid()}
instance = self.plugin.return_value
instance.get_tunnelzone_tunnelzonehost.return_value = return_value
res = self.api.get(_get_path(
'tunnelzones/%s/tunnelzonehosts/%s' % (tz_id, tz_host_id),
fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzone_tunnelzonehost.assert_called_once_with(
mock.ANY, str(tz_host_id), fields=mock.ANY,
tunnelzone_id=str(tz_id))
res = self.deserialize(res)
self.assertIn('tunnelzonehost', res)
    def test_create_tunnelzonehost(self):
tz_id = _uuid()
tz_host_id = _uuid()
data = {'tunnelzonehost': {'host_id': _uuid(),
'ip_address': '10.0.1.1',
'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.create_tunnelzone_tunnelzonehost.return_value = {}
res = self.api.post(_get_path(
'tunnelzones/%s/tunnelzonehosts' % tz_id, fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
instance.create_tunnelzone_tunnelzonehost.assert_called_once_with(
mock.ANY, tunnelzone_id=str(tz_id), tunnelzonehost=data)
return_value = copy.deepcopy(data['tunnelzonehost'])
return_value['id'] = tz_host_id
instance.get_tunnelzone_tunnelzonehost.return_value = return_value
res = self.api.get(_get_path(
'tunnelzones/%s/tunnelzonehosts/%s' % (tz_id, tz_host_id),
fmt=self.fmt))
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.get_tunnelzone_tunnelzonehost.assert_called_once_with(
mock.ANY, str(tz_host_id), tunnelzone_id=str(tz_id),
fields=mock.ANY)
def test_update_tunnelzonehost(self):
tz_id = _uuid()
tz_host_id = _uuid()
data = {'tunnelzonehost': {'host_id': _uuid(),
'ip_address': '10.0.1.1'}}
return_value = copy.deepcopy(data['tunnelzonehost'])
return_value['id'] = tz_host_id
instance = self.plugin.return_value
instance.update_tunnelzone_tunnelzonehost.return_value = {}
tz_host_uri = _get_path(
'tunnelzones/%s/tunnelzonehosts/%s' % (tz_id, tz_host_id),
fmt=self.fmt)
res = self.api.put(tz_host_uri, self.serialize(data),
content_type='application/%s' % self.fmt)
self.assertEqual(exc.HTTPOk.code, res.status_int)
instance.update_tunnelzone_tunnelzonehost.assert_called_once_with(
mock.ANY, str(tz_host_id), tunnelzone_id=str(tz_id),
tunnelzonehost=data)
def test_delete_tunnelzonehost(self):
tz_id = _uuid()
tz_host_id = _uuid()
instance = self.plugin.return_value
instance.delete_tunnelzone_tunnelzonehost.return_value = {}
res = self.api.delete(_get_path(
'tunnelzones/%s/tunnelzonehosts/%s' % (tz_id, tz_host_id)))
self.assertEqual(exc.HTTPNoContent.code, res.status_int)
instance.delete_tunnelzone_tunnelzonehost.assert_called_once_with(
mock.ANY, str(tz_host_id), tunnelzone_id=str(tz_id))
class TunnelzoneTestCaseXml(TunnelzoneTestCase):
fmt = 'xml'
| 41.600877
| 78
| 0.62699
|
2741c198193060243fd6fb6dbd210d16ebbbe347
| 518
|
py
|
Python
|
filetransfers/backends/xsendfile.py
|
khyer/django-filetransfers
|
bb18c6d454f61acbb79727c2dfc566fc9e6bf1c4
|
[
"BSD-3-Clause"
] | null | null | null |
filetransfers/backends/xsendfile.py
|
khyer/django-filetransfers
|
bb18c6d454f61acbb79727c2dfc566fc9e6bf1c4
|
[
"BSD-3-Clause"
] | null | null | null |
filetransfers/backends/xsendfile.py
|
khyer/django-filetransfers
|
bb18c6d454f61acbb79727c2dfc566fc9e6bf1c4
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import HttpResponse
from django.utils.encoding import smart_str
def serve_file(request, file, save_as, content_type, **kwargs):
"""Lets the web server serve the file using the X-Sendfile extension"""
response = HttpResponse(content_type=content_type)
response['X-Sendfile'] = file.path
if save_as:
response['Content-Disposition'] = smart_str('attachment; filename=%s' % save_as)
if file.size is not None:
response['Content-Length'] = file.size
return response
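# A hypothetical view using this backend directly, added for illustration only;
# `document` and its `file` attribute are assumed to be a model instance with a
# Django FileField and are not part of this package.
def _example_view(request, document):
    return serve_file(request, document.file,
                      save_as=document.file.name,
                      content_type='application/octet-stream')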
| 39.846154
| 88
| 0.722008
|
984e93a0baf8f1e13c27b91faf013d2b3ae82448
| 11,866
|
py
|
Python
|
simpy_events/event.py
|
loicpw/simpy-events
|
70160bb433a192d267d5c5fb093129c4ffe938d5
|
[
"MIT"
] | 1
|
2020-02-19T07:50:00.000Z
|
2020-02-19T07:50:00.000Z
|
simpy_events/event.py
|
loicpw/simpy-events
|
70160bb433a192d267d5c5fb093129c4ffe938d5
|
[
"MIT"
] | null | null | null |
simpy_events/event.py
|
loicpw/simpy-events
|
70160bb433a192d267d5c5fb093129c4ffe938d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
from itertools import chain
class Context:
""" context object forwarded to event handlers by `EventDispatcher`
    contains the following attributes:
+ `event`, the `Event` instance
+ `hook`, the name of the hook
"""
def __init__(self, **attributes):
""" initializes a new `Context` with keyword arguments
creates an attribute for each provided keyword arg.
"""
self.__dict__.update(attributes)
class EventDispatcher:
""" Responsible for dispatching an event to `Event`'s handlers
uses the `Event`'s sequence of `topics` to get all handlers for
a given `hook` and call them sequentially.
"""
def dispatch(self, event, hook, data):
""" dispatch the event to each topic in `Event.topics`.
args:
+ `event`, the `Event` instance
+ `hook`, the name of the hook to dispatch
+ `data`, data associated to the event
.. seealso:: `Event.dispatch`
Each `topic` is expected to be a mapping containing
a sequence of handlers for a given `hook`. The `topic`
will be ignored if it doesn't contain the `hook` key.
For each sequence of handlers found for `hook`, a `tuple` is
created to ensure consistency while iterating (it's likely
handlers are removed / added while dispatching).
Handlers are then called sequentially with the following
arguments:
+ `context`, a `Context` object
+ `data`
"""
context = Context(
event=event,
hook=hook,
)
for topic in [tuple(topic.get(hook, ())) for topic in event.topics]:
for hdlr in topic:
hdlr(context, data)
class Callbacks(collections.MutableSequence):
""" Replace the 'callbacks' list in `simpy.events.Event` objects.
Internally used to replace the single list of callbacks in
`simpy.events.Event` objects.
.. seealso:: `Event`
    It makes it possible to add the `Event`'s hooks before, when
    and after the `simpy.events.Event` object is processed by
    `simpy` (that is, when the items from its "callbacks" list are
    called).
`Callbacks` is intended to replace the original `callbacks` list
    of the `simpy.events.Event` object. When iterated, it chains the
functions attached to `before`, `callbacks` and `after`.
    Adding or removing items from a `Callbacks` object behaves as
    `simpy` expects:
`Callbacks` is a `collections.MutableSequence` and callables
added or removed from it will be called by `simpy` as regular
callbacks, i.e *f(event)* where *event* is a
`simpy.events.Event` object.
When used to replace the `simpy.events.Event`'s callbacks
attribute, it ensures the correct order is maintained if the
original `simpy.events.Event`'s callbacks attribute was itself a
`Callbacks` object, example: ::
cross_red_light = Event(name='cross red light')
get_caught = Event(name='caught on camera')
evt = cross_red_light(env.timeout(1))
yield get_caught(evt)
In this example, the call order will be as follows ::
- cross_red_light's before
- get_caught's before
- cross_red_light's callbacks
- get_caught's callbacks
- cross_red_light's after
- get_caught's after
"""
def __init__(self, event, before, callbacks, after):
""" Attach the `Callbacks` obj to a `simpy.events.Event` obj.
`event` is the `simpy.events.Event` object whose `callbacks`
attribute is going to be replaced by this `Callbacks` object.
`before`, `callbacks` and `after` are callables which will
be called respectively before, when and after the `event` is
actually processed by `simpy`.
.. note:: the current `event.callbacks` attribute may
already be a `Callbacks` object, see `Callbacks`
description for details.
"""
if isinstance(event.callbacks, Callbacks):
cbks = event.callbacks
self.callbacks = cbks.callbacks
self.before = cbks.before
self.after = cbks.after
else:
self.callbacks = event.callbacks
self.before = []
self.after = []
self.before.append(before)
self.after.append(after)
self.callbacks.append(callbacks)
def __getitem__(self, index):
""" return callable item from 'callbacks' list """
return self.callbacks[index]
def __setitem__(self, index, value):
""" set callable item in 'callbacks' list """
self.callbacks[index] = value
def __delitem__(self, index):
""" del callable item from 'callbacks' list """
del self.callbacks[index]
def __len__(self):
""" return number of callable items in 'callbacks' list """
return len(self.callbacks)
def insert(self, index, value):
""" insert callable item in 'callbacks' list """
self.callbacks.insert(index, value)
def __iter__(self):
""" return an iterator chaining the lists of callbacks:
- 'before'
- 'callbacks'
- 'after'
"""
return iter(chain(self.before, self.callbacks, self.after))
class Event:
""" `Event` provides a node to access the event system.
    an `Event` is an endpoint that allows a `hook` to be dispatched to a
    set of handlers. A `hook` identifies a particular state for the
    `Event`; note that `Event` is intended to be used to *wrap*
    `simpy.events.Event` objects.
+ **enable**: triggered when `Event.enabled` is set to `True`
+ **disable**: triggered when `Event.enabled` is set to to `False`
+ **before**: just before the `simpy.events.Event` is processed
by `simpy`
+ **callbacks**: when the `simpy.events.Event` is processed by
`simpy` (i.e when callbacks are called)
+ **after**: just after the `simpy.events.Event` is processed
by `simpy`
`Event` provides two options to dispatch an event through the
event system:
+ immediately dispatch a `hook` with `Event.dispatch`: although
this method is used internally it may be used to dispatch any
arbitrary `hook` immediately.
+ call the `Event` providing a `simpy.events.Event` object, so
the 'before', 'callbacks' and 'after' hooks will be dispatched
automatically when the event is processed by the `simpy` loop.
.. seealso:: `Event.__call__`
    `Event` is initialized with optional `metadata` attributes,
    provided as keyword args, which are all kept together in the
    `Event.metadata` attribute.
**handlers**:
Handlers are attached to an `Event` using the `Event.topics`
list, which is expected to contain a sequence of mappings, each
mapping holding itself a sequence of callable handlers for a
given `hook`, for ex ::
evt = Event()
topic1 = {
'before': [h1, h2, h3],
'after': [h4, h5],
}
evt.topics.append(topic1)
    .. note:: a topic is not expected to contain all the possible
        hook keys; a hook is simply ignored if it is not found.
**events dispatching**:
`Event.dispatcher` holds a dispatcher object (such as
`EventDispatcher`) that is called by the `Event` when
dispatching a hook.
Note setting `Event.dispatcher` to `None` will prevent anything
from being dispatched for the `Event` instance.
.. seealso:: `Event.dispatch`
`Event.enabled` offers a switch to enable / disable dispatching.
    It also makes it possible to notify handlers when the `Event` is enabled or
disabled, for instance when adding / removing an `Event` in the
simulation.
"""
def __init__(self, **metadata):
""" Initialized a new `Event` object with optional `metadata`
`metadata` keyword args are kept in `Event.metadata`.
"""
self.metadata = metadata
self.topics = []
self.dispatcher = None
self._enabled = False
@property
def enabled(self):
""" enable / disable dispatching for the `Event`.
when the value of `Event.enabled` is changed the following
hooks are dispatched:
+ **enable** is dispatched just after the value is changed
+ **disable** is dispatched just before the value is changed
.. seealso:: `Event.dispatch`
"""
return self._enabled
@enabled.setter
def enabled(self, value):
if value != self._enabled:
if value:
self._enabled = value
self.dispatch('enable')
else:
self.dispatch('disable')
self._enabled = value
def __call__(self, event):
""" Automatically trigger the `Event` when `event` is processed.
The `Event` will be attached to the provided
`simpy.events.Event` object via its callbacks, and the
following hooks will be dispatched when `event` is processed
by `simpy` (i.e when its callbacks are called) :
+ **before**: just before `event` is processed
+ **callbacks**: when `event` is processed
+ **after**: just after `event` is processed
Replaces the `simpy.events.Event` callbacks attribute by a
`Callbacks` instance so the hooks subscribed to this `Event`
will be called when the `simpy.events.Event` is processed
by `simpy`.
When the `simpy.events.Event` is processed, then calls
`Event.dispatch` respectively for 'before', 'callbacks' and
'after' hooks.
return the `simpy.events.Event` object.
example usage in a typical `simpy` process ::
something_happens = Event(name='important', context='test')
def my_process(env):
[...]
yield something_happens(env.timeout(1))
"""
# the partial function is intended to be called by simpy when
# the event is processed (i.e "f(event)") see class Callbacks
# for more details.
_dispatch = self.dispatch
hooks = []
for hook in ('before', 'callbacks', 'after'):
def dispatch(event, hook=hook):
_dispatch(hook, event)
hooks.append(dispatch)
event.callbacks = Callbacks(event, *hooks)
return event
def dispatch(self, hook, data=None):
""" immediately dispatch `hook` for this `Event`.
+ `hook` is the name of the hook to dispatch, for instance
'before', 'after'...etc.
+ `data` is an optional object to forward to the handlers.
It will be `None` by default.
Does nothing if `Event.enabled` is `False` or
`Event.dispatcher` is `None`.
calls the `dispatcher.dispatch` method with the following
arguments:
+ `event`: the `Event` instance
+ `hook`
+ `data`
"""
if self._enabled:
dispatcher = self.dispatcher
if dispatcher is not None:
dispatcher.dispatch(event=self, hook=hook, data=data)
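# A minimal usage sketch, not part of the original module; it assumes the
# `simpy` package is installed and can be called manually to see the hooks
# fire around a timeout event.
def _example():
    import simpy

    done = Event(name='work done')         # metadata kwarg, kept in Event.metadata
    done.dispatcher = EventDispatcher()    # nothing is dispatched without a dispatcher
    done.topics.append({
        'before': [lambda ctx, data: print('about to fire:', ctx.event.metadata)],
        'after': [lambda ctx, data: print('fired hook:', ctx.hook)],
    })
    done.enabled = True                    # also dispatches the 'enable' hook

    def proc(env):
        # the hooks are dispatched when the wrapped timeout is processed by simpy
        yield done(env.timeout(1))

    env = simpy.Environment()
    env.process(proc(env))
    env.run()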
| 34.9
| 76
| 0.592365
|
78d6ac3380bdf99d55c4b80a9643ee0868be5c2a
| 585
|
py
|
Python
|
pypy/objspace/std/test/test_prebuiltint.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-06-02T23:02:09.000Z
|
2021-06-02T23:02:09.000Z
|
pypy/objspace/std/test/test_prebuiltint.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2021-03-30T18:08:41.000Z
|
2021-03-30T18:08:41.000Z
|
pypy/objspace/std/test/test_prebuiltint.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2022-03-30T11:42:37.000Z
|
2022-03-30T11:42:37.000Z
|
import pytest
from pypy.objspace.std.test import test_intobject
@pytest.mark.skipif('config.option.runappdirect')
class AppTestInt(test_intobject.AppTestInt):
spaceconfig = {"objspace.std.withprebuiltint": True}
def setup_class(cls):
space = cls.space
cls.w_start = space.wrap(space.config.objspace.std.prebuiltintfrom)
cls.w_stop = space.wrap(space.config.objspace.std.prebuiltintto)
def test_prebuiltint(self):
def f(x):
assert x is (-(x + 3 - 3) * 5 // (-5))
for i in range(self.start, self.stop):
f(i)
| 32.5
| 75
| 0.659829
|
9abd6621f80576a0cb65edc8c9e72485881894f2
| 2,514
|
py
|
Python
|
techreview2/techapp/migrations/0001_initial.py
|
elb-dev/ITC-172
|
df7acdad309c44cfd3b7580132d28d2d7b9713c4
|
[
"Apache-2.0"
] | null | null | null |
techreview2/techapp/migrations/0001_initial.py
|
elb-dev/ITC-172
|
df7acdad309c44cfd3b7580132d28d2d7b9713c4
|
[
"Apache-2.0"
] | null | null | null |
techreview2/techapp/migrations/0001_initial.py
|
elb-dev/ITC-172
|
df7acdad309c44cfd3b7580132d28d2d7b9713c4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.4 on 2019-01-16 19:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productname', models.CharField(max_length=255)),
('productentrydate', models.DateField()),
('producturl', models.URLField(blank=True, null=True)),
('productdescription', models.TextField()),
],
options={
'db_table': 'product',
},
),
migrations.CreateModel(
name='ProductType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('typename', models.CharField(max_length=255)),
('productdescription', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'db_table': 'producttype',
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reviewtitle', models.CharField(max_length=255)),
('reviewdate', models.DateField()),
('reviewrating', models.SmallIntegerField()),
('reviewtext', models.TextField()),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='techapp.Product')),
('user', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'reviews',
},
),
migrations.AddField(
model_name='product',
name='producttype',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='techapp.ProductType'),
),
migrations.AddField(
model_name='product',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
),
]
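# For reference only (not part of the migration): a sketch of the models this
# initial migration creates, reconstructed from the operations above. The real
# techapp/models.py may differ, so the sketch is kept commented out.
#
# class ProductType(models.Model):
#     typename = models.CharField(max_length=255)
#     productdescription = models.CharField(max_length=255, blank=True, null=True)
#     class Meta:
#         db_table = 'producttype'
#
# class Product(models.Model):
#     productname = models.CharField(max_length=255)
#     productentrydate = models.DateField()
#     producturl = models.URLField(blank=True, null=True)
#     productdescription = models.TextField()
#     producttype = models.ForeignKey(ProductType, on_delete=models.DO_NOTHING)
#     user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
#     class Meta:
#         db_table = 'product'
#
# class Review(models.Model):
#     reviewtitle = models.CharField(max_length=255)
#     reviewdate = models.DateField()
#     reviewrating = models.SmallIntegerField()
#     reviewtext = models.TextField()
#     product = models.ForeignKey(Product, on_delete=models.CASCADE)
#     user = models.ManyToManyField(settings.AUTH_USER_MODEL)
#     class Meta:
#         db_table = 'reviews'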
| 37.522388
| 114
| 0.56245
|
d4a2af2f22cfde971274970772a03873fdbbaa32
| 3,867
|
py
|
Python
|
speechless/recording.py
|
AdamBraun/speechless
|
f693a339ea4f16f56c8e995619d950a4912e52a0
|
[
"MIT"
] | 92
|
2017-02-05T22:01:13.000Z
|
2021-03-08T10:27:46.000Z
|
speechless/recording.py
|
AdamBraun/speechless
|
f693a339ea4f16f56c8e995619d950a4912e52a0
|
[
"MIT"
] | 9
|
2017-06-14T19:18:41.000Z
|
2020-05-07T11:50:12.000Z
|
speechless/recording.py
|
AdamBraun/speechless
|
f693a339ea4f16f56c8e995619d950a4912e52a0
|
[
"MIT"
] | 28
|
2017-02-15T18:04:42.000Z
|
2020-04-30T14:55:31.000Z
|
import array
from itertools import dropwhile
from pathlib import Path
from sys import byteorder
import librosa
import numpy
from numpy import ndarray, abs, max, flipud, concatenate
from speechless import configuration
from speechless.labeled_example import LabeledExample, LabeledExampleFromFile
from speechless.tools import timestamp, mkdir
class Recorder:
def __init__(self,
silence_threshold_for_unnormalized_audio: float = .03,
chunk_size: int = 1024,
sample_rate: int = 16000,
silence_until_terminate_in_s: int = 3):
self.silence_threshold_for_not_normalized_sound = silence_threshold_for_unnormalized_audio
self.chunk_size = chunk_size
self.sample_rate = sample_rate
self.silence_until_terminate_in_s = silence_until_terminate_in_s
def _is_silent(self, audio: ndarray):
return max(audio) < self.silence_threshold_for_not_normalized_sound
def _normalize(self, audio: ndarray) -> ndarray:
return audio / max(abs(audio))
def _trim_silence(self, audio: ndarray) -> ndarray:
def trim_start(sound: ndarray) -> ndarray:
return numpy.array(list(dropwhile(lambda x: x < self.silence_threshold_for_not_normalized_sound, sound)))
def trim_end(sound: ndarray) -> ndarray:
return flipud(trim_start(flipud(sound)))
return trim_start(trim_end(audio))
def record(self):
"""Records from the microphone and returns the data as an array of signed shorts."""
print("Wait in silence to begin recording; wait in silence to terminate")
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=self.sample_rate, input=True, output=True,
frames_per_buffer=self.chunk_size)
silent_chunk_count = 0
has_recording_started = False
is_first_chunk = False
chunks = []
while True:
chunk_as_array = array.array('f', stream.read(self.chunk_size))
# drop first, as it is often loud noise
if not is_first_chunk:
is_first_chunk = True
continue
if byteorder == 'big':
chunk_as_array.byteswap()
chunk = numpy.array(chunk_as_array)
chunks.append(chunk)
silent = self._is_silent(chunk)
print("Silent: " + str(silent))
if has_recording_started:
if silent:
silent_chunk_count += 1
if silent_chunk_count * self.chunk_size > self.silence_until_terminate_in_s * self.sample_rate:
break
else:
silent_chunk_count = 0
elif not silent:
has_recording_started = True
stream.stop_stream()
stream.close()
print("Stopped recording.")
p.terminate()
return self._normalize(self._trim_silence(concatenate(chunks)))
def record_to_file(self, path: Path) -> LabeledExample:
"Records from the microphone and outputs the resulting data to 'path'. Returns a labeled example for analysis."
librosa.output.write_wav(str(path), self.record(), self.sample_rate)
return LabeledExampleFromFile(path)
def record_plot_and_save(
recorder: Recorder = Recorder(),
recording_directory: Path = configuration.default_data_directories.recording_directory) -> LabeledExample:
from speechless.labeled_example_plotter import LabeledExamplePlotter
mkdir(recording_directory)
name = "recording-{}".format(timestamp())
example = recorder.record_to_file(recording_directory / "{}.wav".format(name))
LabeledExamplePlotter(example).save_spectrogram(recording_directory)
return example
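# A brief usage sketch, not part of the original module; it needs a working
# microphone plus the pyaudio and librosa dependencies imported above.
def _record_demo():
    example = record_plot_and_save()   # records, writes a .wav and a spectrogram
    print('Saved recording for analysis:', example)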
| 34.837838
| 119
| 0.660719
|
97e1d5c4bcca2b48a471ffe6637b91dc65aaeb75
| 921
|
py
|
Python
|
medium/python3/c0094_199_binary-tree-right-side-view/00_leetcode_0094.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python3/c0094_199_binary-tree-right-side-view/00_leetcode_0094.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python3/c0094_199_binary-tree-right-side-view/00_leetcode_0094.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | 3
|
2018-02-09T02:46:48.000Z
|
2021-02-20T08:32:03.000Z
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#199. Binary Tree Right Side View
#Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
#For example:
#Given the following binary tree,
#   1            <---
# /   \
#2     3         <---
# \     \
#  5     4       <---
# You should return [1, 3, 4].
#Credits:
#Special thanks to @amrsaqr for adding this problem and creating all test cases.
## Definition for a binary tree node.
## class TreeNode:
## def __init__(self, x):
## self.val = x
## self.left = None
## self.right = None
#class Solution:
# def rightSideView(self, root):
# """
# :type root: TreeNode
# :rtype: List[int]
# """
# Time Is Money
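# The template above leaves the solution empty; below is one possible
# breadth-first implementation added for illustration (it is not the original
# author's code). TreeNode is defined locally so the example is self-contained.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def rightSideView(self, root):
        """Collect the right-most node value of each level via BFS."""
        view, level = [], [root] if root else []
        while level:
            view.append(level[-1].val)  # right-most node on this level
            level = [child for node in level
                     for child in (node.left, node.right) if child]
        return view


if __name__ == '__main__':
    # Tree from the example above: expected right side view is [1, 3, 4].
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right, root.right.right = TreeNode(5), TreeNode(4)
    assert Solution().rightSideView(root) == [1, 3, 4]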
| 29.709677
| 143
| 0.624321
|
32e57abf3ef5bd64788184ba3b501c1c7e11183f
| 1,081
|
py
|
Python
|
configs/nowd/nl_gc/trainpp/res101_d_nl_gc_nowd_lnnostd_ws5e-1_trainval1.py
|
yinmh17/CCNet
|
d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22
|
[
"MIT"
] | 1
|
2019-07-24T05:27:29.000Z
|
2019-07-24T05:27:29.000Z
|
configs/nowd/nl_gc/trainpp/res101_d_nl_gc_nowd_lnnostd_ws5e-1_trainval1.py
|
yinmh17/CCNet
|
d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22
|
[
"MIT"
] | 1
|
2019-07-21T19:44:01.000Z
|
2019-07-21T19:44:01.000Z
|
configs/nowd/nl_gc/trainpp/res101_d_nl_gc_nowd_lnnostd_ws5e-1_trainval1.py
|
yinmh17/CCNet
|
d5e90fe5ccfa16389fd25bdd3e2160ffe2dfbd22
|
[
"MIT"
] | 1
|
2019-07-21T06:28:24.000Z
|
2019-07-21T06:28:24.000Z
|
model = dict(
type='basenet',
pretrained='',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
block_num=[3, 4, 23, 3],
),
att=dict(
with_att=False,
type='glore',
att_stage=[False,False,True,False],
att_pos='after_add',
att_location=[[],[],[5,11,17],[]],
),
module=dict(
type='nl_nowd',
downsample=True,
whiten_type=['ln_nostd'],
weight_init_scale=0.5,
with_gc=True,
use_out=False,
out_bn=False,
)
)
train_cfg = dict(
batch_size=8,
learning_rate=1e-2,
momentum=0.9,
num_steps=100000,
power=0.9,
random_seed=1234,
restore_from='./dataset/resnet101-imagenet.pth',
save_num_images=2,
start_iters=0,
save_from=99500,
save_pred_every=100,
snapshot_dir='snapshots/trainval1/',
weight_decay=0.0005
)
data_cfg = dict(
data_dir='cityscapes',
data_list='./dataset/list/cityscapes/trainval.lst',
ignore_label=255,
input_size='769,769',
num_classes=19,
)
| 21.62
| 55
| 0.582794
|
07c80da4850ddd5161ec25d560fa2ddf3e9d2a57
| 15,105
|
py
|
Python
|
configs/custom_my.py
|
zvvzuzin/stone_detection
|
2287e4d7dfc356c230e0465b3278befbbe77f8eb
|
[
"MIT"
] | null | null | null |
configs/custom_my.py
|
zvvzuzin/stone_detection
|
2287e4d7dfc356c230e0465b3278befbbe77f8eb
|
[
"MIT"
] | null | null | null |
configs/custom_my.py
|
zvvzuzin/stone_detection
|
2287e4d7dfc356c230e0465b3278befbbe77f8eb
|
[
"MIT"
] | null | null | null |
_base_ = '/home/vasily/proj/mmdetection/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py'
classes = ["stone"]
CLASSES = classes
# learning policy
num_classes = 1
# '../_base_/models/mask_rcnn_r50_fpn.py'
# model settings
model = dict(
type='MaskRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
in_channels=1,),
roi_head=dict(
bbox_head=dict(num_classes=1),
mask_head=dict(num_classes=1)))
# model = dict(
# type='MaskRCNN',
# pretrained='torchvision://resnet50',
# backbone=dict(
# type='ResNet',
# depth=50,
# num_stages=4,
# out_indices=(0, 1, 2, 3),
# frozen_stages=1,
# norm_cfg=dict(type='BN', requires_grad=True),
# norm_eval=True,
# style='pytorch'),
# neck=dict(...),
# rpn_head=dict(...),
# roi_head=dict(...))
# model = dict(
# type='MaskRCNN',
# pretrained='torchvision://resnet50',
# backbone=dict(
# type='ResNet',
# in_channels=1,
# depth=50,
# num_stages=4,
# out_indices=(0, 1, 2, 3),
# frozen_stages=1,
# norm_cfg=dict(type='BN', requires_grad=True),
# norm_eval=False,
# style='pytorch'),
# neck=dict(
# type='FPN',
# in_channels=[256, 512, 1024, 2048],
# out_channels=256,
# num_outs=5),
# rpn_head=dict(
# type='RPNHead',
# in_channels=256,
# feat_channels=256,
# anchor_generator=dict(
# type='AnchorGenerator',
# scales=[8],
# ratios=[0.5, 1.0, 2.0],
# strides=[4, 8, 16, 32, 64]),
# bbox_coder=dict(
# type='DeltaXYWHBBoxCoder',
# target_means=[.0, .0, .0, .0],
# target_stds=[1.0, 1.0, 1.0, 1.0]),
# loss_cls=dict(
# type='CrossEntropyLoss', use_sigmoid=True, loss_weight=3.0),
# loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# roi_head=dict(
# type='StandardRoIHead',
# bbox_roi_extractor=dict(
# type='SingleRoIExtractor',
# roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
# out_channels=256,
# featmap_strides=[4, 8, 16, 32]),
# bbox_head=dict(
# type='Shared2FCBBoxHead',
# in_channels=256,
# fc_out_channels=1024,
# roi_feat_size=7,
# num_classes=num_classes,
# bbox_coder=dict(
# type='DeltaXYWHBBoxCoder',
# target_means=[0., 0., 0., 0.],
# target_stds=[0.1, 0.1, 0.2, 0.2]),
# reg_class_agnostic=False,
# loss_cls=dict(
# type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
# loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# mask_roi_extractor=dict(
# type='SingleRoIExtractor',
# roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
# out_channels=256,
# featmap_strides=[4, 8, 16, 32]),
# mask_head=dict(
# type='FCNMaskHead',
# num_convs=4,
# in_channels=256,
# conv_out_channels=256,
# num_classes=num_classes,
# loss_mask=dict(
# type='CrossEntropyLoss', use_mask=True, loss_weight=1))))
# model training and testing settings
train_cfg = dict( # Config of training hyperparameters for rpn and rcnn
rpn=dict( # Training config of rpn
assigner=dict( # Config of assigner
type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for many common detectors. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details.
pos_iou_thr=0.7, # IoU >= threshold 0.7 will be taken as positive samples
neg_iou_thr=0.3, # IoU < threshold 0.3 will be taken as negative samples
min_pos_iou=0.3, # The minimal IoU threshold to take boxes as positive samples
match_low_quality=True, # Whether to match the boxes under low quality (see API doc for more details).
ignore_iof_thr=-1), # IoF threshold for ignoring bboxes
sampler=dict( # Config of positive/negative sampler
type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details.
num=256, # Number of samples
pos_fraction=0.5, # The ratio of positive samples in the total samples.
neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples.
            add_gt_as_proposals=False),  # Whether to add GT as proposals after sampling.
allowed_border=-1, # The border allowed after padding for valid anchors.
pos_weight=-1, # The weight of positive samples during training.
debug=False), # Whether to set the debug mode
rpn_proposal=dict( # The config to generate proposals during training
        nms_across_levels=False,  # Whether to do NMS for boxes across levels. Only works in `GARPNHead`; naive rpn does not support doing NMS across levels.
nms_pre=2000, # The number of boxes before NMS
nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`.
max_per_img=1000, # The number of boxes to be kept after NMS.
nms=dict( # Config of NMS
type='nms', # Type of NMS
iou_threshold=0.7 # NMS threshold
),
min_bbox_size=0), # The allowed minimal box size
rcnn=dict( # The config for the roi heads.
assigner=dict( # Config of assigner for second stage, this is different for that in rpn
type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for all roi_heads for now. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details.
pos_iou_thr=0.5, # IoU >= threshold 0.5 will be taken as positive samples
neg_iou_thr=0.5, # IoU < threshold 0.5 will be taken as negative samples
min_pos_iou=0.5, # The minimal IoU threshold to take boxes as positive samples
match_low_quality=False, # Whether to match the boxes under low quality (see API doc for more details).
ignore_iof_thr=-1), # IoF threshold for ignoring bboxes
sampler=dict(
type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details.
num=512, # Number of samples
pos_fraction=0.25, # The ratio of positive samples in the total samples.
neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples.
add_gt_as_proposals=True
        ),  # Whether to add GT as proposals after sampling.
mask_size=28, # Size of mask
pos_weight=-1, # The weight of positive samples during training.
debug=False)) # Whether to set the debug mode
test_cfg = dict( # Config for testing hyperparameters for rpn and rcnn
rpn=dict( # The config to generate proposals during testing
        nms_across_levels=False,  # Whether to do NMS for boxes across levels. Only works in `GARPNHead`; naive rpn does not support doing NMS across levels.
nms_pre=1000, # The number of boxes before NMS
nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`.
max_per_img=1000, # The number of boxes to be kept after NMS.
nms=dict( # Config of NMS
type='nms', #Type of NMS
iou_threshold=0.7 # NMS threshold
),
min_bbox_size=0), # The allowed minimal box size
rcnn=dict( # The config for the roi heads.
score_thr=0.05, # Threshold to filter out boxes
nms=dict( # Config of NMS in the second stage
type='nms', # Type of NMS
iou_threshold=0.3), # NMS threshold
max_per_img=100, # Max number of detections of each image
mask_thr_binary=0.5)) # Threshold of mask prediction
dataset_type = 'CocoDataset'
# data_root_pits_300920 = '/home/vasily/datasets/asbestos/pits/300920'
# data_root_pits_161120 = '/home/vasily/datasets/asbestos/pits/161120'
# data_root_pits_161220 = '/home/vasily/datasets/asbestos/pits/161220'
data_root_transporter = '/home/vasily/datasets/asbest_old/tr_stones/'
# dataset_type = 'StonesDataset'
# data_root_common = '/home/vasily/datasets/asbest/pits/'
# data_root_small_pits = '/home/vasily/datasets/asbest/camera_pits/'
# data_root_shelves = '/home/vasily/datasets/asbest/stones_on_shelves/'
# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_norm_cfg = dict(mean=[123], std=[58], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile', color_type='grayscale', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1600, 1200), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='RandomCrop', crop_size=(1333, 800)),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile', color_type='grayscale', to_float32=True),
dict(type='MultiScaleFlipAug',
img_scale=(1600, 1200),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type='Collect', keys=['img']),
])
]
dataset_transporter = dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file=data_root_transporter + 'annotation/annotation.json',
img_prefix=data_root_transporter + 'images/',
pipeline=train_pipeline,
classes=classes))
dataset_pits_300920 = dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/300920/annotation_300920.json',
img_prefix='/home/vasily/datasets',
pipeline=train_pipeline,
classes=classes))
dataset_pits_161120 = dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/161120/annotation_161120.json',
img_prefix='/home/vasily/datasets',
pipeline=train_pipeline,
classes=classes))
dataset_pits_161220 = dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/161220/annotation_161220.json',
img_prefix='/home/vasily/datasets',
pipeline=train_pipeline,
classes=classes))
dataset_pits_020221 = dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/020221/annotation_020221.json',
img_prefix='/home/vasily/datasets',
pipeline=train_pipeline,
classes=classes))
dataset_pits_111121 = dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/111121/annotation_111121.json',
img_prefix='/home/vasily/datasets/asbestos/pits/111121',
pipeline=train_pipeline,
classes=classes))
data = dict(
samples_per_gpu=4,
workers_per_gpu=1,
# train = [dataset_transporter],
train=[dataset_pits_300920, dataset_pits_161120, dataset_pits_020221, dataset_pits_111121],
# train=[dataset_pits_111121],
val=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/111121/annotation_111121.json',
img_prefix='/home/vasily/datasets/asbestos/pits/111121',
pipeline=test_pipeline,
classes=classes),
test=dict(
type=dataset_type,
ann_file='/home/vasily/datasets/asbestos/pits/111121/annotation_111121.json',
img_prefix='/home/vasily/datasets/asbestos/pits/111121',
pipeline=test_pipeline,
classes=classes))
evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
interval=1, # Evaluation interval
metric=['bbox', 'segm']) # Metrics used during evaluation
# optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch
# type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13 for more details
# lr=0.02, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch
# momentum=0.9, # Momentum
# weight_decay=0.0001) # Weight decay of SGD
# optimizer_config = dict( # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details.
# grad_clip=None) # Most of the methods do not use gradient clip
# lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook
# policy='step', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9.
# warmup='linear', # The warmup policy, also support `exp` and `constant`.
# warmup_iters=500, # The number of iterations for warmup
# warmup_ratio=
# 0.001, # The ratio of the starting learning rate used for warmup
# step=[8, 11]) # Steps to decay the learning rate
runner = dict(
type='EpochBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner)
max_epochs=50) # Runner that runs the workflow in total max_epochs. For IterBasedRunner use `max_iters`
# evaluation = dict(metric=['bbox', 'segm'])
# '../_base_/default_runtime.py'
checkpoint_config = dict(interval=10)
# evaluation = dict(interval=5)
# yapf:disable
log_config = dict(
interval=10, # 50
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
gpu_ids = range(1)
work_dir = './checkpoints'
seed = 42
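# Usage note (added for illustration, not part of the original config): an
# MMDetection config like this one is normally launched from the repository
# root with the training script, e.g.
#
#   python tools/train.py configs/custom_my.py --work-dir ./checkpoints
#
# Exact flags depend on the MMDetection version in use.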
| 45.36036
| 243
| 0.652234
|
c888e5b5fecb643206576405e71aca6f014368a3
| 6,900
|
py
|
Python
|
train_scripts/train_patch_edsr.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 18
|
2019-10-21T23:54:28.000Z
|
2021-12-23T08:16:04.000Z
|
train_scripts/train_patch_edsr.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 1
|
2020-07-11T08:05:33.000Z
|
2020-07-11T08:05:33.000Z
|
train_scripts/train_patch_edsr.py
|
veritas9872/fastMRI-kspace
|
4c484b3183e9f06838b5ee108af283611c2e1e77
|
[
"MIT"
] | 5
|
2019-11-23T14:11:54.000Z
|
2022-02-19T13:39:15.000Z
|
import torch
from torch import nn, optim
from pathlib import Path
from utils.run_utils import initialize, save_dict_as_json, get_logger, create_arg_parser
from utils.data_loaders import create_prefetch_data_loaders
from train.subsample import RandomMaskFunc, UniformMaskFunc
from data.edsr_input import PreProcessEDSR
from data.edsr_output import PostProcessEDSR
from train.new_model_trainers.img_to_rss import ModelTrainerRSS
from metrics.new_1d_ssim import SSIMLoss
from models.edsr_model import EDSRModel
def train_img_to_rss(args):
# Creating checkpoint and logging directories, as well as the run name.
ckpt_path = Path(args.ckpt_root)
ckpt_path.mkdir(exist_ok=True)
ckpt_path = ckpt_path / args.train_method
ckpt_path.mkdir(exist_ok=True)
run_number, run_name = initialize(ckpt_path)
ckpt_path = ckpt_path / run_name
ckpt_path.mkdir(exist_ok=True)
log_path = Path(args.log_root)
log_path.mkdir(exist_ok=True)
log_path = log_path / args.train_method
log_path.mkdir(exist_ok=True)
log_path = log_path / run_name
log_path.mkdir(exist_ok=True)
logger = get_logger(name=__name__)
# Assignment inside running code appears to work.
if (args.gpu is not None) and torch.cuda.is_available():
device = torch.device(f'cuda:{args.gpu}')
logger.info(f'Using GPU {args.gpu} for {run_name}')
else:
device = torch.device('cpu')
logger.info(f'Using CPU for {run_name}')
# Saving peripheral variables and objects in args to reduce clutter and make the structure flexible.
args.run_number = run_number
args.run_name = run_name
args.ckpt_path = ckpt_path
args.log_path = log_path
args.device = device
save_dict_as_json(vars(args), log_dir=log_path, save_name=run_name)
arguments = vars(args) # Placed here for backward compatibility and convenience.
args.center_fractions_train = arguments.get('center_fractions_train', arguments.get('center_fractions'))
args.center_fractions_val = arguments.get('center_fractions_val', arguments.get('center_fractions'))
args.accelerations_train = arguments.get('accelerations_train', arguments.get('accelerations'))
args.accelerations_val = arguments.get('accelerations_val', arguments.get('accelerations'))
if args.random_sampling:
train_mask_func = RandomMaskFunc(args.center_fractions_train, args.accelerations_train)
val_mask_func = RandomMaskFunc(args.center_fractions_val, args.accelerations_val)
else:
train_mask_func = UniformMaskFunc(args.center_fractions_train, args.accelerations_train)
val_mask_func = UniformMaskFunc(args.center_fractions_val, args.accelerations_val)
input_train_transform = PreProcessEDSR(mask_func=train_mask_func, challenge=args.challenge, device=device,
augment_data=args.augment_data, use_seed=False,
use_patch=True, patch_size=args.patch_size)
input_val_transform = PreProcessEDSR(mask_func=val_mask_func, challenge=args.challenge, device=device,
augment_data=False, use_seed=True,
use_patch=False, patch_size=args.patch_size)
output_train_transform = PostProcessEDSR(challenge=args.challenge, residual_rss=args.residual_rss)
output_val_transform = PostProcessEDSR(challenge=args.challenge, residual_rss=args.residual_rss)
# DataLoaders
train_loader, val_loader = create_prefetch_data_loaders(args)
losses = dict(
rss_loss=SSIMLoss(filter_size=7).to(device=device)
# rss_loss=LogSSIMLoss(filter_size=7).to(device=device)
# rss_loss=nn.L1Loss()
# rss_loss=L1SSIMLoss(filter_size=7, l1_ratio=args.l1_ratio).to(device=device)
)
model = EDSRModel(in_chans=15, out_chans=1, chans=args.chans, num_depth_blocks=args.num_depth_blocks,
res_scale=args.res_scale, reduction=args.reduction, use_residual=False).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.init_lr)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=args.lr_red_rate, verbose=True)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_red_epochs, gamma=args.lr_red_rate)
scheduler = None
trainer = ModelTrainerRSS(args, model, optimizer, train_loader, val_loader, input_train_transform,
input_val_transform, output_train_transform, output_val_transform, losses, scheduler)
try:
trainer.train_model_(train_ratio=10) # Hack!!
except KeyboardInterrupt:
trainer.writer.close()
logger.warning('Closing summary writer due to KeyboardInterrupt.')
if __name__ == '__main__':
project_name = 'fastMRI-kspace'
assert Path.cwd().name == project_name, f'Current working directory set at {Path.cwd()}, not {project_name}!'
settings = dict(
# Variables that almost never change.
challenge='multicoil',
data_root='/media/veritas/D/FastMRI',
log_root='./logs',
ckpt_root='./checkpoints',
batch_size=1, # This MUST be 1 for now.
save_best_only=False,
# smoothing_factor=8,
# Variables that occasionally change.
center_fractions_train=[0.08],
accelerations_train=[4],
# When using single acceleration for train and two accelerations for validation,
# please remember that the validation loss is calculated for both accelerations,
# including the one that the model was not trained for.
# This may result in the checkpoint not being saved,
# even though performance on one acceleration improves significantly.
center_fractions_val=[0.08, 0.04],
accelerations_val=[4, 8],
random_sampling=True,
verbose=False,
use_gt=True,
# Model specific parameters.
train_method='Patch',
chans=64,
residual_rss=False,
num_depth_blocks=80,
res_scale=1,
augment_data=False,
patch_size=96,
reduction=8, # SE module reduction rate.
# TensorBoard related parameters.
max_images=8, # Maximum number of images to save.
shrink_scale=1, # Scale to shrink output image size.
# Learning rate scheduling.
# lr_red_epochs=[70, 90],
# lr_red_rate=0.2,
# Variables that change frequently.
use_slice_metrics=True,
num_epochs=10,
gpu=0, # Set to None for CPU mode.
num_workers=3,
init_lr=1E-4,
max_to_keep=2,
# prev_model_ckpt='',
sample_rate_train=1,
start_slice_train=0,
sample_rate_val=1,
start_slice_val=0,
)
options = create_arg_parser(**settings).parse_args()
train_img_to_rss(options)
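# Run note (added for illustration, not part of the original script): the
# working-directory assertion above means the script is meant to be launched
# from the project root, e.g.
#
#   cd fastMRI-kspace && python train_scripts/train_patch_edsr.py
#
# Paths such as data_root, ckpt_root and the gpu setting in `settings` are
# machine-specific and would need to be adapted first.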
| 40.116279
| 115
| 0.700145
|
7c11a4f853d4524947f9db437f88e4d41876079a
| 1,252
|
py
|
Python
|
catch/datasets/el_moro_canyon_orthohantavirus.py
|
broadinstitute/catch
|
2fedca15f921116f580de8b2ae7ac9972932e59e
|
[
"MIT"
] | 58
|
2018-01-24T16:31:37.000Z
|
2022-02-25T07:46:35.000Z
|
catch/datasets/el_moro_canyon_orthohantavirus.py
|
broadinstitute/catch
|
2fedca15f921116f580de8b2ae7ac9972932e59e
|
[
"MIT"
] | 29
|
2018-04-17T17:36:06.000Z
|
2022-02-25T11:48:58.000Z
|
catch/datasets/el_moro_canyon_orthohantavirus.py
|
broadinstitute/catch
|
2fedca15f921116f580de8b2ae7ac9972932e59e
|
[
"MIT"
] | 16
|
2018-05-23T12:19:41.000Z
|
2021-08-09T04:16:00.000Z
|
"""Dataset with 'El Moro Canyon orthohantavirus' sequences.
A dataset with 2 'El Moro Canyon orthohantavirus' sequences. The virus
is segmented and has 2 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 1 genomes. Many genomes
may have fewer than 2 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (M|S)\]')
m = c.search(header)
if not m:
raise Exception("Unknown or invalid segment in header %s" % header)
seg = m.group(1)
return "segment_" + seg
def seq_header_to_genome(header):
import re
c = re.compile(r'\[genome (.+)\]')
m = c.search(header)
if not m:
raise Exception("Unknown genome in header %s" % header)
return m.group(1)
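# A small self-contained check, not part of the generated file: it shows the
# FASTA header format the two parsers above expect. The header text itself is
# made up for illustration.
def _header_parsing_example():
    header = "some description [segment M] [genome strain RM-97]"
    assert seq_header_to_chr(header) == "segment_M"
    assert seq_header_to_genome(header) == "strain RM-97"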
chrs = ["segment_" + seg for seg in ['M', 'S']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr,
seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/el_moro_canyon_orthohantavirus.fasta.gz", relative=True)
sys.modules[__name__] = ds
| 31.3 | 80 | 0.684505 |
eb594870e7acfb7ecaaa3aae9ea5d7ef6aafcd0b | 33,028 | py | Python |
mpf/core/bcp/bcp_interface.py | enteryourinitials/mpf | 8fa529aacc1b163c71557adb61b591077d66c77e | ["MIT"] | null | null | null |
mpf/core/bcp/bcp_interface.py | enteryourinitials/mpf | 8fa529aacc1b163c71557adb61b591077d66c77e | ["MIT"] | null | null | null |
mpf/core/bcp/bcp_interface.py | enteryourinitials/mpf | 8fa529aacc1b163c71557adb61b591077d66c77e | ["MIT"] | null | null | null |
"""RPC Interface for BCP clients."""
from copy import deepcopy
from mpf.core.rgb_color import ColorException
from mpf.core.events import PostedEvent
from mpf.core.player import Player
from mpf.core.utility_functions import Util
from mpf.core.mpf_controller import MpfController
from mpf.core.switch_controller import MonitoredSwitchChange
from mpf.exceptions.driver_limits_error import DriverLimitsError
class BcpInterface(MpfController):
"""Implements the BCP interface which can be used by all clients.
Args:
machine: A reference to the main MPF machine object.
The following BCP commands are currently implemented:
error
get
hello?version=xxx&controller_name=xxx&controller_version=xxx
mode_start?name=xxx&priority=xxx
mode_stop?name=xxx
player_added?player_num=x
player_variable?name=x&value=x&prev_value=x&change=x&player_num=x
set
shot?name=x
switch?name=x&state=x
timer
trigger?name=xxx
"""
config_name = "bcp_interface"
__slots__ = ["configured", "config", "_client_reset_queue", "_client_reset_complete_status", "bcp_receive_commands",
"_shows"]
def __init__(self, machine):
"""Initialise BCP."""
super().__init__(machine)
if 'bcp' not in machine.config or not machine.config['bcp']:
self.configured = False
return
self.configured = True
self.config = machine.config['bcp']
self._client_reset_queue = None
self._client_reset_complete_status = {}
self.bcp_receive_commands = dict(
reset_complete=self._bcp_receive_reset_complete,
error=self._bcp_receive_error,
switch=self._bcp_receive_switch,
trigger=self._bcp_receive_trigger,
register_trigger=self._bcp_receive_register_trigger,
evaluate_placeholder=self._evaluate_placeholder,
remove_trigger=self._bcp_receive_deregister_trigger,
monitor_start=self._bcp_receive_monitor_start,
monitor_stop=self._bcp_receive_monitor_stop,
set_machine_var=self._bcp_receive_set_machine_var,
service=self._service,
)
self._shows = {}
self.machine.events.add_handler('machine_reset_phase_1', self.bcp_reset)
def __repr__(self):
"""Return string representation."""
return '<BCP Interface>'
def register_command_callback(self, cmd, callback):
"""Register a BCP command."""
if not self.configured:
return
self.bcp_receive_commands[cmd] = callback
def add_registered_trigger_event_for_client(self, client, event):
"""Add trigger for event."""
# register handler if first transport
if not self.machine.bcp.transport.get_transports_for_handler(event):
self.machine.events.add_handler(event=event,
handler=self.bcp_trigger,
name=event)
# register transport
self.machine.bcp.transport.add_handler_to_transport(event, client)
def remove_registered_trigger_event_for_client(self, client, event):
"""Remove trigger for event."""
# unregister transport
self.machine.bcp.transport.remove_transport_from_handle(event, client)
        # if no transports remain, remove handler
if not self.machine.bcp.transport.get_transports_for_handler(event):
self.machine.events.remove_handler_by_event(event=event, handler=self.bcp_trigger)
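    # Editor's sketch (not MPF source): a connected client that wants to be told when a
    # given event fires is wired up through the two methods above. The names "interface",
    # "my_client" and "flipper_hit" below are placeholders, not real MPF objects or events.
    #
    #     interface.add_registered_trigger_event_for_client(my_client, "flipper_hit")
    #     # ... later, when the client unsubscribes or disconnects:
    #     interface.remove_registered_trigger_event_for_client(my_client, "flipper_hit")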
async def _bcp_receive_set_machine_var(self, client, name, value):
"""Set machine var via bcp."""
del client
self.machine.variables.set_machine_var(name, value)
# document variables injected by MC
'''machine_var: mc_version
desc: Version of MC. This is set after MC got connected.
'''
'''machine_var: mc_extended_version
desc: Extended version of MC. This is set after MC got connected. Contains BCP and show version numbers.
'''
async def _service_stop(self, client):
for show in self._shows.values():
show.stop()
for light in self.machine.lights.values():
light.remove_from_stack_by_key("service")
self._shows = {}
await self.machine.service.stop_service()
self.machine.bcp.transport.send_to_client(client, "service_stop")
async def _service(self, client, subcommand, **kwargs):
"""Run service command."""
if subcommand == "start":
self.machine.service.start_service()
elif subcommand == "stop":
await self._service_stop(client)
elif subcommand == "list_switches":
self.machine.bcp.transport.send_to_client(client, "list_switches",
switches=[(s[0], str(s[1].hw_switch.number), s[1].name,
s[1].state)
for s in self.machine.service.get_switch_map()])
elif subcommand == "list_coils":
self.machine.bcp.transport.send_to_client(client, "list_coils",
coils=[(s[0], str(s[1].hw_driver.number), s[1].name) for s in
self.machine.service.get_coil_map()])
elif subcommand == "list_lights":
self.machine.bcp.transport.send_to_client(client, "list_lights",
lights=[(s[0], s[1].get_hw_numbers(), s[1].name, s[1].get_color())
for s in self.machine.service.get_light_map()])
elif subcommand == "list_shows":
self.machine.bcp.transport.send_to_client(client, "list_shows",
shows=[(s.name, sorted(s.tokens))
for s in sorted(self.machine.shows.values(),
key=lambda x: x.name)])
elif subcommand == "monitor_switches":
pass
elif subcommand == "coil_pulse":
self._coil_pulse(client, kwargs.get("coil"), kwargs.get("pulse_ms"), kwargs.get("pulse_power"))
elif subcommand == "coil_enable":
self._coil_enable(client, kwargs.get("coil"), kwargs.get("pulse_ms"), kwargs.get("pulse_power"),
kwargs.get("hold_power"))
elif subcommand == "coil_disable":
self._coil_disable(client, kwargs.get("coil"))
elif subcommand == "show_play":
self._show_play(client, kwargs.get("show"), kwargs.get("token"))
elif subcommand == "show_stop":
self._show_stop(client, kwargs.get("show"))
elif subcommand == "light_color":
self._light_color(client, kwargs.get("light"), kwargs.get("color"))
def _show_play(self, client, show_name, token):
try:
show = self.machine.shows[show_name]
except KeyError:
self.machine.bcp.transport.send_to_client(client, "show_play", error="Show not found")
return
if show_name in self._shows:
self._shows[show_name].stop()
try:
self._shows[show_name] = show.play(show_tokens=token, priority=100000)
except (ValueError, AssertionError) as e:
self.machine.bcp.transport.send_to_client(client, "show_play", error="Show error: {}".format(e))
return
self.machine.bcp.transport.send_to_client(client, "show_play", error=False)
def _show_stop(self, client, show_name):
if show_name in self._shows:
self._shows[show_name].stop()
del self._shows[show_name]
self.machine.bcp.transport.send_to_client(client, "show_stop", error=False)
else:
self.machine.bcp.transport.send_to_client(client, "show_stop", error="Show not playing")
def _coil_pulse(self, client, coil_name, pulse_ms, pulse_power):
try:
coil = self.machine.coils[coil_name]
except KeyError:
self.machine.bcp.transport.send_to_client(client, "coil_pulse", error="Coil not found")
return
if pulse_ms:
pulse_ms = int(pulse_ms)
if pulse_power:
pulse_power = float(pulse_power)
coil.pulse(pulse_ms=pulse_ms, pulse_power=pulse_power)
self.machine.bcp.transport.send_to_client(client, "coil_pulse", error=False)
def _coil_disable(self, client, coil_name):
try:
coil = self.machine.coils[coil_name]
except KeyError:
self.machine.bcp.transport.send_to_client(client, "coil_disable", error="Coil not found")
return
coil.disable()
self.machine.bcp.transport.send_to_client(client, "coil_disable", error=False)
# pylint: disable-msg=too-many-arguments
def _coil_enable(self, client, coil_name, pulse_ms, pulse_power, hold_power):
try:
coil = self.machine.coils[coil_name]
except KeyError:
self.machine.bcp.transport.send_to_client(client, "coil_enable", error="Coil not found")
return
if pulse_ms:
pulse_ms = int(pulse_ms)
if pulse_power:
pulse_power = float(pulse_power)
if hold_power:
hold_power = float(hold_power)
try:
coil.enable(pulse_ms=pulse_ms, pulse_power=pulse_power, hold_power=hold_power)
except DriverLimitsError as e:
self.machine.bcp.transport.send_to_client(client, "coil_enable", error=str(e))
return
self.machine.bcp.transport.send_to_client(client, "coil_enable", error=False)
def _light_color(self, client, light_name, color_name):
try:
light = self.machine.lights[light_name]
except KeyError:
self.machine.bcp.transport.send_to_client(client, "light_color", error="Light not found")
return
try:
light.color(color_name, key="service")
except (DriverLimitsError, ColorException) as e:
self.machine.bcp.transport.send_to_client(client, "light_color", error=str(e))
return
self.machine.bcp.transport.send_to_client(client, "light_color", error=False)
async def _bcp_receive_monitor_start(self, client, category):
"""Start monitoring the specified category."""
category = str.lower(category)
if category == "events":
self._monitor_events(client)
elif category == "devices":
self._monitor_devices(client)
elif category == "drivers":
self._monitor_drivers(client)
elif category == "switches":
self._monitor_switches(client)
elif category == "machine_vars":
self._monitor_machine_vars(client)
elif category == "player_vars":
self._monitor_player_vars(client)
elif category == "modes":
self._monitor_modes(client)
elif category == "core_events":
self._monitor_core_events(client)
elif category == "status_request":
self._monitor_status_request(client)
else:
self.machine.bcp.transport.send_to_client(client,
"error",
cmd="monitor_start?category={}".format(category),
error="Invalid category value")
async def _bcp_receive_monitor_stop(self, client, category):
"""Stop monitoring the specified category."""
category = str.lower(category)
if category == "events":
self._monitor_events_stop(client)
elif category == "devices":
self._monitor_devices_stop(client)
elif category == "drivers":
self._monitor_drivers_stop(client)
elif category == "switches":
self._monitor_switches_stop(client)
elif category == "machine_vars":
self._monitor_machine_vars_stop(client)
elif category == "player_vars":
self._monitor_player_vars_stop(client)
elif category == "modes":
self._monitor_modes_stop(client)
elif category == "core_events":
self._monitor_core_events_stop(client)
elif category == "status_request":
self._monitor_status_request_stop(client)
else:
self.machine.bcp.transport.send_to_client(client,
"error",
cmd="monitor_stop?category={}".format(category),
error="Invalid category value")
def _monitor_drivers(self, client):
"""Monitor all drivers."""
self.machine.bcp.transport.add_handler_to_transport("_monitor_drivers", client)
def _monitor_drivers_stop(self, client):
"""Monitor all drivers."""
self.machine.bcp.transport.remove_transport_from_handle("_monitor_drivers", client)
def _monitor_events(self, client):
"""Monitor all events."""
self.machine.bcp.transport.add_handler_to_transport("_monitor_events", client)
self.machine.events.monitor_events = True
def _monitor_events_stop(self, client):
"""Stop monitoring all events for the specified client."""
self.machine.bcp.transport.remove_transport_from_handle("_monitor_events", client)
if not self.machine.bcp.transport.get_transports_for_handler("_monitor_events"):
self.machine.events.monitor_events = False
def monitor_posted_event(self, posted_event: PostedEvent):
"""Send monitored posted event to bcp clients."""
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_monitor_events",
bcp_command="monitored_event",
event_name=posted_event.event,
event_type=posted_event.type,
event_callback=posted_event.callback,
event_kwargs=Util.convert_to_simply_type(posted_event.kwargs),
registered_handlers=Util.convert_to_simply_type(
self.machine.events.registered_handlers.get(posted_event.event, []))
)
def _monitor_devices(self, client):
"""Register client to get notified of device changes."""
self.machine.bcp.transport.add_handler_to_transport("_devices", client)
# trigger updates of lights
self.machine.light_controller.monitor_lights()
# initially send all states
for collection in self.machine.device_manager.get_monitorable_devices().values():
for device in collection.values():
self.machine.bcp.transport.send_to_client(
client=client,
bcp_command='device',
type=device.class_label,
name=device.name,
changes=False,
state=device.get_monitorable_state())
def _monitor_devices_stop(self, client):
"""Remove client to no longer get notified of device changes."""
self.machine.bcp.transport.remove_transport_from_handle("_devices", client)
def notify_device_changes(self, device, attribute_name, old_value, new_value):
"""Notify all listeners about device change."""
if not self.configured:
return
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_devices",
bcp_command='device',
type=device.class_label,
name=device.name,
changes=(attribute_name, Util.convert_to_simply_type(old_value), Util.convert_to_simply_type(new_value)),
state=device.get_monitorable_state())
def _monitor_switches(self, client):
"""Register client to get notified of switch changes."""
self.machine.switch_controller.add_monitor(self._notify_switch_changes)
self.machine.bcp.transport.add_handler_to_transport("_switches", client)
def _monitor_switches_stop(self, client):
"""Remove client to no longer get notified of switch changes."""
self.machine.bcp.transport.add_handler_to_transport("_switches", client)
# If there are no more clients monitoring switches, remove monitor
if not self.machine.bcp.transport.get_transports_for_handler("_switches"):
self.machine.switch_controller.remove_monitor(self._notify_switch_changes)
def _notify_switch_changes(self, change: MonitoredSwitchChange):
"""Notify all listeners about switch change."""
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_switches",
bcp_command='switch',
name=change.name,
state=change.state)
def _monitor_player_vars(self, client):
# Setup player variables to be monitored (if necessary)
if not self.machine.bcp.transport.get_transports_for_handler("_player_vars"):
Player.monitor_enabled = True
self.machine.register_monitor('player', self._player_var_change)
self.machine.bcp.transport.add_handler_to_transport("_player_vars", client)
def _monitor_player_vars_stop(self, client):
self.machine.bcp.transport.remove_transport_from_handle("_player_vars", client)
# If there are no more clients monitoring player variables, stop monitoring
if not self.machine.bcp.transport.get_transports_for_handler("_player_vars"):
Player.monitor_enabled = False
def _monitor_machine_vars(self, client):
# Setup machine variables to be monitored (if necessary)
if not self.machine.bcp.transport.get_transports_for_handler("_machine_vars"):
self.machine.variables.machine_var_monitor = True
self.machine.register_monitor('machine_vars', self._machine_var_change)
# Send initial machine variable values
self._send_machine_vars(client)
# Establish handler for machine variable changes
self.machine.bcp.transport.add_handler_to_transport("_machine_vars", client)
def _monitor_machine_vars_stop(self, client):
self.machine.bcp.transport.remove_transport_from_handle("_machine_vars", client)
# If there are no more clients monitoring machine variables, stop monitoring
if not self.machine.bcp.transport.get_transports_for_handler("_machine_vars"):
            self.machine.variables.machine_var_monitor = False
def _send_machine_vars(self, client):
self.machine.bcp.transport.send_to_client(
client, bcp_command='settings', settings=Util.convert_to_simply_type(self.machine.settings.get_settings()))
for var_name, settings in self.machine.variables.machine_vars.items():
self.machine.bcp.transport.send_to_client(client, bcp_command='machine_variable',
name=var_name,
value=settings['value'])
# pylint: disable-msg=too-many-arguments
def _player_var_change(self, name, value, prev_value, change, player_num):
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_player_vars",
bcp_command='player_variable',
name=name,
value=value,
prev_value=prev_value,
change=change,
player_num=player_num)
def _machine_var_change(self, name, value, prev_value, change):
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_machine_vars",
bcp_command='machine_variable',
name=name,
value=value,
prev_value=prev_value,
change=change)
def _monitor_modes(self, client):
"""Begin monitoring all mode events (start, stop) via the specified client."""
if not self.machine.bcp.transport.get_transports_for_handler("_modes"):
self.machine.mode_controller.register_start_method(self._mode_start, 'mode')
self.machine.events.add_handler("modes_active_modes_changed", self._send_mode_list)
self.machine.bcp.transport.add_handler_to_transport("_modes", client)
self.machine.bcp.transport.send_to_client(
client=client,
bcp_command="mode_list",
running_modes=[(m.name, m.priority) for m in self.machine.mode_controller.active_modes])
def _send_mode_list(self, **kwargs):
"""Send list of current modes."""
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_modes",
bcp_command="mode_list",
running_modes=[(m.name, m.priority) for m in self.machine.mode_controller.active_modes])
def _monitor_modes_stop(self, client):
"""Stop monitoring all mode events (start, stop) via the specified client."""
self.machine.bcp.transport.remove_transport_from_handle("_modes", client)
if not self.machine.bcp.transport.get_transports_for_handler("_modes"):
self.machine.mode_controller.remove_start_method(self._mode_start, 'mode')
self.machine.events.remove_handler_by_event("modes_active_modes_changed", self._send_mode_list)
def _mode_start(self, config, priority, mode, **kwargs):
"""Send 'mode_start' to the monitoring clients."""
del config
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_modes",
bcp_command="mode_start",
name=mode.name,
priority=priority)
# Return the method and mode name to call when the mode stops (self-registering)
return self._mode_stop, mode.name
def _mode_stop(self, mode, **kwargs):
"""Send 'mode_stop' to the monitoring clients."""
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_modes",
bcp_command="mode_stop",
name=mode)
def _monitor_core_events(self, client):
"""Begin monitoring all core events (ball, player turn, etc.) via the specified client."""
if not self.machine.bcp.transport.get_transports_for_handler("_core_events"):
self.machine.events.add_handler('ball_started', self._ball_started)
self.machine.events.add_handler('ball_ended', self._ball_ended)
self.machine.events.add_handler('player_turn_started', self._player_turn_start)
self.machine.events.add_handler('player_added', self._player_added)
self.machine.bcp.transport.add_handler_to_transport("_core_events", client)
def _monitor_core_events_stop(self, client):
"""Stop monitoring all core events (ball, player turn, etc.) via the specified client."""
self.machine.bcp.transport.remove_transport_from_handle("_core_events", client)
if not self.machine.bcp.transport.get_transports_for_handler("_core_events"):
self.machine.events.remove_handler_by_event('ball_started', self._ball_started)
self.machine.events.remove_handler_by_event('ball_ended', self._ball_ended)
self.machine.events.remove_handler_by_event('player_turn_started', self._player_turn_start)
self.machine.events.remove_handler_by_event('player_added', self._player_added)
def _monitor_status_request(self, client):
"""Begin monitoring status_request messages via the specified client."""
self.machine.bcp.transport.add_handler_to_transport("_status_request", client)
def _monitor_status_request_stop(self, client):
"""Stop monitoring status_request messages via the specified client."""
self.machine.bcp.transport.remove_transport_from_handle("_status_request", client)
def _ball_started(self, ball, player, **kwargs):
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_core_events",
bcp_command="ball_start",
player_num=player,
ball=ball)
def _ball_ended(self, **kwargs):
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_core_events",
bcp_command="ball_end")
def _player_turn_start(self, number, player, **kwargs):
del player
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_core_events",
bcp_command="player_turn_start",
player_num=number)
def _player_added(self, num, player, **kwargs):
del player
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler(
handler="_core_events",
bcp_command="player_added",
player_num=num)
async def process_bcp_message(self, cmd, kwargs, client):
"""Process BCP message.
Args:
cmd: The command for this message.
kwargs: Arguments for the command.
            client: Client which sent this message.
"""
if self._debug_to_console or self._debug_to_file:
if 'rawbytes' in kwargs:
debug_kwargs = deepcopy(kwargs)
debug_kwargs['rawbytes'] = '<{} bytes>'.format(
len(debug_kwargs.pop('rawbytes')))
self.debug_log("Processing command: %s %s", cmd, debug_kwargs)
else:
self.debug_log("Processing command: %s %s", cmd, kwargs)
if cmd in self.bcp_receive_commands:
try:
callback = self.bcp_receive_commands[cmd]
except TypeError as e:
self.machine.bcp.transport.send_to_client(client, "error", cmd=cmd, error=str(e), kwargs=kwargs)
else:
await callback(client=client, **kwargs)
else:
self.warning_log("Received invalid BCP command: %s from client: %s", cmd, client.name)
async def _bcp_receive_error(self, client, **kwargs):
"""Handle a BCP error message from a remote BCP host indicating that a command from MPF was not recognized.
This method only posts a warning to the log. It doesn't do anything else
at this point.
"""
self.warning_log('Received Error command from host with parameters: %s, from client %s',
kwargs, str(client))
def send_driver_event(self, **kwargs):
"""Notify all observers about driver event."""
self.machine.bcp.transport.send_to_clients_with_handler("_monitor_drivers", "driver_event", **kwargs)
async def _bcp_receive_reset_complete(self, client, **kwargs):
"""Handle a BCP reset_complete message from a remote BCP host indicating their reset process has completed."""
del kwargs
self.debug_log("Received reset_complete from client: %s %s", client.name)
self._client_reset_complete_status[client] = True
# Check if reset_complete status is True from all clients
if all(status is True for item, status in self._client_reset_complete_status.items()):
if self._client_reset_queue:
self._client_reset_queue.clear()
self._client_reset_queue = None
self._client_reset_complete_status.clear()
self.debug_log("Received reset_complete from all clients. Clearing wait from queue event.")
def bcp_reset(self, queue, **kwargs):
"""Send the 'reset' command to the remote BCP host."""
del kwargs
# Will hold the queue event until all clients respond with a "reset_complete" command
clients = self.machine.bcp.transport.get_all_clients()
self._client_reset_complete_status.clear()
for client in clients:
if not client.name:
continue
self._client_reset_complete_status[client] = False
if self._client_reset_complete_status:
queue.wait()
self._client_reset_queue = queue
# Send the reset command
self.debug_log("Sending reset to all clients (will now wait for reset_complete "
"to be received from all clients).")
self.machine.bcp.transport.send_to_all_clients("reset")
async def _bcp_receive_switch(self, client, name, state, **kwargs):
"""Process an incoming switch state change request from a remote BCP host.
Args:
client: Client which sent the switch state.
name: String name of the switch to set.
state: Integer representing the state this switch will be set to.
1 = active, 0 = inactive, -1 means this switch will be flipped
from whatever its current state is to the opposite state.
kwargs: Additional arguments (unused)
"""
del kwargs
del client
state = int(state)
try:
switch = self.machine.switches[name]
except KeyError:
self.warning_log("Received BCP switch message with invalid switch"
"name: '%s'", name)
return
if state == -1:
if self.machine.switch_controller.is_active(switch):
state = 0
else:
state = 1
self.machine.switch_controller.process_switch_obj(obj=switch, state=state, logical=True)
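    # Editor's example (illustrative only): the state values accepted by the handler above,
    # using the BCP command format from the class docstring. "s_start" is a placeholder
    # switch name.
    #
    #     switch?name=s_start&state=1    -> force the switch active
    #     switch?name=s_start&state=0    -> force the switch inactive
    #     switch?name=s_start&state=-1   -> flip whatever the current logical state is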
async def _evaluate_placeholder(self, client, placeholder, parameters=None, **kwargs):
"""Evaluate and return placeholder."""
del kwargs
if parameters is None:
parameters = []
placeholder_obj = self.machine.placeholder_manager.build_raw_template(placeholder, None)
try:
value = placeholder_obj.evaluate(parameters=parameters)
except AssertionError as e:
self.machine.bcp.transport.send_to_client(client=client, bcp_command='evaluate_placeholder',
error=str(e))
return
self.machine.bcp.transport.send_to_client(client=client, bcp_command='evaluate_placeholder', value=value,
error=False)
async def _bcp_receive_register_trigger(self, client, event, **kwargs):
"""Register a trigger for a client."""
del kwargs
self.add_registered_trigger_event_for_client(client, event)
async def _bcp_receive_deregister_trigger(self, client, event, **kwargs):
"""Deregister a trigger for a client."""
del kwargs
self.remove_registered_trigger_event_for_client(client, event)
def bcp_player_added(self, num, **kwargs):
"""Send BCP 'player_added' to the connected BCP hosts."""
del kwargs
self.machine.bcp.transport.send_to_clients_with_handler('_player_vars', 'player_added', player_num=num)
def bcp_trigger(self, name, **kwargs):
"""Send BCP 'trigger' to the connected BCP hosts."""
# ignore events which already came from bcp to prevent loops
if "_from_bcp" in kwargs:
return
# Since player variables are sent automatically, if we get a trigger
# for an event that starts with "player_", we need to only send it here
# if there's *not* a player variable with that name, since if there is
# a player variable then the player variable handler will send it.
if name.startswith('player_'):
try:
                if self.machine.game.player.is_player_var(name[len('player_'):]):
return
except AttributeError:
pass
self.machine.bcp.transport.send_to_clients_with_handler(
handler=name, bcp_command='trigger', name=name, **kwargs)
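    # Editor's sketch: bcp_trigger() is the handler that the registered trigger events call.
    # Events that already arrived over BCP carry "_from_bcp" and are dropped to avoid loops,
    # and "player_..." triggers are skipped when a matching player variable exists, since the
    # player-variable monitor already reports those. Names below are placeholders.
    #
    #     interface.bcp_trigger("flipper_hit", hits=3)          # forwarded to subscribed clients
    #     interface.bcp_trigger("flipper_hit", _from_bcp=True)  # ignored: it came from BCP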
def bcp_trigger_client(self, client, name, **kwargs):
"""Send BCP 'trigger' to a specific client."""
# ignore events which already came from bcp to prevent loops
if "_from_bcp" in kwargs:
return
self.machine.bcp.transport.send_to_client(client=client, bcp_command='trigger', name=name, **kwargs)
async def _bcp_receive_trigger(self, client, name, callback=None, **kwargs):
"""Process an incoming trigger command from a remote BCP host."""
del client
kwargs['_from_bcp'] = True
if callback:
self.machine.events.post(event=name,
callback=self.bcp_trigger,
                                     name=callback,
**kwargs)
else:
self.machine.events.post(event=name, **kwargs)
| 44.692828 | 120 | 0.633735 |
e479da0026e44d3c09037f3860bb77ce98eb2b97 | 23,875 | py | Python |
simple_textmining/simple_textmining.py | Q35joih4334/simple_textmining | 1cca58839d50cbf8f865b459da667da81775c593 | ["MIT"] | null | null | null |
simple_textmining/simple_textmining.py | Q35joih4334/simple_textmining | 1cca58839d50cbf8f865b459da667da81775c593 | ["MIT"] | null | null | null |
simple_textmining/simple_textmining.py | Q35joih4334/simple_textmining | 1cca58839d50cbf8f865b459da667da81775c593 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 22 11:24:27 2021
@author: Q35joih4334
"""
import sys
import io
import textwrap
import spacy
import textacy.extract
import textacy.tm
import textacy.representations
import pandas as pd
import tqdm
import numpy as np
from wordcloud import WordCloud
import xlsxwriter
import matplotlib
import matplotlib.pyplot as plt
import scipy.stats
tqdm.tqdm.pandas()
def mpl_to_xlsx(worksheet, row, col, fig):
"""
    Simple helper for inserting a matplotlib figure into an xlsxwriter worksheet
"""
imgdata = io.BytesIO()
fig.savefig(imgdata)
imgdata.seek(0)
worksheet.insert_image(row, col, '', {'image_data': imgdata})
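# Editor's example (hypothetical usage; the workbook name and data are placeholders):
#
#     import xlsxwriter
#     import matplotlib.pyplot as plt
#
#     workbook = xlsxwriter.Workbook('figures.xlsx')
#     worksheet = workbook.add_worksheet('plots')
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [3, 1, 2])
#     mpl_to_xlsx(worksheet, 0, 0, fig)  # image's top-left corner lands at cell A1
#     workbook.close()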
def pvalue_asterisk(pvalue):
asterisk = ''
if pvalue < .05:
asterisk = asterisk + '*'
if pvalue < .01:
asterisk = asterisk + '*'
if pvalue < .001:
asterisk = asterisk + '*'
if pvalue < .0001:
asterisk = asterisk + '*'
return asterisk
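# Editor's example of the thresholds above:
#
#     pvalue_asterisk(0.03)     # -> '*'
#     pvalue_asterisk(0.003)    # -> '**'
#     pvalue_asterisk(0.00005)  # -> '****'
#     pvalue_asterisk(0.2)      # -> ''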
class textminer:
def __init__(self,
df,
text_column,
ngrams=(1, 3),
nlp=None,
keyword_algo='sgrank', #TODO: could be list
keyword_topn=10,
cvectorizer_args=None,
n_topics=20, # TODO: could be list?
model_type='nmf',
tvectorizer_args=None,
timeseries_column=None,
timeseries_epoch='Y',
docs=None):
self.df_init = df
self.text_column = text_column
self.ngrams = ngrams
self.keyword_algo = keyword_algo
self.keyword_topn = keyword_topn
self.n_topics = n_topics
self.model_type = model_type
self.timeseries_column = timeseries_column
self.timeseries_epoch = timeseries_epoch
# Count vectorizer args
self.cvectorizer_args = {
'tf_type': 'linear',
'idf_type': None,
'norm': None,
'min_df': .1,
'max_df': .95,
'max_n_terms': 100000
}
if cvectorizer_args:
self.cvectorizer_args.update(cvectorizer_args)
# TFIDF vectorizer args
self.tvectorizer_args = {
'tf_type': 'linear',
'idf_type': 'smooth',
'norm': 'l2',
'min_df': 3,
'max_df': .95,
'max_n_terms': 100000
}
if tvectorizer_args:
self.tvectorizer_args.update(tvectorizer_args)
self.nlp = nlp
if not self.nlp:
self.nlp = spacy.load('en_core_web_sm')
self.docs = docs
self.terms_list = None
def build(self):
if self.docs is None:
tqdm.tqdm.write('Creating Spacy docs.', file=sys.stderr)
# TODO: progress_apply is not working
#self.docs = self.df_init[self.text_column].progress_apply(self.nlp)
d = {}
for row, data in tqdm.tqdm(self.df_init.iterrows()):
d[row] = self.nlp(data[self.text_column])
#self.docs = self.df_init[self.text_column].apply(self.nlp)
#TODO: fix parallel processing, this gives pipe error
#docs = [d for d in tqdm.tqdm(self.nlp.pipe(self.df[self.text_column].tolist(), n_process=8))]
self.docs = pd.Series(d, name='_doc') #TODO: or series?
else:
tqdm.tqdm.write('Spacy docs already calculated. Skipping.', file=sys.stderr)
self.df = self.df_init.copy() #TODO: not sure if this is necessary
if self.terms_list is None:
tqdm.tqdm.write('Building bag of words.', file=sys.stderr)
d = {}
for row, data in tqdm.tqdm(self.docs.iteritems()):
clean = []
for ngram in textacy.extract.basics.ngrams(data, n=self.ngrams):
                # Ngrams are separated with an underscore
joined_ngram = '_'.join([x.lemma_.lower() for x in ngram])
if len(joined_ngram) > 2:
clean.append(joined_ngram)
d[row] = clean
self.terms_list = pd.Series(d, name='_terms_list')
else:
tqdm.tqdm.write('Bag of words already calculated. Skipping.', file=sys.stderr)
def keyword_extraction(self):
d = {}
if self.keyword_algo:
tqdm.tqdm.write('Extracting keywords.', file=sys.stderr)
dd = {}
for row, data in tqdm.tqdm(self.docs.iteritems()):
# TODO: allow multiple algos
if self.keyword_algo == 'sgrank':
keyterms = textacy.extract.keyterms.sgrank(data, topn=self.keyword_topn)
                    # TODO: this should be a bit more robust, e.g. if there are no keyterms
dd[row] = [x[0].lower() for x in keyterms]
d['_top_keywords_{}'.format(self.keyword_algo)] = dd
# TODO: this could be dataframe with keyword algo in header
# TODO: then df_init can be removed?
#self.df['_top_keywords_{}'.format(self.keyword_algo)] = pd.Series(d)
self.keywords = pd.DataFrame(d)
def word_counts(self):
tqdm.tqdm.write('Running word counts.', file=sys.stderr)
cvectorizer = textacy.representations.Vectorizer(**self.cvectorizer_args)
count_doc_term_matrix = cvectorizer.fit_transform(self.terms_list.values)
df_vectorized = pd.DataFrame(count_doc_term_matrix.toarray(), index=self.df.index)
df_vectorized = df_vectorized.rename(cvectorizer.id_to_term, axis='columns')
# Sort columns by most prevalent
df_vectorized = df_vectorized[df_vectorized.sum().sort_values(ascending=False).index]
self.df_vectorized = df_vectorized
def topic_modelling(self):
tqdm.tqdm.write('Running topic model.', file=sys.stderr)
# NOTE: lda gives strange results with the default settings
# TODO: include something to help choose the number of topics for topic modelling
tvectorizer = textacy.representations.Vectorizer(**self.tvectorizer_args)
doc_term_matrix = tvectorizer.fit_transform(self.terms_list.values)
# Run topic model
model = textacy.tm.TopicModel(self.model_type, n_topics=self.n_topics)
model.fit(doc_term_matrix)
# Build top terms
top_terms_str = []
for topic_idx, top_terms in model.top_topic_terms(tvectorizer.id_to_term):
top_terms_str.append('TOPIC {}: {}'.format(str(topic_idx).zfill(2), ', '.join(top_terms)))
# Build matrices
doc_topic_matrix = model.transform(doc_term_matrix)
docs_terms_weights = list(model.top_topic_terms(tvectorizer.id_to_term, weights=True, top_n=-1))
# Get dominant topics
        # NOTE: this finds multiple dominant topics if there are ties for the maximum weight
dominant_topics = []
for row in doc_topic_matrix:
max_index = row.argmax()
max_indexes = np.where(row == row[max_index])[0]
dominant_topics.append([top_terms_str[x] for x in max_indexes])
self.dominant_topics = pd.DataFrame(dominant_topics, index=self.df.index, columns=['_dominant_topics'])
# This gets just one dominant topic
dominant_topic = []
for row in doc_topic_matrix:
max_index = row.argmax()
dominant_topic.append(top_terms_str[max_index])
self.dominant_topic = pd.DataFrame(dominant_topic, index=self.df.index, columns=['_dominant_topic'])
# TODO: rename this
top_terms = pd.DataFrame(
doc_topic_matrix,
columns=top_terms_str,
index=self.df.index)
# Boolean indicator matrix for terms
self.top_terms_boolean = top_terms[top_terms != 0]
self.model = model
self.doc_term_matrix = doc_term_matrix
self.top_terms_str = top_terms_str
self.doc_topic_matrix = doc_topic_matrix
self.tvectorizer = tvectorizer
self.docs_terms_weights = docs_terms_weights
self.top_terms = top_terms
def report_counts(self):
table = self.df_init.join(self.df_vectorized)
table.to_excel(self.writer, sheet_name='counts')
worksheet = self.writer.sheets['counts']
worksheet.freeze_panes(1, 0)
# Add table
columns = ['index'] + table.columns.tolist()
columns_data = []
for column in columns:
columns_data.append(
{'header': column})
table_range = xlsxwriter.utility.xl_range(
0,
0,
len(self.df.index),
len(self.df_vectorized.columns) + len(self.df_init.columns))
table_style = self.table_style
table_style.update({'columns': columns_data})
worksheet.add_table(
table_range,
table_style)
# Add conditional format for counts
worksheet.conditional_format(
1,
len(self.df_init.columns) + 1,
len(self.df_vectorized.index),
len(self.df_vectorized.columns) + len(self.df_init.columns) + 1,
{'type': '2_color_scale',
'min_value': 0,
'min_color': '#FFFFFF',
'max_value': self.df_vectorized.max().max(),
'max_color': '#4f81bd'})
def report_tm(self):
# TODO: maybe also report sentiment analysis here for easier analysis
# TODO: compare average sentiments per dominant theme?
tqdm.tqdm.write('Reporting topic model.', file=sys.stderr)
# TODO: define empty dataframes or check whether these actually exist before concating
table = pd.concat(
[self.df, self.keywords, self.polarities, self.dominant_topics, self.dominant_topic, self.top_terms],
axis='columns')
table.to_excel(
self.writer,
startrow=2,
sheet_name='topic_model')
worksheet = self.writer.sheets['topic_model']
# Add table
columns = ['index'] + table.columns.tolist()
columns_data = []
for column in columns:
columns_data.append(
{'header': column,
'header_format': self.hidden_format})
table_range = xlsxwriter.utility.xl_range(
2,
0,
len(table.index) + 2,
len(table.columns))
table_style = self.table_style
table_style.update({'columns': columns_data})
worksheet.add_table(
table_range,
table_style)
# Top header
for i, column in enumerate(columns):
if column in self.top_terms_str:
worksheet.write(0, i, column, self.topic_format)
formula = '=COUNTIF({},"*"&{}&"*")'.format(
xlsxwriter.utility.xl_range(
3,
columns.index('_dominant_topics'),
len(table.index) + 2,
columns.index('_dominant_topics')),
xlsxwriter.utility.xl_rowcol_to_cell(0, i))
worksheet.write_formula(1, i, formula)
else:
worksheet.write(0, i, column, self.header_format)
worksheet.set_row(0, 160)
# Format topic weights
weights_range = xlsxwriter.utility.xl_range(
3,
columns.index(self.top_terms_str[0]),
len(table.index) + 2,
columns.index(self.top_terms_str[-1]))
worksheet.conditional_format(weights_range,
{'type': '2_color_scale',
'min_value': 0,
'min_color': '#FFFFFF',
'max_value': table[self.top_terms_str].max().max(),
'max_color': '#4f6228'})
# Hide zero weights
worksheet.conditional_format(weights_range,
{'type': 'cell',
'criteria': 'equal to',
'value': 0,
'format': self.hidden_format})
# Highlight dominant topic
formula = '=ISNUMBER(SEARCH({},{}))'.format(
xlsxwriter.utility.xl_rowcol_to_cell(2,
columns.index(self.top_terms_str[0]),
row_abs=True),
xlsxwriter.utility.xl_rowcol_to_cell(3,
columns.index(self.top_terms_str[0]) - 1,
col_abs=True))
worksheet.conditional_format(weights_range,
{'type': 'formula',
'criteria': formula,
'format': self.highlighted_format})
# Freeze top rows
worksheet.freeze_panes(3, 0)
def report_topic_sentiment(self):
tqdm.tqdm.write('Reporting topic model sentiments.', file=sys.stderr)
worksheet = self.writer.book.add_worksheet('topic_sentiments')
for row, (term, termdata) in enumerate(self.top_terms.iteritems()):
col = 0
# Topic name
worksheet.write(0, col, 'Topic')
worksheet.write(row + 1, col, term)
# Non-zero topic weight mean
col = col + 1
worksheet.write(0, col, 'Non-zero topic weight NLTK sentiment compound M')
worksheet.write(row + 1, col, self.polarities[termdata != 0]._NLTK_sentiment_compound.mean())
# Correlation coefficient between term weight and sentiment compound
r = scipy.stats.pearsonr(
termdata,
self.polarities._NLTK_sentiment_compound)
col = col + 1
worksheet.write(0, col, 'pearson r')
worksheet.write(row + 1, col, r[0])
col = col + 1
worksheet.write(0, col, 'pearson r p-value')
worksheet.write(row + 1, col, r[1])
col = col + 1
worksheet.write(0, col, 'pearson r p-value sig.')
worksheet.write(row + 1, col, pvalue_asterisk(r[1]))
def report_wordclouds(self):
tqdm.tqdm.write('Drawing topic model wordclouds.', file=sys.stderr)
worksheet = self.writer.book.add_worksheet('topic_wordclouds')
for i, doc_terms_weights in enumerate(self.docs_terms_weights):
wc_freqs = {x[0]: x[1] for x in doc_terms_weights[1]}
if all([x == 0 for x in wc_freqs.values()]):
continue
wc = WordCloud(
background_color='white',
max_words=1000,
scale=8,
color_func=lambda *args, **kwargs: 'black'
)
wc.generate_from_frequencies(wc_freqs)
fig, ax = plt.subplots()
ax.imshow(wc, interpolation='bilinear')
ax.axis('off')
plt.title(textwrap.fill(self.top_terms_str[i], width=40))
plt.tight_layout()
mpl_to_xlsx(worksheet, i * 25, 0, fig)
plt.close()
def report_dominant_topics(self):
tqdm.tqdm.write('Drawing dominant topics chart.', file=sys.stderr)
# Counts of dominant topics
worksheet = self.writer.book.add_worksheet('dominant_topics')
fig, ax = plt.subplots(figsize=(16, 9))
self.dominant_topics.value_counts().plot.barh(ax=ax)
plt.tight_layout()
mpl_to_xlsx(worksheet, 0, 0, fig)
plt.close()
def report_termite_plot(self):
tqdm.tqdm.write('Drawing termite plot.', file=sys.stderr)
# Visualise topics with termite plot
# NOTE: n_terms should be such that all top10 terms are visible
# TODO: highlight dominant term?
worksheet = self.writer.book.add_worksheet('termite')
ax = self.model.termite_plot(
self.doc_term_matrix,
self.tvectorizer.id_to_term,
topics=-1,
#n_terms=len(set(itertools.chain.from_iterable(top_terms_list))),
sort_terms_by='seriation')
mpl_to_xlsx(worksheet, 0, 0, ax.get_figure())
plt.close()
def report_timeline_chart(self):
# TODO: maybe there should be option to set xticklabels format manually
if self.timeseries_column:
tqdm.tqdm.write('Drawing timeline chart.', file=sys.stderr)
# Dominant topics
worksheet = self.writer.book.add_worksheet('timeline_dominant_topics')
fig, ax = plt.subplots(figsize=(16, 9))
data = pd.crosstab(
self.df[self.timeseries_column],
self.df._dominant_topic)
data = data.resample(self.timeseries_epoch).sum()
data = data.transform(lambda x: x / x.sum(), axis=1)
data.plot(
ax=ax,
kind='bar',
width=1,
stacked=True).legend(
loc='lower center',
bbox_to_anchor=(.5, -.5))
ax.set_xticklabels(data.index.strftime('%' + self.timeseries_epoch))
ax.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
plt.tight_layout()
mpl_to_xlsx(worksheet, 0, 0, ax.get_figure())
plt.close()
# Count of non-zero topics
worksheet = self.writer.book.add_worksheet('timeline_nonzero_topics')
fig, ax = plt.subplots(figsize=(16, 9))
data = pd.DataFrame(
index=self.df[self.timeseries_column],
data=(self.doc_topic_matrix != 0),
columns=self.top_terms_str).groupby(self.timeseries_column).sum()
data = data.resample(self.timeseries_epoch).sum()
data = data.transform(lambda x: x / x.sum(), axis=1)
data.plot(
ax=ax,
kind='bar',
width=1,
stacked=True).legend(
loc='lower center',
bbox_to_anchor=(0.5, -0.5))
ax.set_xticklabels(data.index.strftime('%' + self.timeseries_epoch))
ax.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
plt.tight_layout()
mpl_to_xlsx(worksheet, 0, 0, ax.get_figure())
plt.close()
def cooccurrence_network(self):
# NOTE: this just generates the graph but does not visualize it in any way
tqdm.tqdm.write('Creating co-occurrence network.', file=sys.stderr)
# TODO: this could also be calculated elsewhere
doc_sents = []
for doc in self.docs:
for sent in doc.sents:
sent_data = []
for token in sent:
if not token.is_punct and not token.is_stop:
sent_data.append(token.lemma_.lower())
if sent_data:
doc_sents.append(sent_data)
self.G_cooccurrence = textacy.representations.network.build_cooccurrence_network(doc_sents)
self.doc_sents = doc_sents
def sentiment_analysis(self):
tqdm.tqdm.write('Running sentiment analysis.', file=sys.stderr)
# Depeche Mood
import textacy.resources
rs = textacy.resources.DepecheMood(lang="en", word_rep='lemmapos')
rs.download()
moods = {}
for row, doc in tqdm.tqdm(self.docs.iteritems()):
moods[row] = rs.get_emotional_valence(doc)
self.moods = pd.DataFrame.from_dict(moods, orient='index')
self.moods = self.moods.add_prefix('_DepecheMood_')
# NLTK
from nltk.sentiment import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
pols = {}
for row, doc in tqdm.tqdm(self.docs.iteritems()):
pols[row] = sia.polarity_scores(doc.text)
self.polarities = pd.DataFrame.from_dict(pols, orient='index')
self.polarities = self.polarities.add_prefix('_NLTK_sentiment_')
def report_sentiment_analysis(self):
tqdm.tqdm.write('Reporting sentiment analysis.', file=sys.stderr)
table = self.df_init.join(self.moods.join(self.polarities))
table.to_excel(
self.writer,
startrow=0,
sheet_name='sentiment_analysis')
worksheet = self.writer.sheets['sentiment_analysis']
worksheet.freeze_panes(1, 0)
columns = ['index'] + table.columns.tolist()
columns_data = []
for column in columns:
columns_data.append(
{'header': column})
table_range = xlsxwriter.utility.xl_range(
0,
0,
len(table.index),
len(table.columns))
table_style = self.table_style
table_style.update({'columns': columns_data})
worksheet.add_table(
table_range,
table_style)
def report_wordcloud(self):
tqdm.tqdm.write('Drawing wordcloud.', file=sys.stderr)
worksheet = self.writer.book.add_worksheet('wordcloud')
all_terms = self.terms_list.sum()
s_all_terms = pd.Series(all_terms)
wc_freqs = s_all_terms.value_counts().to_dict()
wc = WordCloud(
background_color='white',
max_words=10000,
scale=16,
color_func=lambda *args, **kwargs: 'black'
)
wc.generate_from_frequencies(wc_freqs)
fig, ax = plt.subplots()
ax.imshow(wc, interpolation='bilinear')
ax.axis('off')
plt.title('Full wordcloud')
plt.tight_layout()
mpl_to_xlsx(worksheet, 0, 0, fig)
plt.close()
def report_settings(self):
worksheet = self.writer.book.add_worksheet('settings')
#TODO
def define_xlsx_styles(self):
self.topic_format = self.writer.book.add_format({
'text_wrap': True,
'valign': 'bottom',
'align': 'left',
'fg_color': '#D7E4BC',
'rotation': 30,
'font_size': 8,
'border': 1})
self.header_format = self.writer.book.add_format({
'text_wrap': True,
'valign': 'bottom',
'align': 'left',
'rotation': 30,
'font_size': 12,
'border': 1})
self.hidden_format = self.writer.book.add_format({
'font_color': '#FFFFFF'})
self.centered = self.writer.book.add_format({
'align': 'center'})
self.highlighted_format = self.writer.book.add_format({
'bold': True})
# TODO: use this in tables
self.table_style = {
'style': 'Table Style Light 15',
'banded_rows': False}
def build_xlsx_report(self,
outfile='df.xlsx'):
self.build()
self.keyword_extraction()
self.word_counts()
self.topic_modelling()
self.sentiment_analysis()
self.cooccurrence_network()
tqdm.tqdm.write('Writing to {}'.format(outfile), file=sys.stderr)
self.writer = pd.ExcelWriter(outfile, engine='xlsxwriter')
self.define_xlsx_styles()
self.report_counts()
self.report_tm()
self.report_topic_sentiment()
self.report_wordclouds()
self.report_dominant_topics()
self.report_termite_plot()
self.report_timeline_chart()
self.report_sentiment_analysis()
self.report_wordcloud()
self.report_settings()
self.writer.save()
self.writer.close()
tqdm.tqdm.write('Saved.', file=sys.stderr)
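# Editor's example (a minimal, hypothetical driver; the column names and output file are
# placeholders, and an English spaCy model plus NLTK's VADER lexicon must be installed):
#
#     import pandas as pd
#
#     df = pd.DataFrame({
#         'text': ['first free-text answer ...', 'second free-text answer ...'],
#         'date': pd.to_datetime(['2021-01-05', '2021-02-10']),
#     })
#     tm = textminer(df, text_column='text', timeseries_column='date', n_topics=2)
#     tm.build_xlsx_report(outfile='report.xlsx')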
| 33.113731 | 113 | 0.569131 |
8155b7fb9ed46f990f1f54d2d69ba1c3e6e27571 | 380 | py | Python |
vshare/extensions.py | wandonye/vshare | beea2f71fb7a37d9f9110e16dd3e260ba28bdea1 | ["BSD-3-Clause"] | null | null | null |
vshare/extensions.py | wandonye/vshare | beea2f71fb7a37d9f9110e16dd3e260ba28bdea1 | ["BSD-3-Clause"] | null | null | null |
vshare/extensions.py | wandonye/vshare | beea2f71fb7a37d9f9110e16dd3e260ba28bdea1 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from flask_mail import Mail
mail = Mail()
from flask_cache import Cache
cache = Cache()
from flask_login import LoginManager
login_manager = LoginManager()
from flask_openid import OpenID
oid = OpenID()
from vshare.tokens import TokenManager
token_manager = TokenManager()
| 20 | 43 | 0.763158 |
db5d4f4e75d28d02efb2ad86c0b943e4154f89c0 | 1,186 | py | Python |
backend/apps/notification_app/notification_commands.py | raphaelrpl/portal | 9e84e52a73500390187d3fc7c4871cf8a3620231 | ["MIT"] | null | null | null |
backend/apps/notification_app/notification_commands.py | raphaelrpl/portal | 9e84e52a73500390187d3fc7c4871cf8a3620231 | ["MIT"] | null | null | null |
backend/apps/notification_app/notification_commands.py | raphaelrpl/portal | 9e84e52a73500390187d3fc7c4871cf8a3620231 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.gaeutil import SaveCommand, ModelSearchCommand
from gaeforms.ndb.form import ModelForm
from gaegraph.business_base import UpdateNode, NodeSearch, DeleteNode
from notification_app.notification_model import Notification
class NotificationSaveForm(ModelForm):
"""
Form used to save and update Notification
"""
_model_class = Notification
_include = [Notification.sender, Notification.user, Notification.message]
class NotificationForm(ModelForm):
"""
Form used to expose Notification's properties for list or json
"""
_model_class = Notification
class GetNotificationCommand(NodeSearch):
_model_class = Notification
class DeleteNotificationCommand(DeleteNode):
_model_class = Notification
class SaveNotificationCommand(SaveCommand):
_model_form_class = NotificationSaveForm
class UpdateNotificationCommand(UpdateNode):
_model_form_class = NotificationSaveForm
class ListNotificationCommand(ModelSearchCommand):
def __init__(self):
super(ListNotificationCommand, self).__init__(Notification.query_by_creation())
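# Editor's note (illustrative, not from this repository): these classes follow the
# gaebusiness/gaegraph command pattern. Assuming the usual convention that a command is
# executed by calling it (or its execute() method), saving a notification might look roughly
# like the sketch below; the exact invocation API and key types are assumptions.
#
#     cmd = SaveNotificationCommand(sender=sender_key, user=user_key, message='Hello')
#     notification = cmd()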
| 26.355556 | 87 | 0.790051 |
5d0092c575f85abab11695e984d00229cd3e182d | 6,032 | py | Python |
django_inventory/apps/inventory/__init__.py | alka653/inventory | b8fc944962666652189ff73ae53b1c2194553e02 | ["Apache-2.0"] | null | null | null |
django_inventory/apps/inventory/__init__.py | alka653/inventory | b8fc944962666652189ff73ae53b1c2194553e02 | ["Apache-2.0"] | null | null | null |
django_inventory/apps/inventory/__init__.py | alka653/inventory | b8fc944962666652189ff73ae53b1c2194553e02 | ["Apache-2.0"] | 1 | 2020-06-08T11:57:08.000Z | 2020-06-08T11:57:08.000Z |
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from common.api import register_links, register_menu
from .models import Location, ItemTemplate, Inventory, InventoryTransaction, Supplier
inventory_list = {'text': _('View all inventories'), 'view': 'inventory_list', 'famfam': 'package_go'}
inventory_create = {'text': _('Create new inventory'), 'view': 'inventory_create', 'famfam': 'package_add'}
inventory_update = {'text': _(u'Edit'), 'view': 'inventory_update', 'args': 'object.id', 'famfam': 'package_green'}
inventory_delete = {'text': _(u'Delete'), 'view': 'inventory_delete', 'args': 'object.id', 'famfam': 'package_delete'}
inventory_create_transaction = {'text': _('Add transaction'), 'view': 'inventory_create_transaction', 'args': 'object.id', 'famfam': 'book_add'}
inventory_view = {'text': _(u'Details'), 'view': 'inventory_view', 'args': 'object.id', 'famfam': 'package_go'}
inventory_list_transactions = {'text': _(u'Inventory transactions'), 'view': 'inventory_list_transactions', 'args': 'object.id', 'famfam': 'book_go'}
inventory_transaction_update = {'text': _(u'Edit'), 'view': 'inventory_transaction_update', 'args': 'object.id', 'famfam': 'book_add'}
inventory_transaction_delete = {'text': _(u'Delete'), 'view': 'inventory_transaction_delete', 'args': 'object.id', 'famfam': 'book_delete'}
inventory_transaction_view = {'text': _(u'Details'), 'view': 'inventory_transaction_view', 'args': 'object.id', 'famfam': 'book_go'}
location_list = {'text': _('Locations'), 'view': 'location_list', 'famfam': 'map'}
location_create = {'text': _(u'Create new location'), 'view': 'location_create', 'famfam': 'map_add'}
location_update = {'text': _(u'Edit'), 'view': 'location_update', 'args': 'object.id', 'famfam': 'map_edit'}
location_delete = {'text': _(u'Delete'), 'view': 'location_delete', 'args': 'object.id', 'famfam': 'map_delete'}
supplier_create = {'text': _('Create new supplier'), 'view': 'supplier_create', 'famfam': 'lorry_add'}
supplier_list = {'text': _('Suppliers'), 'view': 'supplier_list', 'famfam': 'lorry'}
supplier_update = {'text': _('Edit'), 'view': 'supplier_update', 'args': 'object.id', 'famfam': 'lorry'}
supplier_delete = {'text': _('Delete'), 'view': 'supplier_delete', 'args': 'object.id', 'famfam': 'lorry_delete'}
supplier_assign_itemtemplate = {'text': _(u'Assign templates'), 'view': 'supplier_assign_itemtemplates', 'args': 'object.id', 'famfam': 'page_go'}
supplier_purchase_orders = {'text': _(u'Related purchase orders'), 'view': 'supplier_purchase_orders', 'args': 'object.id', 'famfam': 'cart_go'}
template_list = {'text': _('View all'), 'view': 'template_list', 'famfam': 'page_go'}
template_create = {'text': _('Create new template'), 'view': 'template_create', 'famfam': 'page_add'}
template_orphan_list = {'text': _('Orphan templates'), 'view': 'template_orphans_list'}
template_update = {'text': _(u'Edit'), 'view': 'template_update', 'args': 'object.id', 'famfam': 'page_edit'}
template_delete = {'text': _(u'Delete'), 'view': 'template_delete', 'args': 'object.id', 'famfam': 'page_delete'}
template_photos = {'text': _(u'Add / remove photos'), 'view': 'template_photos', 'args': 'object.id', 'famfam': 'picture_go'}
template_assets = {'text': _(u'Related assets'), 'view': 'template_items_list', 'args': 'object.id', 'famfam': 'computer_go'}
template_assign_supplies = {'text': _(u'Assign supplies'), 'view': 'template_assign_supply', 'args': 'object.id', 'famfam': 'monitor'}
template_assign_suppliers = {'text': _(u'Assign suppliers'), 'view': 'template_assign_suppliers', 'args': 'object.id', 'famfam': 'lorry_go'}
jump_to_template = {'text': _(u'Template'), 'view': 'template_view', 'args': 'object.supply.id', 'famfam': 'page_go'}
jump_to_inventory = {'text': _(u'Return to inventory'), 'view': 'inventory_view', 'args': 'object.inventory.id', 'famfam': 'package_go'}
template_menu_links = [template_list, template_orphan_list, supplier_list]
inventory_menu_links = [
inventory_list,
]
location_filter = {'name': 'Location', 'title': _(u'location'), 'queryset': Location.objects.all(), 'destination': 'location'}
register_links(['template_list', 'template_create', 'template_view', 'template_orphans_list', 'template_update', 'template_delete', 'template_photos', 'template_assign_supply', 'template_assign_suppliers'], [template_create], menu_name='sidebar')
register_links(ItemTemplate, [template_update, template_delete, template_photos, template_assets, template_assign_supplies, template_assign_suppliers])
register_links(['supplier_list', 'supplier_create', 'supplier_update', 'supplier_view', 'supplier_delete', 'supplier_assign_itemtemplates'], [supplier_create], menu_name='sidebar')
register_links(Supplier, [supplier_update, supplier_delete, supplier_assign_itemtemplate, supplier_purchase_orders])
register_links(['inventory_view', 'inventory_list', 'inventory_create', 'inventory_update', 'inventory_delete'], [inventory_create], menu_name='sidebar')
register_links(Inventory, [inventory_update, inventory_delete, inventory_list_transactions, inventory_create_transaction])
register_links(Inventory, [inventory_view], menu_name='sidebar')
register_links(['inventory_transaction_update', 'inventory_transaction_delete', 'inventory_transaction_view'], [inventory_create_transaction], menu_name='sidebar')
register_links(InventoryTransaction, [inventory_transaction_view, inventory_transaction_update, inventory_transaction_delete, jump_to_template])
register_links(InventoryTransaction, [jump_to_inventory], menu_name='sidebar')
register_links(['location_list', 'location_create', 'location_update', 'location_delete'], [location_create], menu_name='sidebar')
register_links(Location, [location_update, location_delete])
register_menu([
{'text': _('Templates'), 'view': 'template_list', 'links': template_menu_links, 'famfam': 'page', 'position': 1},
{'text': _('Inventories'), 'view': 'inventory_list', 'links': inventory_menu_links, 'famfam': 'package', 'position': 4},
])
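# Editor's sketch (hypothetical, following the conventions above): one more sidebar link for
# locations would reuse the same dict + register_links pattern. 'location_view' is a
# placeholder view name and is not necessarily defined in this app.
#
#     location_view = {'text': _(u'Details'), 'view': 'location_view',
#                      'args': 'object.id', 'famfam': 'map_go'}
#     register_links(Location, [location_view])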
| 81.513514 | 246 | 0.732924 |
0e522ffec783fb6d0d3dc897f878b2f3c67c21ce | 38,785 | py | Python |
pypy/module/cpyext/typeobject.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | ["Apache-2.0", "OpenSSL"] | null | null | null |
pypy/module/cpyext/typeobject.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | ["Apache-2.0", "OpenSSL"] | null | null | null |
pypy/module/cpyext/typeobject.py | yxzoro/pypy | 6e47b3d3e5513d9639a21554963a6ace172ccfee | ["Apache-2.0", "OpenSSL"] | null | null | null |
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib import jit, rawrefcount
from rpython.rlib.objectmodel import specialize, we_are_translated
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import DescrMismatch
from pypy.interpreter.error import oefmt
from pypy.interpreter.typedef import (
GetSetProperty, TypeDef, interp_attrproperty, interp2app)
from pypy.module.__builtin__.abstractinst import abstract_issubclass_w
from pypy.module.cpyext import structmemberdefs
from pypy.module.cpyext.api import (
cpython_api, cpython_struct, bootstrap_function, Py_ssize_t,
slot_function, generic_cpy_call, METH_VARARGS, METH_KEYWORDS, CANNOT_FAIL,
build_type_checkers_flags, cts, parse_dir, PyTypeObject,
PyTypeObjectPtr, Py_buffer,
Py_TPFLAGS_HEAPTYPE, Py_TPFLAGS_READY, Py_TPFLAGS_READYING,
Py_TPFLAGS_LONG_SUBCLASS, Py_TPFLAGS_LIST_SUBCLASS,
Py_TPFLAGS_TUPLE_SUBCLASS, Py_TPFLAGS_UNICODE_SUBCLASS,
Py_TPFLAGS_DICT_SUBCLASS, Py_TPFLAGS_BASE_EXC_SUBCLASS,
Py_TPFLAGS_TYPE_SUBCLASS,
Py_TPFLAGS_BYTES_SUBCLASS,
Py_TPPYPYFLAGS_FLOAT_SUBCLASS,
)
from pypy.module.cpyext.cparser import CTypeSpace
from pypy.module.cpyext.methodobject import (W_PyCClassMethodObject,
PyCFunction_NewEx, PyCFunction, PyMethodDef,
W_PyCMethodObject, W_PyCFunctionObject, extract_doc, extract_txtsig,
W_PyCWrapperObject)
from pypy.module.cpyext.modsupport import convert_method_defs
from pypy.module.cpyext.pyobject import (
PyObject, make_ref, from_ref, get_typedescr, make_typedescr,
track_reference, decref, as_pyobj, incref)
from pypy.module.cpyext.slotdefs import (
slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function,
llslot)
from pypy.module.cpyext.state import State
from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne
from pypy.module.cpyext.typeobjectdefs import (
PyGetSetDef, PyMemberDef, PyMappingMethods,
PyNumberMethods, PySequenceMethods, PyBufferProcs)
from pypy.objspace.std.typeobject import W_TypeObject, find_best_base
#WARN_ABOUT_MISSING_SLOT_FUNCTIONS = False
PyType_Check, PyType_CheckExact = build_type_checkers_flags("Type")
PyHeapTypeObject = cts.gettype('PyHeapTypeObject *')
cts.parse_header(parse_dir / "cpyext_descrobject.h")
cts.parse_header(parse_dir / "typeslots.h")
class W_GetSetPropertyEx(GetSetProperty):
def __init__(self, getset, w_type):
self.getset = getset
self.w_type = w_type
doc = fset = fget = fdel = None
if doc:
# XXX dead code?
doc = rffi.charp2str(getset.c_doc)
if getset.c_get:
fget = GettersAndSetters.getter.im_func
if getset.c_set:
fset = GettersAndSetters.setter.im_func
fdel = GettersAndSetters.deleter.im_func
GetSetProperty.__init__(self, fget, fset, fdel, doc,
cls=None, use_closure=True,
tag="cpyext_1")
self.name = rffi.charp2str(getset.c_name)
def readonly_attribute(self, space): # overwritten
raise oefmt(space.w_AttributeError,
"attribute '%s' of '%N' objects is not writable",
self.name, self.w_type)
def PyDescr_NewGetSet(space, getset, w_type):
return W_GetSetPropertyEx(getset, w_type)
def make_GetSet(space, getsetprop):
py_getsetdef = lltype.malloc(PyGetSetDef, flavor='raw')
doc = getsetprop.doc
if doc:
py_getsetdef.c_doc = rffi.str2charp(doc)
else:
py_getsetdef.c_doc = rffi.cast(rffi.CCHARP, 0)
py_getsetdef.c_name = rffi.str2charp(getsetprop.getname(space).encode('utf-8'))
# XXX FIXME - actually assign these !!!
py_getsetdef.c_get = cts.cast('getter', 0)
py_getsetdef.c_set = cts.cast('setter', 0)
py_getsetdef.c_closure = cts.cast('void*', 0)
return py_getsetdef
class W_MemberDescr(GetSetProperty):
name = 'member_descriptor'
def __init__(self, member, w_type):
self.member = member
self.name = rffi.charp2str(member.c_name)
self.w_type = w_type
flags = rffi.cast(lltype.Signed, member.c_flags)
doc = set = None
if member.c_doc:
doc = rffi.charp2str(member.c_doc)
get = GettersAndSetters.member_getter.im_func
del_ = GettersAndSetters.member_delete.im_func
if not (flags & structmemberdefs.READONLY):
set = GettersAndSetters.member_setter.im_func
GetSetProperty.__init__(self, get, set, del_, doc,
cls=None, use_closure=True,
tag="cpyext_2")
# change the typedef name
W_MemberDescr.typedef = TypeDef(
"member_descriptor",
__get__ = interp2app(GetSetProperty.descr_property_get),
__set__ = interp2app(GetSetProperty.descr_property_set),
__delete__ = interp2app(GetSetProperty.descr_property_del),
__name__ = interp_attrproperty('name', cls=GetSetProperty,
wrapfn="newtext_or_none"),
__objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass),
__doc__ = interp_attrproperty('doc', cls=GetSetProperty,
wrapfn="newtext_or_none"),
)
assert not W_MemberDescr.typedef.acceptable_as_base_class # no __new__
@bootstrap_function
def init_memberdescrobject(space):
make_typedescr(W_MemberDescr.typedef,
basestruct=cts.gettype('PyMemberDescrObject'),
attach=memberdescr_attach,
realize=memberdescr_realize,
)
make_typedescr(W_GetSetPropertyEx.typedef,
basestruct=cts.gettype('PyGetSetDescrObject'),
attach=getsetdescr_attach,
)
make_typedescr(W_PyCClassMethodObject.typedef,
basestruct=cts.gettype('PyMethodDescrObject'),
attach=methoddescr_attach,
realize=classmethoddescr_realize,
)
make_typedescr(W_PyCMethodObject.typedef,
basestruct=cts.gettype('PyMethodDescrObject'),
attach=methoddescr_attach,
realize=methoddescr_realize,
)
def memberdescr_attach(space, py_obj, w_obj, w_userdata=None):
"""
Fills a newly allocated PyMemberDescrObject with the given W_MemberDescr
object. The values must not be modified.
"""
py_memberdescr = cts.cast('PyMemberDescrObject*', py_obj)
# XXX assign to d_dname, d_type?
assert isinstance(w_obj, W_MemberDescr)
py_memberdescr.c_d_member = w_obj.member
def memberdescr_realize(space, obj):
# XXX NOT TESTED When is this ever called?
member = cts.cast('PyMemberDef*', obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_MemberDescr, w_type)
w_obj.__init__(member, w_type)
track_reference(space, obj, w_obj)
return w_obj
def getsetdescr_attach(space, py_obj, w_obj, w_userdata=None):
"""
Fills a newly allocated PyGetSetDescrObject with the given W_GetSetPropertyEx
object. The values must not be modified.
"""
py_getsetdescr = cts.cast('PyGetSetDescrObject*', py_obj)
if isinstance(w_obj, GetSetProperty):
py_getsetdef = make_GetSet(space, w_obj)
assert space.isinstance_w(w_userdata, space.w_type)
w_obj = W_GetSetPropertyEx(py_getsetdef, w_userdata)
# now w_obj.getset is py_getsetdef, which was freshly allocated
# XXX how is this ever released?
# XXX assign to d_dname, d_type?
assert isinstance(w_obj, W_GetSetPropertyEx)
py_getsetdescr.c_d_getset = w_obj.getset
def methoddescr_attach(space, py_obj, w_obj, w_userdata=None):
py_methoddescr = cts.cast('PyMethodDescrObject*', py_obj)
# XXX assign to d_dname, d_type?
assert isinstance(w_obj, W_PyCFunctionObject)
py_methoddescr.c_d_method = w_obj.ml
def classmethoddescr_realize(space, obj):
# XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCClassMethodObject, w_type)
w_obj.__init__(space, method, w_type)
track_reference(space, obj, w_obj)
return w_obj
def methoddescr_realize(space, obj):
# XXX NOT TESTED When is this ever called?
method = rffi.cast(lltype.Ptr(PyMethodDef), obj)
w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
w_obj = space.allocate_instance(W_PyCMethodObject, w_type)
w_obj.__init__(space, method, w_type)
track_reference(space, obj, w_obj)
return w_obj
def convert_getset_defs(space, dict_w, getsets, w_type):
getsets = rffi.cast(rffi.CArrayPtr(PyGetSetDef), getsets)
if getsets:
i = -1
while True:
i = i + 1
getset = getsets[i]
name = getset.c_name
if not name:
break
name = rffi.charp2str(name)
w_descr = PyDescr_NewGetSet(space, getset, w_type)
dict_w[name] = w_descr
def convert_member_defs(space, dict_w, members, w_type):
members = rffi.cast(rffi.CArrayPtr(PyMemberDef), members)
if members:
i = 0
while True:
member = members[i]
name = member.c_name
if not name:
break
name = rffi.charp2str(name)
w_descr = W_MemberDescr(member, w_type)
dict_w[name] = w_descr
i += 1
missing_slots={}
def warn_missing_slot(space, method_name, slot_name, w_type):
if not we_are_translated():
if slot_name not in missing_slots:
missing_slots[slot_name] = w_type.getname(space)
print "missing slot %r/%r, discovered on %r" % (
method_name, slot_name, w_type.getname(space))
def update_all_slots(space, w_type, pto):
# fill slots in pto
for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots:
slot_func_helper = None
w_descr = w_type.dict_w.get(method_name, None)
if w_descr:
# use the slot_apifunc (userslots) to lookup at runtime
pass
elif len(slot_names) ==1:
# 'inherit' from tp_base
slot_func_helper = getattr(pto.c_tp_base, slot_names[0])
else:
struct = getattr(pto.c_tp_base, slot_names[0])
if struct:
slot_func_helper = getattr(struct, slot_names[1])
if not slot_func_helper:
if not slot_apifunc:
warn_missing_slot(space, method_name, slot_name, w_type)
continue
slot_func_helper = slot_apifunc.get_llhelper(space)
fill_slot(space, pto, w_type, slot_names, slot_func_helper)
def update_all_slots_builtin(space, w_type, pto):
typedef = w_type.layout.typedef
for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots:
slot_apifunc = get_slot_tp_function(space, typedef, slot_name, method_name)
if not slot_apifunc:
warn_missing_slot(space, method_name, slot_name, w_type)
continue
slot_llfunc = slot_apifunc.get_llhelper(space)
fill_slot(space, pto, w_type, slot_names, slot_llfunc)
@specialize.arg(3)
def fill_slot(space, pto, w_type, slot_names, slot_func_helper):
# XXX special case wrapper-functions and use a "specific" slot func
if len(slot_names) == 1:
setattr(pto, slot_names[0], slot_func_helper)
elif ((w_type is space.w_list or w_type is space.w_tuple) and
slot_names[0] == 'c_tp_as_number'):
# XXX hack - how can we generalize this? The problem is method
# names like __mul__ map to more than one slot, and we have no
# convenient way to indicate which slots CPython has filled
#
# We need at least this special case since Numpy checks that
# (list, tuple) do __not__ fill tp_as_number
pass
elif ((space.issubtype_w(w_type, space.w_bytes) or
space.issubtype_w(w_type, space.w_unicode)) and
slot_names[0] == 'c_tp_as_number'):
# like above but for any str type
pass
else:
assert len(slot_names) == 2
struct = getattr(pto, slot_names[0])
if not struct:
#assert not space.config.translating
assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
if slot_names[0] == 'c_tp_as_number':
STRUCT_TYPE = PyNumberMethods
elif slot_names[0] == 'c_tp_as_sequence':
STRUCT_TYPE = PySequenceMethods
elif slot_names[0] == 'c_tp_as_buffer':
STRUCT_TYPE = PyBufferProcs
elif slot_names[0] == 'c_tp_as_mapping':
STRUCT_TYPE = PyMappingMethods
else:
raise AssertionError(
"Structure not allocated: %s" % (slot_names[0],))
struct = lltype.malloc(STRUCT_TYPE, flavor='raw', zero=True)
setattr(pto, slot_names[0], struct)
setattr(struct, slot_names[1], slot_func_helper)
def add_operators(space, dict_w, pto, name):
from pypy.module.cpyext.object import PyObject_HashNotImplemented
hash_not_impl = llslot(space, PyObject_HashNotImplemented)
for method_name, slot_names, wrapper_class, doc in slotdefs_for_wrappers:
if method_name in dict_w:
continue
offset = [rffi.offsetof(lltype.typeOf(pto).TO, slot_names[0])]
if len(slot_names) == 1:
func = getattr(pto, slot_names[0])
if slot_names[0] == 'c_tp_hash':
# two special cases where __hash__ is explicitly set to None
# (which leads to an unhashable type):
# 1) tp_hash == PyObject_HashNotImplemented
# 2) tp_hash == NULL and tp_richcompare not NULL
if hash_not_impl == func or (
not func and pto.c_tp_richcompare):
dict_w[method_name] = space.w_None
continue
else:
assert len(slot_names) == 2
struct = getattr(pto, slot_names[0])
if not struct:
continue
offset.append(rffi.offsetof(lltype.typeOf(struct).TO, slot_names[1]))
func = getattr(struct, slot_names[1])
func_voidp = rffi.cast(rffi.VOIDP, func)
if not func:
continue
if wrapper_class is None:
continue
assert issubclass(wrapper_class, W_PyCWrapperObject)
w_obj = wrapper_class(space, pto, method_name, doc, func_voidp, offset=offset[:])
dict_w[method_name] = w_obj
if pto.c_tp_doc:
raw_doc = rffi.charp2str(cts.cast('char*', pto.c_tp_doc))
dict_w['__doc__'] = space.newtext(extract_doc(raw_doc, name))
if pto.c_tp_new:
add_tp_new_wrapper(space, dict_w, pto)
@slot_function([PyObject, PyObject, PyObject], PyObject)
def tp_new_wrapper(space, self, w_args, w_kwds):
self_pytype = rffi.cast(PyTypeObjectPtr, self)
tp_new = self_pytype.c_tp_new
# Check that the user doesn't do something silly and unsafe like
# object.__new__(dict). To do this, we check that the most
# derived base that's not a heap type is this type.
# XXX do it
args_w = space.fixedview(w_args)
w_subtype = args_w[0]
w_args = space.newtuple(args_w[1:])
subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype))
try:
w_obj = generic_cpy_call(space, tp_new, subtype, w_args, w_kwds)
finally:
decref(space, subtype)
return w_obj
@specialize.memo()
def get_new_method_def(space):
state = space.fromcache(State)
if state.new_method_def:
return state.new_method_def
ptr = lltype.malloc(PyMethodDef, flavor="raw", zero=True,
immortal=True)
ptr.c_ml_name = rffi.cast(rffi.CONST_CCHARP, rffi.str2charp("__new__"))
lltype.render_immortal(ptr.c_ml_name)
rffi.setintfield(ptr, 'c_ml_flags', METH_VARARGS | METH_KEYWORDS)
ptr.c_ml_doc = rffi.cast(rffi.CONST_CCHARP, rffi.str2charp(
"Create and return a new object. "
"See help(type) for accurate signature."))
lltype.render_immortal(ptr.c_ml_doc)
state.new_method_def = ptr
return ptr
def setup_new_method_def(space):
ptr = get_new_method_def(space)
ptr.c_ml_meth = rffi.cast(PyCFunction, llslot(space, tp_new_wrapper))
@jit.dont_look_inside
def is_tp_new_wrapper(space, ml):
return ml.c_ml_meth == rffi.cast(PyCFunction, llslot(space, tp_new_wrapper))
def add_tp_new_wrapper(space, dict_w, pto):
if "__new__" in dict_w:
return
pyo = rffi.cast(PyObject, pto)
dict_w["__new__"] = PyCFunction_NewEx(space, get_new_method_def(space),
from_ref(space, pyo), None)
def inherit_special(space, pto, w_obj, base_pto):
# XXX missing: copy basicsize and flags in a magical way
# (minimally, if tp_basicsize is zero or too low, we copy it from the base)
if pto.c_tp_basicsize < base_pto.c_tp_basicsize:
pto.c_tp_basicsize = base_pto.c_tp_basicsize
if pto.c_tp_itemsize < base_pto.c_tp_itemsize:
pto.c_tp_itemsize = base_pto.c_tp_itemsize
#/* Setup fast subclass flags */
if space.issubtype_w(w_obj, space.w_BaseException):
pto.c_tp_flags |= Py_TPFLAGS_BASE_EXC_SUBCLASS
elif space.issubtype_w(w_obj, space.w_type):
pto.c_tp_flags |= Py_TPFLAGS_TYPE_SUBCLASS
elif space.issubtype_w(w_obj, space.w_int):
pto.c_tp_flags |= Py_TPFLAGS_LONG_SUBCLASS
elif space.issubtype_w(w_obj, space.w_bytes):
pto.c_tp_flags |= Py_TPFLAGS_BYTES_SUBCLASS
elif space.issubtype_w(w_obj, space.w_unicode):
pto.c_tp_flags |= Py_TPFLAGS_UNICODE_SUBCLASS
elif space.issubtype_w(w_obj, space.w_tuple):
pto.c_tp_flags |= Py_TPFLAGS_TUPLE_SUBCLASS
elif space.issubtype_w(w_obj, space.w_list):
pto.c_tp_flags |= Py_TPFLAGS_LIST_SUBCLASS
elif space.issubtype_w(w_obj, space.w_dict):
pto.c_tp_flags |= Py_TPFLAGS_DICT_SUBCLASS
# the following types are pypy-specific extensions, using tp_pypy_flags
elif space.issubtype_w(w_obj, space.w_float):
pto.c_tp_pypy_flags |= Py_TPPYPYFLAGS_FLOAT_SUBCLASS
def check_descr(space, w_self, w_type):
if not space.isinstance_w(w_self, w_type):
raise DescrMismatch()
class GettersAndSetters:
def getter(self, space, w_self):
assert isinstance(self, W_GetSetPropertyEx)
check_descr(space, w_self, self.w_type)
return generic_cpy_call(
space, self.getset.c_get, w_self,
self.getset.c_closure)
def setter(self, space, w_self, w_value):
assert isinstance(self, W_GetSetPropertyEx)
check_descr(space, w_self, self.w_type)
res = generic_cpy_call(
space, self.getset.c_set, w_self, w_value,
self.getset.c_closure)
if rffi.cast(lltype.Signed, res) < 0:
state = space.fromcache(State)
state.check_and_raise_exception()
def deleter(self, space, w_self):
assert isinstance(self, W_GetSetPropertyEx)
check_descr(space, w_self, self.w_type)
res = generic_cpy_call(
space, self.getset.c_set, w_self, None,
self.getset.c_closure)
if rffi.cast(lltype.Signed, res) < 0:
state = space.fromcache(State)
state.check_and_raise_exception()
def member_getter(self, space, w_self):
assert isinstance(self, W_MemberDescr)
check_descr(space, w_self, self.w_type)
pyref = make_ref(space, w_self)
try:
return PyMember_GetOne(
space, rffi.cast(rffi.CCHARP, pyref), self.member)
finally:
decref(space, pyref)
def member_delete(self, space, w_self):
assert isinstance(self, W_MemberDescr)
check_descr(space, w_self, self.w_type)
pyref = make_ref(space, w_self)
try:
PyMember_SetOne(
space, rffi.cast(rffi.CCHARP, pyref), self.member, None)
finally:
decref(space, pyref)
def member_setter(self, space, w_self, w_value):
assert isinstance(self, W_MemberDescr)
check_descr(space, w_self, self.w_type)
pyref = make_ref(space, w_self)
try:
PyMember_SetOne(
space, rffi.cast(rffi.CCHARP, pyref), self.member, w_value)
finally:
decref(space, pyref)
class W_PyCTypeObject(W_TypeObject):
@jit.dont_look_inside
def __init__(self, space, pto):
bases_w = space.fixedview(from_ref(space, pto.c_tp_bases))
dict_w = {}
name = rffi.charp2str(cts.cast('char*', pto.c_tp_name))
add_operators(space, dict_w, pto, name)
convert_method_defs(space, dict_w, pto.c_tp_methods, self)
convert_getset_defs(space, dict_w, pto.c_tp_getset, self)
convert_member_defs(space, dict_w, pto.c_tp_members, self)
flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
if flag_heaptype:
minsize = rffi.sizeof(PyHeapTypeObject.TO)
else:
minsize = rffi.sizeof(PyObject.TO)
new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0)
self.flag_cpytype = True
W_TypeObject.__init__(self, space, name,
bases_w or [space.w_object], dict_w, force_new_layout=new_layout,
is_heaptype=flag_heaptype)
# if a sequence or a mapping, then set the flag to force it
if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item:
self.flag_map_or_seq = 'S'
elif pto.c_tp_as_mapping and pto.c_tp_as_mapping.c_mp_subscript:
self.flag_map_or_seq = 'M'
if pto.c_tp_doc:
rawdoc = rffi.charp2str(cts.cast('char*', pto.c_tp_doc))
self.w_doc = space.newtext_or_none(extract_doc(rawdoc, name))
self.text_signature = extract_txtsig(rawdoc, name)
def _cpyext_attach_pyobj(self, space, py_obj):
self._cpy_ref = py_obj
rawrefcount.create_link_pyobj(self, py_obj)
@bootstrap_function
def init_typeobject(space):
make_typedescr(space.w_type.layout.typedef,
basestruct=PyHeapTypeObject.TO,
alloc=type_alloc,
attach=type_attach,
realize=type_realize,
dealloc=type_dealloc)
@slot_function([PyObject], lltype.Void)
def type_dealloc(space, obj):
from pypy.module.cpyext.object import _dealloc
obj_pto = rffi.cast(PyTypeObjectPtr, obj)
base_pyo = rffi.cast(PyObject, obj_pto.c_tp_base)
decref(space, obj_pto.c_tp_bases)
decref(space, obj_pto.c_tp_mro)
decref(space, obj_pto.c_tp_cache) # let's do it like cpython
decref(space, obj_pto.c_tp_dict)
if obj_pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE:
heaptype = rffi.cast(PyHeapTypeObject, obj)
decref(space, heaptype.c_ht_name)
decref(space, heaptype.c_ht_qualname)
decref(space, base_pyo)
_dealloc(space, obj)
# CCC port it to C
def type_alloc(typedescr, space, w_metatype, itemsize=0):
metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype))
# Don't increase refcount for non-heaptypes
if metatype:
flags = rffi.cast(lltype.Signed, metatype.c_tp_flags)
if not flags & Py_TPFLAGS_HEAPTYPE:
decref(space, metatype)
heaptype = lltype.malloc(PyHeapTypeObject.TO,
flavor='raw', zero=True,
add_memory_pressure=True)
pto = heaptype.c_ht_type
pto.c_ob_refcnt = 1
pto.c_ob_pypy_link = 0
pto.c_ob_type = metatype
pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE
pto.c_tp_as_async = heaptype.c_as_async
pto.c_tp_as_number = heaptype.c_as_number
pto.c_tp_as_sequence = heaptype.c_as_sequence
pto.c_tp_as_mapping = heaptype.c_as_mapping
pto.c_tp_as_buffer = heaptype.c_as_buffer
pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
pto.c_tp_itemsize = 0
return rffi.cast(PyObject, heaptype)
def type_attach(space, py_obj, w_type, w_userdata=None):
"""
Fills a newly allocated PyTypeObject from an existing type.
"""
assert isinstance(w_type, W_TypeObject)
pto = rffi.cast(PyTypeObjectPtr, py_obj)
typedescr = get_typedescr(w_type.layout.typedef)
if space.is_w(w_type, space.w_bytes):
pto.c_tp_itemsize = 1
elif space.is_w(w_type, space.w_tuple):
pto.c_tp_itemsize = rffi.sizeof(PyObject)
state = space.fromcache(State)
pto.c_tp_free = state.C.PyObject_Free
pto.c_tp_alloc = state.C.PyType_GenericAlloc
builder = state.builder
if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0
and builder.cpyext_type_init is None):
# this ^^^ is not None only during startup of cpyext. At that
# point we might get into trouble by doing make_ref() when
# things are not initialized yet. So in this case, simply use
# str2charp() and "leak" the string.
w_typename = space.getattr(w_type, space.newtext('__name__'))
heaptype = cts.cast('PyHeapTypeObject*', pto)
heaptype.c_ht_name = make_ref(space, w_typename)
from pypy.module.cpyext.unicodeobject import PyUnicode_AsUTF8
pto.c_tp_name = cts.cast('const char *',
PyUnicode_AsUTF8(space, heaptype.c_ht_name))
else:
pto.c_tp_name = cts.cast('const char*', rffi.str2charp(w_type.name))
# uninitialized fields:
# c_tp_print
# XXX implement
# c_tp_compare and more?
w_base = best_base(space, w_type.bases_w)
pto.c_tp_base = rffi.cast(PyTypeObjectPtr, make_ref(space, w_base))
# dealloc
if space.gettypeobject(w_type.layout.typedef) is w_type:
# only for the exact type, like 'space.w_tuple' or 'space.w_list'
pto.c_tp_dealloc = typedescr.get_dealloc(space)
else:
# for all subtypes, use base's dealloc (requires sorting in attach_all)
pto.c_tp_dealloc = pto.c_tp_base.c_tp_dealloc
if not pto.c_tp_dealloc:
# strange, but happens (ABCMeta)
pto.c_tp_dealloc = state.C._PyPy_subtype_dealloc
if builder.cpyext_type_init is not None:
builder.cpyext_type_init.append((pto, w_type))
else:
finish_type_1(space, pto, w_type.bases_w)
finish_type_2(space, pto, w_type)
pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct)
if pto.c_tp_base:
if pto.c_tp_base.c_tp_basicsize > pto.c_tp_basicsize:
pto.c_tp_basicsize = pto.c_tp_base.c_tp_basicsize
if pto.c_tp_itemsize < pto.c_tp_base.c_tp_itemsize:
pto.c_tp_itemsize = pto.c_tp_base.c_tp_itemsize
if w_type.is_heaptype():
update_all_slots(space, w_type, pto)
else:
update_all_slots_builtin(space, w_type, pto)
if not pto.c_tp_new:
base_object_pyo = make_ref(space, space.w_object)
base_object_pto = rffi.cast(PyTypeObjectPtr, base_object_pyo)
flags = rffi.cast(lltype.Signed, pto.c_tp_flags)
if pto.c_tp_base != base_object_pto or flags & Py_TPFLAGS_HEAPTYPE:
pto.c_tp_new = pto.c_tp_base.c_tp_new
decref(space, base_object_pyo)
pto.c_tp_flags |= Py_TPFLAGS_READY
return pto
def py_type_ready(space, pto):
if pto.c_tp_flags & Py_TPFLAGS_READY:
return
type_realize(space, rffi.cast(PyObject, pto))
@cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1)
def PyType_Ready(space, pto):
py_type_ready(space, pto)
return 0
def type_realize(space, py_obj):
pto = rffi.cast(PyTypeObjectPtr, py_obj)
assert pto.c_tp_flags & Py_TPFLAGS_READY == 0
assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0
pto.c_tp_flags |= Py_TPFLAGS_READYING
try:
w_obj = _type_realize(space, py_obj)
finally:
pto.c_tp_flags &= ~Py_TPFLAGS_READYING
pto.c_tp_flags |= Py_TPFLAGS_READY
return w_obj
def solid_base(space, w_type):
typedef = w_type.layout.typedef
return space.gettypeobject(typedef)
def best_base(space, bases_w):
if not bases_w:
return None
return find_best_base(bases_w)
def inherit_slots(space, pto, w_base):
base_pyo = make_ref(space, w_base)
try:
base = rffi.cast(PyTypeObjectPtr, base_pyo)
if not pto.c_tp_dealloc:
pto.c_tp_dealloc = base.c_tp_dealloc
if not pto.c_tp_init:
pto.c_tp_init = base.c_tp_init
if not pto.c_tp_alloc:
pto.c_tp_alloc = base.c_tp_alloc
# XXX check for correct GC flags!
if not pto.c_tp_free:
pto.c_tp_free = base.c_tp_free
if not pto.c_tp_setattro:
pto.c_tp_setattro = base.c_tp_setattro
if not pto.c_tp_getattro:
pto.c_tp_getattro = base.c_tp_getattro
if not pto.c_tp_as_buffer:
pto.c_tp_as_buffer = base.c_tp_as_buffer
if base.c_tp_as_buffer:
# inherit base.c_tp_as_buffer functions not inherited from w_type
pto_as = pto.c_tp_as_buffer
base_as = base.c_tp_as_buffer
if not pto_as.c_bf_getbuffer:
pto_as.c_bf_getbuffer = base_as.c_bf_getbuffer
if not pto_as.c_bf_releasebuffer:
pto_as.c_bf_releasebuffer = base_as.c_bf_releasebuffer
finally:
decref(space, base_pyo)
def _type_realize(space, py_obj):
"""
Creates an interpreter type from a PyTypeObject structure.
"""
# missing:
# unsupported:
# tp_mro, tp_subclasses
py_type = rffi.cast(PyTypeObjectPtr, py_obj)
if not py_type.c_tp_base:
# borrowed reference, but w_object is unlikely to disappear
base = as_pyobj(space, space.w_object)
py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base)
finish_type_1(space, py_type)
if py_type.c_ob_type:
w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type))
else:
# Somehow the tp_base type is created with no ob_type, notably
# PyString_Type and PyBaseString_Type
# While this is a hack, cpython does it as well.
w_metatype = space.w_type
w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype)
track_reference(space, py_obj, w_obj)
# __init__ wraps all slotdefs functions from py_type via add_operators
w_obj.__init__(space, py_type)
w_obj.ready()
finish_type_2(space, py_type, w_obj)
base = py_type.c_tp_base
if base:
# XXX refactor - parts of this are done in finish_type_2 -> inherit_slots
if not py_type.c_tp_as_number:
py_type.c_tp_as_number = base.c_tp_as_number
if not py_type.c_tp_as_sequence:
py_type.c_tp_as_sequence = base.c_tp_as_sequence
if not py_type.c_tp_as_mapping:
py_type.c_tp_as_mapping = base.c_tp_as_mapping
#if not py_type.c_tp_as_buffer: py_type.c_tp_as_buffer = base.c_tp_as_buffer
return w_obj
def finish_type_1(space, pto, bases_w=None):
"""
Sets up tp_bases, necessary before creating the interpreter type.
"""
base = pto.c_tp_base
base_pyo = rffi.cast(PyObject, pto.c_tp_base)
if base and not base.c_tp_flags & Py_TPFLAGS_READY:
type_realize(space, base_pyo)
if base and not pto.c_ob_type: # will be filled later
pto.c_ob_type = base.c_ob_type
if not pto.c_tp_bases:
if bases_w is None:
if not base:
bases_w = []
else:
bases_w = [from_ref(space, base_pyo)]
is_heaptype = bool(pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE)
pto.c_tp_bases = make_ref(space, space.newtuple(bases_w),
immortal=not is_heaptype)
def finish_type_2(space, pto, w_obj):
"""
Sets up other attributes, when the interpreter type has been created.
"""
pto.c_tp_mro = make_ref(space, space.newtuple(w_obj.mro_w))
base = pto.c_tp_base
if base:
inherit_special(space, pto, w_obj, base)
for w_base in space.fixedview(from_ref(space, pto.c_tp_bases)):
if isinstance(w_base, W_TypeObject):
inherit_slots(space, pto, w_base)
#else:
# w_base is a W_ClassObject, ignore it
if not pto.c_tp_setattro:
from pypy.module.cpyext.object import PyObject_GenericSetAttr
pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr)
if not pto.c_tp_getattro:
from pypy.module.cpyext.object import PyObject_GenericGetAttr
pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr)
if w_obj.is_cpytype():
decref(space, pto.c_tp_dict)
w_dict = w_obj.getdict(space)
# pass in the w_obj to convert any values that are
# unbound GetSetProperty into bound PyGetSetDescrObject
pto.c_tp_dict = make_ref(space, w_dict, w_obj)
@cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL)
def PyType_IsSubtype(space, a, b):
"""Return true if a is a subtype of b.
"""
w_type1 = from_ref(space, rffi.cast(PyObject, a))
w_type2 = from_ref(space, rffi.cast(PyObject, b))
return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct?
@cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject)
def PyType_GenericNew(space, type, w_args, w_kwds):
return generic_cpy_call(
space, type.c_tp_alloc, type, 0)
def _parse_typeslots():
slots_hdr = CTypeSpace()
slots_hdr.parse_header(parse_dir / "typeslots.h")
prefix2member = {
'tp': "ht_type",
'am': "as_async",
'nb': "as_number",
'mp': "as_mapping",
'sq': "as_sequence",
'bf': "as_buffer"}
TABLE = []
HTO = cts.gettype('PyHeapTypeObject')
for name, num in slots_hdr.macros.items():
assert isinstance(num, int)
assert name.startswith('Py_')
name = name[3:]
membername = 'c_' + prefix2member[name[:2]]
slotname = 'c_' + name
TARGET = HTO._flds[membername]._flds[slotname]
TABLE.append((num, membername, slotname, TARGET))
return unrolling_iterable(TABLE)
SLOT_TABLE = _parse_typeslots()
def fill_ht_slot(ht, slotnum, ptr):
for num, membername, slotname, TARGET in SLOT_TABLE:
if num == slotnum:
setattr(getattr(ht, membername), slotname, rffi.cast(TARGET, ptr))
@cts.decl("""PyObject *
PyType_FromSpecWithBases(PyType_Spec *spec, PyObject *bases)""",
result_is_ll=True)
def PyType_FromSpecWithBases(space, spec, bases):
from pypy.module.cpyext.unicodeobject import PyUnicode_FromString
state = space.fromcache(State)
p_type = cts.cast('PyTypeObject*', make_ref(space, space.w_type))
res = state.ccall("PyType_GenericAlloc", p_type, 0)
res = cts.cast('PyHeapTypeObject *', res)
typ = res.c_ht_type
typ.c_tp_flags = rffi.cast(lltype.Unsigned, spec.c_flags)
typ.c_tp_flags |= Py_TPFLAGS_HEAPTYPE
specname = rffi.charp2str(cts.cast('char*', spec.c_name))
dotpos = specname.rfind('.')
if dotpos < 0:
name = specname
else:
name = specname[dotpos + 1:]
res.c_ht_name = make_ref(space, space.newtext(name))
res.c_ht_qualname = res.c_ht_name
incref(space, res.c_ht_qualname)
typ.c_tp_name = spec.c_name
slotdefs = rffi.cast(rffi.CArrayPtr(cts.gettype('PyType_Slot')), spec.c_slots)
if not bases:
w_base = space.w_object
bases_w = []
i = 0
while True:
slotdef = slotdefs[i]
slotnum = rffi.cast(lltype.Signed, slotdef.c_slot)
if slotnum == 0:
break
elif slotnum == cts.macros['Py_tp_base']:
w_base = from_ref(space, cts.cast('PyObject*', slotdef.c_pfunc))
elif slotnum == cts.macros['Py_tp_bases']:
bases = cts.cast('PyObject*', slotdef.c_pfunc)
bases_w = space.fixedview(from_ref(space, bases))
i += 1
if not bases_w:
bases_w = [w_base]
else:
bases_w = space.fixedview(from_ref(space, bases))
w_base = best_base(space, bases_w)
base = cts.cast('PyTypeObject*', make_ref(space, w_base))
if False: # not base.c_tp_flags & Py_TPFLAGS_BASETYPE:
raise oefmt(space.w_TypeError,
"type '%s' is not an acceptable base type",
rffi.charp2str(base.c_tp_name))
typ.c_tp_as_async = res.c_as_async
typ.c_tp_as_number = res.c_as_number
typ.c_tp_as_sequence = res.c_as_sequence
typ.c_tp_as_mapping = res.c_as_mapping
typ.c_tp_as_buffer = res.c_as_buffer
typ.c_tp_bases = bases
typ.c_tp_base = base
typ.c_tp_basicsize = cts.cast('Py_ssize_t', spec.c_basicsize)
typ.c_tp_itemsize = cts.cast('Py_ssize_t', spec.c_itemsize)
i = 0
while True:
slotdef = slotdefs[i]
slot = rffi.cast(lltype.Signed, slotdef.c_slot)
if slot == 0:
break
if slot < 0: # or slot > len(slotoffsets):
raise oefmt(space.w_RuntimeError, "invalid slot offset")
if slot in (cts.macros['Py_tp_base'], cts.macros['Py_tp_bases']):
# Processed above
i += 1
continue
fill_ht_slot(res, slot, slotdef.c_pfunc)
# XXX: need to make a copy of the docstring slot, which usually
# points to a static string literal
i += 1
if not typ.c_tp_dealloc:
typ.c_tp_dealloc = state.C._PyPy_subtype_dealloc
py_type_ready(space, typ)
return cts.cast('PyObject*', res)
@cpython_api([PyTypeObjectPtr, PyObject], PyObject, error=CANNOT_FAIL,
result_borrowed=True)
def _PyType_Lookup(space, type, w_name):
"""Internal API to look for a name through the MRO.
This returns a borrowed reference, and doesn't set an exception!"""
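# Illustrative note (added; not in the original module): on the C side this is
# the helper used during attribute lookup, along the lines of
#   descr = _PyType_Lookup(Py_TYPE(obj), name);
# and, because the reference is borrowed, the caller must not decref the result.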
w_type = from_ref(space, rffi.cast(PyObject, type))
assert isinstance(w_type, W_TypeObject)
if not space.isinstance_w(w_name, space.w_text):
return None
name = space.text_w(w_name)
w_obj = w_type.lookup(name)
# this assumes that w_obj is not dynamically created, but will stay alive
# until w_type is modified or dies. Assuming this, we return a borrowed ref
return w_obj
@cpython_api([PyTypeObjectPtr], lltype.Void)
def PyType_Modified(space, w_obj):
"""Invalidate the internal lookup cache for the type and all of its
subtypes. This function must be called after any manual
modification of the attributes or base classes of the type.
"""
# Invalidate the type cache in case of a builtin type.
if not isinstance(w_obj, W_TypeObject):
return
if w_obj.is_cpytype():
w_obj.mutated(None)
| 39.657464
| 89
| 0.66915
|
3406548a3ddd10cb59acf42b18bd1bd6762d28da
| 13,678
|
py
|
Python
|
google-cloud-sdk/lib/third_party/pygments/formatters/latex.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 28
|
2015-01-26T14:00:59.000Z
|
2021-01-09T18:13:30.000Z
|
google-cloud-sdk/lib/third_party/pygments/formatters/latex.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 1
|
2016-04-19T13:03:17.000Z
|
2016-04-19T13:03:17.000Z
|
google-cloud-sdk/lib/third_party/pygments/formatters/latex.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 11
|
2015-02-20T14:41:33.000Z
|
2021-12-22T23:50:36.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
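#
# Illustrative example (an addition; the exact colours depend on the chosen
# style): after substitution, get_style_defs() emits one macro per short class
# name, roughly
#   \expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
# so a keyword token written as \PY{k}{def } comes out bold and coloured, while
# classes with no matching \PY@tok@... definition are silently ignored.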
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
*New in Pygments 0.7.*
*New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``). *New in Pygments 1.2.*
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``). *New in Pygments 1.2.*
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.iteritems():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(',numbers=left' +
(start and ',firstnumber=%d' % start or '') +
(step and ',stepnumber=%d' % step or ''))
if self.mathescape or self.texcomments:
outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(',' + self.verboptions)
outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
else:
value = escape_tex(value, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write('\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
| 36.670241
| 80
| 0.5503
|
e5f139ab0a890a500f93ffb4308302463ef5abb6
| 1,387
|
py
|
Python
|
google/appengine/ext/mapreduce/pipeline_base.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
google/appengine/ext/mapreduce/pipeline_base.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
google/appengine/ext/mapreduce/pipeline_base.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Base pipelines."""
import google
from appengine_pipeline.src import pipeline
from google.appengine.ext.mapreduce import parameters
class PipelineBase(pipeline.Pipeline):
"""Base class for all pipelines within mapreduce framework.
Rewrites base path to use pipeline library bundled with mapreduce.
"""
def start(self, **kwargs):
if "base_path" not in kwargs:
kwargs["base_path"] = parameters._DEFAULT_PIPELINE_BASE_PATH
return pipeline.Pipeline.start(self, **kwargs)
class _OutputSlotsMixin(object):
"""Defines common output slots for all MR user facing pipelines.
result_status: one of model.MapreduceState._RESULTS. When a MR pipeline
finishes, the user should check this for the status of the MR job.
"""
output_names = ["result_status"]
| 28.895833
| 74
| 0.751983
|
b39627467ccd0053cc9199c206927f784d793338
| 4,740
|
py
|
Python
|
vispy/scene/visuals.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/scene/visuals.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/scene/visuals.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
The classes in scene.visuals are visuals that may be added to a scenegraph
using the methods and properties defined by vispy.scene.Node.
These classes are automatically generated by mixing vispy.scene.Node with
the Visual classes found in vispy.visuals.
For developing custom visuals, it is recommended to subclass from
vispy.visuals.Visual rather than vispy.scene.Node.
"""
import re
from .. import visuals
from .node import Node
def create_visual_node(subclass):
# Create a new subclass of Node.
# Decide on new class name
clsname = subclass.__name__
assert clsname.endswith('Visual')
clsname = clsname[:-6]
# Generate new docstring based on visual docstring
try:
doc = generate_docstring(subclass, clsname)
except Exception:
# If parsing fails, just return the original Visual docstring
doc = subclass.__doc__
# New __init__ method
def __init__(self, *args, **kwargs):
parent = kwargs.pop('parent', None)
name = kwargs.pop('name', None)
subclass.__init__(self, *args, **kwargs)
Node.__init__(self, parent=parent, name=name)
# Create new class
cls = type(clsname, (subclass, Node), {'__init__': __init__,
'__doc__': doc})
return cls
def generate_docstring(subclass, clsname):
# Generate a Visual+Node docstring by modifying the Visual's docstring
# to include information about Node inheritance and extra init args.
sc_doc = subclass.__doc__
if sc_doc is None:
sc_doc = ""
# find locations within docstring to insert new parameters
lines = sc_doc.split("\n")
# discard blank lines at start
while lines and lines[0].strip() == '':
lines.pop(0)
i = 0
params_started = False
param_indent = None
first_blank = None
param_end = None
while i < len(lines):
line = lines[i]
# ignore blank lines and '------' lines
if re.search(r'\w', line):
indent = len(line) - len(line.lstrip())
# If Params section has already started, check for end of params
# (that is where we will insert new params)
if params_started:
if indent < param_indent:
break
elif indent == param_indent:
# might be end of parameters block..
if re.match(r'\s*[a-zA-Z0-9_]+\s*:\s*\S+', line) is None:
break
param_end = i + 1
# Check for beginning of params section
elif re.match(r'\s*Parameters\s*', line):
params_started = True
param_indent = indent
if first_blank is None:
first_blank = i
# Check for first blank line
# (this is where the Node inheritance description will be
# inserted)
elif first_blank is None and line.strip() == '':
first_blank = i
i += 1
if i == len(lines) and param_end is None:
# reached end of docstring; insert here
param_end = i
# If original docstring has no params heading, we need to generate it.
if not params_started:
lines.extend(["", " Parameters", " ----------"])
param_end = len(lines)
if first_blank is None:
first_blank = param_end - 3
params_started = True
# build class and parameter description strings
class_desc = ("\n This class inherits from visuals.%sVisual and "
"scene.Node, allowing the visual to be placed inside a "
"scenegraph.\n" % (clsname))
parm_doc = (" parent : Node\n"
" The parent node to assign to this node (optional).\n"
" name : string\n"
" A name for this node, used primarily for debugging\n"
" (optional).")
# assemble all docstring parts
lines = (lines[:first_blank] +
[class_desc] +
lines[first_blank:param_end] +
[parm_doc] +
lines[param_end:])
doc = '\n'.join(lines)
return doc
__all__ = []
for obj_name in dir(visuals):
obj = getattr(visuals, obj_name)
if (isinstance(obj, type) and
issubclass(obj, visuals.Visual) and
obj is not visuals.Visual):
cls = create_visual_node(obj)
globals()[cls.__name__] = cls
__all__.append(cls.__name__)
| 33.617021
| 78
| 0.58038
|
bb0a6f57bf1a48f5bb0408552ac3863a1b953991
| 11,570
|
py
|
Python
|
fattureincloud_python_sdk/model/modify_received_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | 2
|
2022-02-17T08:33:17.000Z
|
2022-03-22T09:27:00.000Z
|
fattureincloud_python_sdk/model/modify_received_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
fattureincloud_python_sdk/model/modify_received_document_response.py
|
fattureincloud/fattureincloud-python-sdk
|
f3a40fac345751014ea389680efdaef90f03bac1
|
[
"MIT"
] | null | null | null |
"""
Fatture in Cloud API v2 - API Reference
Connect your software with Fatture in Cloud, the invoicing platform chosen by more than 400.000 businesses in Italy. The Fatture in Cloud API is based on REST, and makes possible to interact with the user related data prior authorization via OAuth2 protocol. # noqa: E501
The version of the OpenAPI document: 2.0.15
Contact: info@fattureincloud.it
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fattureincloud_python_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fattureincloud_python_sdk.exceptions import ApiAttributeError
def lazy_import():
from fattureincloud_python_sdk.model.received_document import ReceivedDocument
globals()['ReceivedDocument'] = ReceivedDocument
class ModifyReceivedDocumentResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (ReceivedDocument,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ModifyReceivedDocumentResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (ReceivedDocument): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ModifyReceivedDocumentResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (ReceivedDocument): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.992395
| 278
| 0.58038
|
1d03f37bdf4badf42b8e70aa36976c9b861e7331
| 4,628
|
py
|
Python
|
helper.py
|
grantrosario/semantic-segmentation
|
a6feea1cfb2c402fb34fb8ee0b27f15dc15c7be7
|
[
"MIT"
] | null | null | null |
helper.py
|
grantrosario/semantic-segmentation
|
a6feea1cfb2c402fb34fb8ee0b27f15dc15c7be7
|
[
"MIT"
] | null | null | null |
helper.py
|
grantrosario/semantic-segmentation
|
a6feea1cfb2c402fb34fb8ee0b27f15dc15c7be7
|
[
"MIT"
] | null | null | null |
import re
import random
import numpy as np
import os.path
import scipy.misc
import shutil
import zipfile
import time
import tensorflow as tf
from glob import glob
from urllib.request import urlretrieve
from tqdm import tqdm
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
def maybe_download_pretrained_vgg(data_dir):
"""
Download and extract pretrained vgg model if it doesn't exist
:param data_dir: Directory to download the model to
"""
vgg_filename = 'vgg.zip'
vgg_path = os.path.join(data_dir, 'vgg')
vgg_files = [
os.path.join(vgg_path, 'variables/variables.data-00000-of-00001'),
os.path.join(vgg_path, 'variables/variables.index'),
os.path.join(vgg_path, 'saved_model.pb')]
missing_vgg_files = [vgg_file for vgg_file in vgg_files if not os.path.exists(vgg_file)]
if missing_vgg_files:
# Clean vgg dir
if os.path.exists(vgg_path):
shutil.rmtree(vgg_path)
os.makedirs(vgg_path)
# Download vgg
print('Downloading pre-trained vgg model...')
with DLProgress(unit='B', unit_scale=True, miniters=1) as pbar:
urlretrieve(
'https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/vgg.zip',
os.path.join(vgg_path, vgg_filename),
pbar.hook)
# Extract vgg
print('Extracting model...')
zip_ref = zipfile.ZipFile(os.path.join(vgg_path, vgg_filename), 'r')
zip_ref.extractall(data_dir)
zip_ref.close()
# Remove zip file to save space
os.remove(os.path.join(vgg_path, vgg_filename))
def gen_batch_function(data_folder, image_shape):
"""
Generate function to create batches of training data
:param data_folder: Path to folder that contains all the datasets
:param image_shape: Tuple - Shape of image
    :return: Function that creates batches of training data for a given batch size
"""
def get_batches_fn(batch_size):
"""
Create batches of training data
:param batch_size: Batch Size
:return: Batches of training data
"""
image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
label_paths = {
re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
background_color = np.array([255, 0, 0])
random.shuffle(image_paths)
for batch_i in range(0, len(image_paths), batch_size):
images = []
gt_images = []
for image_file in image_paths[batch_i:batch_i+batch_size]:
gt_image_file = label_paths[os.path.basename(image_file)]
image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)
gt_bg = np.all(gt_image == background_color, axis=2)
gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)
images.append(image)
gt_images.append(gt_image)
yield np.array(images), np.array(gt_images)
return get_batches_fn
def gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape):
"""
Generate test output using the test images
:param sess: TF session
:param logits: TF Tensor for the logits
    :param keep_prob: TF Placeholder for the dropout keep probability
:param image_pl: TF Placeholder for the image placeholder
:param data_folder: Path to the folder that contains the datasets
:param image_shape: Tuple - Shape of image
    :return: Output for each test image
"""
for image_file in glob(os.path.join(data_folder, 'image_2', '*.png')):
image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
im_softmax = sess.run(
[tf.nn.softmax(logits)],
{keep_prob: 1.0, image_pl: [image]})
im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])
segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)
mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
mask = scipy.misc.toimage(mask, mode="RGBA")
street_im = scipy.misc.toimage(image)
street_im.paste(mask, box=None, mask=mask)
yield os.path.basename(image_file), np.array(street_im)
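# ---------------------------------------------------------------------------
# A minimal usage sketch of the helpers above (comments only; the data
# directory and image shape below are assumptions matching the KITTI road
# dataset layout that gen_batch_function expects):
#
#     maybe_download_pretrained_vgg('./data')
#     get_batches_fn = gen_batch_function('./data/data_road/training', (160, 576))
#     for images, gt_images in get_batches_fn(8):
#         # images: (8, 160, 576, 3); gt_images: (8, 160, 576, 2) one-hot masks
#         break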
| 37.024
| 93
| 0.646283
|
b41a7bf34bc04bac112723956df098b24a9efb0f
| 1,646
|
py
|
Python
|
azure-mgmt-eventhub/azure/mgmt/eventhub/models/sku.py
|
apahim/azure-sdk-for-python
|
f68c120f172404a65ddd477c16bcb4801a26a549
|
[
"MIT"
] | null | null | null |
azure-mgmt-eventhub/azure/mgmt/eventhub/models/sku.py
|
apahim/azure-sdk-for-python
|
f68c120f172404a65ddd477c16bcb4801a26a549
|
[
"MIT"
] | null | null | null |
azure-mgmt-eventhub/azure/mgmt/eventhub/models/sku.py
|
apahim/azure-sdk-for-python
|
f68c120f172404a65ddd477c16bcb4801a26a549
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Sku(Model):
"""SKU parameters supplied to the create namespace operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of this SKU. Possible values include: 'Basic',
'Standard'
:type name: str or ~azure.mgmt.eventhub.models.SkuName
:param tier: The billing tier of this particular SKU. Possible values
include: 'Basic', 'Standard'
:type tier: str or ~azure.mgmt.eventhub.models.SkuTier
:param capacity: The Event Hubs throughput units, value should be 0 to 20
throughput units.
:type capacity: int
"""
_validation = {
'name': {'required': True},
'capacity': {'maximum': 20, 'minimum': 0},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
self.capacity = kwargs.get('capacity', None)
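# A minimal usage sketch (not part of the generated SDK module; the values are
# illustrative only).
if __name__ == '__main__':
    sku = Sku(name='Standard', tier='Standard', capacity=2)
    print(sku.name, sku.tier, sku.capacity)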
| 35.021277
| 78
| 0.589915
|
67797acd6b06ea1e528adb03e93d741d9f91ee94
| 1,018
|
py
|
Python
|
vendor/packages/translate-toolkit/translate/lang/nso.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/translate-toolkit/translate/lang/nso.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-13T20:55:07.000Z
|
2021-12-13T20:55:07.000Z
|
vendor/packages/translate-toolkit/translate/lang/nso.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module represents the Northern Sotho language.
.. seealso:: http://en.wikipedia.org/wiki/Northern_Sotho_language
"""
from translate.lang import common
class nso(common.Common):
"""This class represents Northern Sotho."""
specialchars = "šŠ"
| 30.848485
| 70
| 0.744597
|
5a5bcd54c6a209f779d003ef161967c7f01d789b
| 1,081
|
py
|
Python
|
lib/surface/access_context_manager/levels/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/access_context_manager/levels/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/access_context_manager/levels/__init__.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for the Access Context Manager levels CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class AccessContextManager(base.Group):
"""Manage Access Context Manager levels.
An access level is a classification of requests based on raw attributes of
that request (e.g. IP address, device identity, time of day, etc.).
"""
| 36.033333
| 76
| 0.764107
|
44302443b731454a6eedb5a5feb82fa423bc9a89
| 1,307
|
py
|
Python
|
micropython/bluetooth/aioble/aioble/core.py
|
mkomon/micropython-lib
|
25ebe4a261e7b1c7c8471bceef2fd0e12837cdd2
|
[
"PSF-2.0"
] | 1,556
|
2015-01-18T01:10:21.000Z
|
2022-03-31T23:27:33.000Z
|
micropython/bluetooth/aioble/aioble/core.py
|
Li-Lian1069/micropython-lib
|
1dfca5ad343b2841965df6c4e59f92d6d94a24bd
|
[
"PSF-2.0"
] | 414
|
2015-01-01T09:01:22.000Z
|
2022-03-31T15:08:24.000Z
|
micropython/bluetooth/aioble/aioble/core.py
|
Li-Lian1069/micropython-lib
|
1dfca5ad343b2841965df6c4e59f92d6d94a24bd
|
[
"PSF-2.0"
] | 859
|
2015-02-05T13:23:00.000Z
|
2022-03-28T02:28:16.000Z
|
# MicroPython aioble module
# MIT license; Copyright (c) 2021 Jim Mussared
import bluetooth
log_level = 1
def log_error(*args):
if log_level > 0:
print("[aioble] E:", *args)
def log_warn(*args):
if log_level > 1:
print("[aioble] W:", *args)
def log_info(*args):
if log_level > 2:
print("[aioble] I:", *args)
class GattError(Exception):
def __init__(self, status):
self._status = status
def ensure_active():
if not ble.active():
try:
from .security import load_secrets
load_secrets()
except:
pass
ble.active(True)
def config(*args, **kwargs):
ensure_active()
return ble.config(*args, **kwargs)
def stop():
ble.active(False)
# Because different functionality is enabled by which files are available
# the different modules can register their IRQ handlers dynamically.
_irq_handlers = []
def register_irq_handler(handler):
_irq_handlers.append(handler)
# Dispatch IRQs to the registered sub-modules.
def ble_irq(event, data):
log_info(event, data)
for handler in _irq_handlers:
result = handler(event, data)
if result is not None:
return result
# TODO: Allow this to be injected.
ble = bluetooth.BLE()
ble.irq(ble_irq)
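# A brief usage sketch (comments only; requires MicroPython with BLE hardware,
# and the handler name below is hypothetical):
#
#     def my_irq_handler(event, data):
#         log_info("saw event", event)
#         return None              # returning None lets other handlers run too
#
#     register_irq_handler(my_irq_handler)
#     ensure_active()
#     print(config("mac"))         # query the local address once the radio is up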
| 18.152778
| 73
| 0.642693
|
504a2c4fa1a8ddd16f8865a964046580d2cd08de
| 4,645
|
py
|
Python
|
sppas/sppas/src/audiodata/aio/waveio.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/audiodata/aio/waveio.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/audiodata/aio/waveio.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.audiodata.aio.waveio.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import wave
from sppas.src.utils import u
from ..audio import sppasAudioPCM
# ---------------------------------------------------------------------------
class WaveIO(sppasAudioPCM):
"""
:author: Nicolas Chazeau, Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2017 Brigitte Bigi
:summary: A wave file open/save sppasAudioPCM class.
Waveform Audio File Format is a Microsoft and IBM audio file format
standard for storing an audio bitstream on PCs. It is an application of
the Resource Interchange File Format (RIFF) bitstream format method for
storing data in "chunks".
"""
def __init__(self):
"""Constructor."""
super(WaveIO, self).__init__()
# -----------------------------------------------------------------------
def open(self, filename):
"""Get an audio from a Waveform Audio File Format file.
:param filename (str) input file name.
"""
# Use the standard wave library to load the wave file
# open method returns a Wave_read() object
self._audio_fp = wave.open(u(filename), "r")
# -----------------------------------------------------------------------
def save(self, filename):
"""Write an audio content as a Waveform Audio File Format file.
:param filename (str) output filename.
"""
if self._audio_fp is not None:
self.rewind()
frames = self._audio_fp.readframes(self._audio_fp.getnframes())
self.save_fragment(filename, frames)
elif len(self._channels) == 1:
channel = self._channels[0]
f = wave.Wave_write(u(filename))
f.setnchannels(1)
f.setsampwidth(channel.get_sampwidth())
f.setframerate(channel.get_framerate())
try:
f.writeframes(channel.get_frames())
finally:
f.close()
else:
self.verify_channels()
sw = self._channels[0].get_sampwidth()
frames = b""
for i in range(0, self._channels[0].get_nframes()*sw, sw):
for j in range(len(self._channels)):
frames += self._channels[j].get_frames(sw)
f = wave.Wave_write(u(filename))
f.setnchannels(len(self._channels))
f.setsampwidth(self._channels[0].get_sampwidth())
f.setframerate(self._channels[0].get_framerate())
try:
f.writeframes(frames)
finally:
f.close()
# -----------------------------------------------------------------------
def save_fragment(self, filename, frames):
"""Write an audio content as a Waveform Audio File Format file.
:param filename: (str) output filename.
:param frames: (str) the frames to write
"""
f = wave.Wave_write(u(filename))
f.setnchannels(self.get_nchannels())
f.setsampwidth(self.get_sampwidth())
f.setframerate(self.get_framerate())
try:
f.writeframes(frames)
finally:
f.close()
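# ---------------------------------------------------------------------------
# A minimal usage sketch (comments only; the file names are hypothetical):
#
#     audio = WaveIO()
#     audio.open("input.wav")    # wraps wave.open() internally
#     audio.save("copy.wav")     # re-writes the frames read from the input file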
| 34.407407
| 78
| 0.522067
|
479828349670ee7787fdf1bfb9f65e88bbc09f29
| 2,535
|
py
|
Python
|
disent/frameworks/ae/_supervised__tae.py
|
neonkitchen/disent
|
0f45fefea03473690dfdbf48ef83f6e17ca9b8b3
|
[
"MIT"
] | null | null | null |
disent/frameworks/ae/_supervised__tae.py
|
neonkitchen/disent
|
0f45fefea03473690dfdbf48ef83f6e17ca9b8b3
|
[
"MIT"
] | null | null | null |
disent/frameworks/ae/_supervised__tae.py
|
neonkitchen/disent
|
0f45fefea03473690dfdbf48ef83f6e17ca9b8b3
|
[
"MIT"
] | 1
|
2022-01-18T06:43:33.000Z
|
2022-01-18T06:43:33.000Z
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
from dataclasses import dataclass
from numbers import Number
from typing import Any
from typing import Dict
from typing import Sequence
from typing import Tuple
from typing import Union
import torch
from disent.frameworks.ae._unsupervised__ae import Ae
from disent.frameworks.helper.triplet_loss import compute_triplet_loss
from disent.frameworks.helper.triplet_loss import TripletLossConfig
# ========================================================================= #
# triple ae #
# ========================================================================= #
class TripletAe(Ae):
REQUIRED_OBS = 3
@dataclass
class cfg(Ae.cfg, TripletLossConfig):
pass
def hook_ae_compute_ave_aug_loss(self, zs: Sequence[torch.Tensor], xs_partial_recon: Sequence[torch.Tensor], xs_targ: Sequence[torch.Tensor]) -> Tuple[Union[torch.Tensor, Number], Dict[str, Any]]:
return compute_triplet_loss(zs=zs, cfg=self.cfg)
# ========================================================================= #
# END #
# ========================================================================= #
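# A minimal sketch of the loss call used in the hook above (comments only; it
# assumes the cfg dataclass has usable defaults and that zs is an
# (anchor, positive, negative) triple of latent batches, per REQUIRED_OBS = 3):
#
#     cfg = TripletAe.cfg()
#     zs = [torch.randn(16, 9) for _ in range(3)]
#     loss, logs = compute_triplet_loss(zs=zs, cfg=cfg)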
| 42.25
| 200
| 0.585799
|
c4e4e1702384645fc206c7a9409b60814103c451
| 2,316
|
py
|
Python
|
helpers/media_manipulator.py
|
tervay/the-blue-alliance
|
e14c15cb04b455f90a2fcfdf4c1cdbf8454e17f8
|
[
"MIT"
] | 1
|
2016-03-19T20:29:35.000Z
|
2016-03-19T20:29:35.000Z
|
helpers/media_manipulator.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 11
|
2020-10-10T03:05:29.000Z
|
2022-02-27T09:57:22.000Z
|
helpers/media_manipulator.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | null | null | null |
from helpers.cache_clearer import CacheClearer
from helpers.manipulator_base import ManipulatorBase
class MediaManipulator(ManipulatorBase):
"""
Handle Media database writes.
"""
@classmethod
def getCacheKeysAndControllers(cls, affected_refs):
return CacheClearer.get_media_cache_keys_and_controllers(affected_refs)
@classmethod
def updateMerge(self, new_media, old_media, auto_union=True):
"""
Given an "old" and a "new" Media object, replace the fields in the
"old" object that are present in the "new" object, but keep fields from
the "old" object that are null in the "new" object.
Special case: References (list of Keys) are merged, not overwritten
"""
attrs = [
'media_type_enum',
'foreign_key',
'details_json',
'year',
]
list_attrs = []
auto_union_attrs = [
'references',
'preferred_references',
'media_tag_enum',
]
old_media._updated_attrs = []
# if not auto_union, treat auto_union_attrs as list_attrs
if not auto_union:
list_attrs += auto_union_attrs
auto_union_attrs = []
for attr in attrs:
if getattr(new_media, attr) is not None:
if getattr(new_media, attr) != getattr(old_media, attr):
setattr(old_media, attr, getattr(new_media, attr))
old_media._updated_attrs.append(attr)
old_media.dirty = True
for attr in list_attrs:
if len(getattr(new_media, attr)) > 0 or not auto_union:
if getattr(new_media, attr) != getattr(old_media, attr):
setattr(old_media, attr, getattr(new_media, attr))
old_media._updated_attrs.append(attr)
old_media.dirty = True
for attr in auto_union_attrs:
old_set = set(getattr(old_media, attr))
new_set = set(getattr(new_media, attr))
unioned = old_set.union(new_set)
if unioned != old_set:
setattr(old_media, attr, list(unioned))
old_media._updated_attrs.append(attr)
old_media.dirty = True
return old_media
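# A brief illustration of the merge semantics above (comments only; the Media
# entities come from the datastore in the real code path, so the names here are
# hypothetical):
#
#     merged = MediaManipulator.updateMerge(new_media, old_media)
#     # scalar attrs ('media_type_enum', 'foreign_key', ...) are overwritten when
#     # the new value is not None; 'references' and the other auto_union attrs
#     # are unioned, and old_media.dirty is set only if something changed.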
| 34.567164
| 79
| 0.591105
|
f915ab6684f30d0debbe882f13d54ebe5cb8b802
| 2,772
|
py
|
Python
|
shop/models.py
|
Yang-Wei-Ting/williams_website
|
7e516ac9388f95e405d58d7f160d8f7081ff9083
|
[
"Apache-2.0"
] | null | null | null |
shop/models.py
|
Yang-Wei-Ting/williams_website
|
7e516ac9388f95e405d58d7f160d8f7081ff9083
|
[
"Apache-2.0"
] | null | null | null |
shop/models.py
|
Yang-Wei-Ting/williams_website
|
7e516ac9388f95e405d58d7f160d8f7081ff9083
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.urls import reverse
COUNTRY_CHOICES = [
('AQ', 'Antarctica'),
('AU', 'Australia'),
('AT', 'Austria'),
('BE', 'Belgium'),
('BR', 'Brazil'),
('CA', 'Canada'),
('CG', 'Congo'),
('CU', 'Cuba'),
('DK', 'Denmark'),
('EG', 'Egypt'),
('FI', 'Finland'),
('FR', 'France'),
('DE', 'Germany'),
('GR', 'Greece'),
('HU', 'Hungary'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IT', 'Italy'),
('MY', 'Malaysia'),
('NO', 'Norway'),
('CN', "People's Republic of China"),
('PH', 'Philippines'),
('PL', 'Poland'),
('PT', 'Portugal'),
('TW', 'Republic of China (Taiwan)'),
('KR', 'Republic of Korea'),
('SA', 'Saudi Arabia'),
('SG', 'Singapore'),
('ZA', 'South Africa'),
('ES', 'Spain'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('TH', 'Thailand'),
('TR', 'Turkey'),
('GB', 'United Kingdom'),
('US', 'United States of America'),
('VN', 'Viet Nam'),
('XX', 'Unknown'),
]
class ProductCategory(models.Model):
prodcat_name = models.CharField("Name", max_length=100)
class Meta:
ordering = ('prodcat_name',)
def __str__(self):
return self.prodcat_name
class Vendor(models.Model):
vend_name = models.CharField("Name", max_length=100)
vend_country = models.CharField("Country", max_length=2, choices=COUNTRY_CHOICES, default='TW')
vend_city = models.CharField("City", max_length=100)
class Meta:
ordering = ('vend_name',)
def __str__(self):
return self.vend_name
class Product(models.Model):
prod_name = models.CharField("Name", max_length=100)
prod_desc = models.TextField("Description")
prod_price = models.FloatField("Price (NTD)")
prod_imgname = models.CharField("Image File Name", max_length=100)
prod_imgsrc = models.TextField("Image Source")
prodcat = models.ForeignKey(ProductCategory, on_delete=models.CASCADE)
vend = models.ForeignKey(Vendor, on_delete=models.CASCADE)
class Meta:
ordering = ('prod_name',)
def __str__(self):
return self.prod_name
def get_absolute_url(self):
return reverse("product", args=[str(self.id)])
class Order(models.Model):
cust = models.ForeignKey("auth.User", on_delete=models.CASCADE)
prod = models.ForeignKey(Product, on_delete=models.CASCADE)
order_quantity = models.PositiveIntegerField("Quantity")
order_totalprice = models.PositiveIntegerField("Total Price")
order_date = models.DateField("Date", auto_now_add=True)
def __str__(self):
return f"Order ID {self.id}"
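# A minimal usage sketch (comments only; assumes Django is configured and the
# app's migrations have been applied; the field values are illustrative):
#
#     vendor = Vendor.objects.create(vend_name="Acme", vend_country="TW", vend_city="Taipei")
#     category = ProductCategory.objects.create(prodcat_name="Snacks")
#     product = Product.objects.create(
#         prod_name="Pineapple cake", prod_desc="A sample product.", prod_price=120.0,
#         prod_imgname="cake.jpg", prod_imgsrc="https://example.com/cake.jpg",
#         prodcat=category, vend=vendor)
#     product.get_absolute_url()   # resolves the "product" URL pattern for this id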
| 27.72
| 99
| 0.583333
|
21206be1770c402ea22d71e7426477e29b4c9f4e
| 3,878
|
py
|
Python
|
torchvision_paddle/to_pil_image.py
|
ImportPaddle/Old2Life
|
424a2433e9a00c7eaeb660c40d22f6168dc8f576
|
[
"MIT"
] | 1
|
2021-11-02T11:38:13.000Z
|
2021-11-02T11:38:13.000Z
|
torchvision_paddle/to_pil_image.py
|
ImportPaddle/Old2Life
|
424a2433e9a00c7eaeb660c40d22f6168dc8f576
|
[
"MIT"
] | null | null | null |
torchvision_paddle/to_pil_image.py
|
ImportPaddle/Old2Life
|
424a2433e9a00c7eaeb660c40d22f6168dc8f576
|
[
"MIT"
] | null | null | null |
import paddle
import numpy as np
from PIL import Image
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, paddle.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, paddle.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
# check number of channels
if pic.shape[-3] > 4:
raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-3]))
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
# check number of channels
if pic.shape[-1] > 4:
raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-1]))
npimg = pic
if isinstance(pic, paddle.Tensor):
if pic.is_floating_point() and mode != 'F':
pic = pic.mul(255).byte()
npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode)
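if __name__ == '__main__':
    # A small self-check of the conversion above (the array contents are
    # arbitrary): a random uint8 HWC array should come back as an RGB image.
    arr = (np.random.rand(32, 48, 3) * 255).astype(np.uint8)
    img = to_pil_image(arr)
    print(img.mode, img.size)   # expected: RGB (48, 32)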
| 39.571429
| 107
| 0.610108
|
9a4f96837d206b69cb1f965e2d5b61d3a3ad203e
| 1,344
|
py
|
Python
|
tests/test_http_lookupd.py
|
rcrai/asyncnsq
|
93f163b6d9fbf3c70ad6d045df45c0a77adae196
|
[
"MIT"
] | 1
|
2020-11-14T17:38:38.000Z
|
2020-11-14T17:38:38.000Z
|
tests/test_http_lookupd.py
|
rcrai/asyncnsq
|
93f163b6d9fbf3c70ad6d045df45c0a77adae196
|
[
"MIT"
] | null | null | null |
tests/test_http_lookupd.py
|
rcrai/asyncnsq
|
93f163b6d9fbf3c70ad6d045df45c0a77adae196
|
[
"MIT"
] | 2
|
2021-04-09T07:40:02.000Z
|
2021-04-11T10:30:33.000Z
|
from ._testutils import run_until_complete, BaseTest
from nsqio.http.lookupd import NsqLookupd
class NsqLookupdTest(BaseTest):
"""
:see: http://nsq.io/components/nsqd.html
"""
@run_until_complete
async def test_ok(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.ping()
self.assertEqual(res, "OK")
@run_until_complete
async def test_info(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.info()
self.assertTrue("version" in res)
@run_until_complete
async def test_lookup(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.lookup("foo")
self.assertIn("producers", res)
@run_until_complete
async def test_topics(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.topics()
self.assertIn("topics", res)
@run_until_complete
async def test_channels(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.channels("foo")
self.assertIn("channels", res)
@run_until_complete
async def test_nodes(self):
conn = NsqLookupd("127.0.0.1", 4161, loop=self.loop)
res = await conn.nodes()
self.assertIn("producers", res)
| 29.866667
| 60
| 0.62872
|
a076658680aaf3cb67c77f5d0f6a7c095ca03605
| 887
|
py
|
Python
|
setup.py
|
LevPerla/Time_Series_Prediction_RNN
|
ece481f9defa047423d667b8d49dca34ee83d1a3
|
[
"MIT"
] | 2
|
2022-02-06T09:57:53.000Z
|
2022-03-19T10:10:07.000Z
|
setup.py
|
LevPerla/Time_Series_Prediction_RNN
|
ece481f9defa047423d667b8d49dca34ee83d1a3
|
[
"MIT"
] | 5
|
2020-11-13T19:03:53.000Z
|
2021-04-15T13:06:37.000Z
|
setup.py
|
LevPerla/Time_Series_Prediction_RNN
|
ece481f9defa047423d667b8d49dca34ee83d1a3
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from pathlib import Path
# Read the contents of README file
source_root = Path(".")
with (source_root / "README.md").open(encoding="utf-8") as f:
long_description = f.read()
# Read the requirements
with (source_root / "requirements.txt").open(encoding="utf8") as f:
requirements = f.readlines()
setup(
name='ts_rnn',
version='0.1',
author="Lev Perla",
author_email="levperla@mail.ru",
description='Package to forecast time series with recurrent neural network',
packages=find_packages(),
    url='https://github.com/LevPerla/Time_Series_Prediction_RNN',
license="MIT",
python_requires=">=3.7",
install_requires=requirements,
keywords="keras data-science data-analysis python jupyter ipython",
long_description=long_description,
long_description_content_type="text/markdown",
)
| 31.678571
| 80
| 0.723788
|
dff3275b7806ca1876b3e6abc28ecb427d1fde9b
| 3,769
|
py
|
Python
|
production/test/testrig.py
|
Blinkinlabs/EightByEight
|
9df6381c33987d6e1bdc88115bfc41287b6bc875
|
[
"MIT"
] | 18
|
2016-08-23T03:45:16.000Z
|
2021-02-20T20:50:02.000Z
|
production/test/testrig.py
|
Blinkinlabs/EightByEight
|
9df6381c33987d6e1bdc88115bfc41287b6bc875
|
[
"MIT"
] | 3
|
2016-10-22T19:02:44.000Z
|
2020-09-21T18:12:24.000Z
|
production/test/testrig.py
|
Blinkinlabs/EightByEight
|
9df6381c33987d6e1bdc88115bfc41287b6bc875
|
[
"MIT"
] | 8
|
2016-08-19T20:56:57.000Z
|
2020-12-25T01:39:12.000Z
|
import ina219
import ads1015
#import Adafruit_ADS1x15
import RPi.GPIO as GPIO
class TestRig:
leds = {"pass" : 14, "fail" : 15}
powerModes = {"full" : 24, "limited" : 27}
#"name" : gpio
digitalPins = {
"1" : 5,
"2" : 6,
"3" : 12,
"4" : 13,
"5" : 16,
"6" : 19,
"7" : 20,
"8" : 21,
"9" : 26,
"10" : 4,
"11" : 17,
"12" : 22,
"13" : 23,
#14 conflicts with power_limited
#15 conflicts with power_full
"JTAG_TMS" : 25,
"JTAG_TCK" : 11,
"JTAG_TDI" : 10,
"JTAG_TDO" : 9,
"JTAG_RESET" : 7
}
#"name" : [adc, channel]
analogPins = {
"1" : [0, 3],
"2" : [0, 2],
"3" : [0, 1],
"4" : [0, 0],
"5" : [1, 3],
"6" : [1, 2],
"7" : [1, 1],
"8" : [1, 0]
}
usbPin = 18
startButtonPin = 8
def __init__(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for pin in self.leds.itervalues():
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.LOW)
for pin in self.powerModes.itervalues():
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.LOW)
for pin in self.digitalPins.itervalues():
GPIO.setup(pin, GPIO.IN)
self.dutCurrent = ina219.INA219()
#self.adc0 = Adafruit_ADS1x15.ADS1015(address=0x48)
#self.adc0 = Adafruit_ADS1x15.ADS1015(address=0x48)
#self.adc0 = ads1015.ADS1015(address=0x48)
#self.adc1 = ads1015.ADS1015(address=0x49)
GPIO.setup(self.usbPin, GPIO.OUT)
GPIO.output(self.usbPin, GPIO.LOW)
GPIO.setup(self.startButtonPin, GPIO.IN)
def setLED(self, led, state):
if led in self.leds:
GPIO.output(self.leds[led], state)
else:
			raise(NameError("Invalid led"))
def readStartButton(self):
return not GPIO.input(self.startButtonPin)
def setPowerMode(self, powerMode):
for pin in self.powerModes.itervalues():
GPIO.output(pin, GPIO.LOW)
if powerMode in self.powerModes:
GPIO.output(self.powerModes[powerMode], GPIO.HIGH)
elif powerMode == "off":
pass
else:
raise(NameError("Invalid power state"))
def enableUSB(self):
GPIO.output(self.usbPin, GPIO.HIGH)
def disableUSB(self):
GPIO.output(self.usbPin, GPIO.LOW)
def readDutPower(self):
values = {}
self.dutCurrent.measure()
values["Iin"] = self.dutCurrent.getCurrent_mA()
values["Vin"] = self.dutCurrent.getBusVoltage_V()
return values
def readVoltage(self, pin):
GAIN=2/3
if (pin in self.analogPins):
adcinfo = self.analogPins[pin]
if (adcinfo[0] == 0):
return self.adc0.measure(adcinfo[1])
elif (adcinfo[0] == 1):
return self.adc1.measure(adcinfo[1])
else:
raise(NameError("Invalid adc"))
else:
raise(NameError("Invalid pin"))
def readDigitalPin(self, pin):
if pin in self.digitalPins:
return GPIO.input(self.digitalPins[pin])
else:
raise(NameError("Invalid pin"))
def digitalPinMode(self, pin, mode):
if pin in self.digitalPins:
GPIO.setup(self.digitalPins[pin], mode)
else:
raise(NameError("Invalid pin"))
def digitalPinWrite(self, pin, state):
if pin in self.digitalPins:
GPIO.output(self.digitalPins[pin], state)
else:
raise(NameError("Invalid pin"))
if __name__ == '__main__':
import time
rig = TestRig()
#while(True):
# rig.setPowerMode("limited")
# time.sleep(.1)
# rig.dutCurrent.measure()
# print(rig.dutCurrent.getCurrent_mA()),
# print(rig.dutCurrent.getBusVoltage_V())
#
# rig.setPowerMode("full")
# time.sleep(.1)
# rig.dutCurrent.measure()
# print(rig.dutCurrent.getCurrent_mA()),
# print(rig.dutCurrent.getBusVoltage_V())
#
# rig.setPowerMode("off")
# time.sleep(.1)
# rig.dutCurrent.measure()
# print(rig.dutCurrent.getCurrent_mA()),
# print(rig.dutCurrent.getBusVoltage_V())
# print("")
#rig.enableUSB()
#time.sleep(.1)
#rig.disableUSB()
#rig.setPowerMode("full")
#rig.enableUSB()
#rig.readVoltages()
#print(rig.readDigitalPins())
#print(rig.readDigitalPin("2"))
#rig.setPowerMode("off")
#rig.disableUSB()
#rig.readVoltages()
| 20.708791
| 53
| 0.668878
|
7beaff66668bc46209d416e053ba12a65db5fb39
| 871
|
py
|
Python
|
FPE/ETL/Vector.py
|
chackoge/ERNIE_Plus
|
7e480c47a69fc2f736ac7fb55ece35dbff919938
|
[
"MIT"
] | 6
|
2017-09-26T23:45:52.000Z
|
2021-10-18T22:58:38.000Z
|
FPE/ETL/Vector.py
|
NETESOLUTIONS/ERNIE
|
454518f28b39a6f37ad8dde4f3be15d4dccc6f61
|
[
"MIT"
] | null | null | null |
FPE/ETL/Vector.py
|
NETESOLUTIONS/ERNIE
|
454518f28b39a6f37ad8dde4f3be15d4dccc6f61
|
[
"MIT"
] | 9
|
2017-11-22T13:42:32.000Z
|
2021-05-16T17:58:03.000Z
|
from decimal import *
class Vector(object):
    def __init__(self, args):
        """ Create a vector from a sequence of components, example: v = Vector([1, 2]) """
self.values = args
def norm(self):
""" Returns the norm (length, magnitude) of the vector """
return Decimal(sum(comp**2 for comp in self.values)).sqrt()
def normalize(self):
""" Returns a normalized unit vector """
norm = self.norm()
if norm:
normed = list(comp/norm for comp in self.values)
return Vector(normed)
else: return self
def mult(self, other):
return Vector([a * b for a, b in [x for x in zip(self.values, other.values)]])
def inner(self, other):
""" Returns the dot product (inner product) of self and other vector """
return sum(self.mult(other).values)
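if __name__ == '__main__':
    # A small self-check of the operations above (illustrative values only).
    v = Vector([3, 4])
    w = Vector([1, 2])
    print(v.norm())              # 5
    print(v.inner(w))            # 3*1 + 4*2 = 11
    print(v.normalize().values)  # components of the unit vector along v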
| 33.5
| 87
| 0.559127
|
a42ce5c4777fd9e818367700ee44627bc36bd128
| 5,709
|
py
|
Python
|
xpdview/waterfall.py
|
xpdAcq/xpdView
|
52a3837eae5b9ececb6f149fc4e7ca96776a2ba7
|
[
"BSD-3-Clause"
] | null | null | null |
xpdview/waterfall.py
|
xpdAcq/xpdView
|
52a3837eae5b9ececb6f149fc4e7ca96776a2ba7
|
[
"BSD-3-Clause"
] | 17
|
2017-01-17T18:37:28.000Z
|
2018-12-04T16:47:37.000Z
|
xpdview/waterfall.py
|
xpdAcq/xpdView
|
52a3837eae5b9ececb6f149fc4e7ca96776a2ba7
|
[
"BSD-3-Clause"
] | 1
|
2017-01-19T19:37:23.000Z
|
2017-01-19T19:37:23.000Z
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from cycler import cycler
simonCycle2 = [
"#0B3C5D",
"#B82601",
"#1c6b0a",
"#328CC1",
"#062F4F",
"#D9B310",
"#984B43",
"#76323F",
"#626E60",
"#AB987A",
"#C09F80",
"#b0b0b0ff",
]
mpl.rcParams["axes.prop_cycle"] = cycler(color=simonCycle2)
plt.rcParams["axes.linewidth"] = 3.0
plt.rcParams["figure.dpi"] = 100
plt.rcParams["lines.linewidth"] = 2.0
plt.rcParams["font.size"] = 14
class Waterfall:
    """Class that holds data and generates the waterfall plot
Parameters
----------
fig : matplotlib.Figure
fig this waterfall plot will be drawn on
canvas : matplotlib.Canvas
canvas this waterfall plot will be drawn on
key_list : list, optional
list of key names. default to None
int_data_list : list, optional
list of 1D reduced data. expect each element to be in (x,y)
format. default to None
unit : tuple, optional
a tuple containing strings of x and y labels
kwargs :
keyword arguments for plotting
"""
def __init__(self, fig=None, canvas=None, *, unit=None, **kwargs):
if not fig:
fig = plt.figure()
self.fig = fig
self.fig.clear()
if not canvas:
canvas = self.fig.canvas
self.canvas = canvas
self.kwargs = kwargs
self.x_array_list = []
self.y_array_list = []
# callback for showing legend
self.canvas.mpl_connect("pick_event", self.on_plot_hover)
self.key_list = []
self.ax = self.fig.add_subplot(111)
self.unit = unit
# add sliders, which store information
self.ydist = 0
self.xdist = 0
y_offset_slider_ax = self.fig.add_axes([0.15, 0.95, 0.3, 0.035])
self.y_offset_slider = Slider(
y_offset_slider_ax,
"y-offset",
0.0,
1.0,
valinit=0.1,
valfmt="%1.2f",
)
self.y_offset_slider.on_changed(self.update_y_offset)
x_offset_slider_ax = self.fig.add_axes([0.6, 0.95, 0.3, 0.035])
self.x_offset_slider = Slider(
x_offset_slider_ax,
"x-offset",
0.0,
1.0,
valinit=0.,
valfmt="%1.2f",
)
self.x_offset_slider.on_changed(self.update_x_offset)
def update(self, key_list, int_data_list):
"""top method to update information carried by class and plot
Parameters
----------
key_list : list, optional
list of keys. default to None.
int_data_list : list, optional
list of 1D data. default to None.
"""
self._adapt_data_list(key_list, int_data_list)
# generate plot
self._update_data()
self._update_plot() # use current value of x,y offset
def _adapt_data_list(self, key_list, int_data_list):
"""method to return stateful information of 1D data list"""
self.key_list.extend(key_list)
# parse
for x, y in int_data_list:
self.xdist = max(np.ptp(x), self.xdist)
self.ydist = max(np.ptp(y), self.ydist)
self.x_array_list.append(x)
self.y_array_list.append(y)
def _update_data(self):
# draw if fresh axes
if len(self.x_array_list) != len(self.key_list):
raise RuntimeError(
f"The keys must match the data! "
f"{len(self.x_array_list)}, "
f"{len(self.key_list):}"
)
if not self.ax.lines:
for ind, el in enumerate(
zip(self.x_array_list, self.y_array_list, self.key_list)
):
x, y, k = el
self.ax.plot(x, y, label=k, picker=5, **self.kwargs)
if len(self.ax.get_lines()) < len(self.y_array_list):
diff = len(self.y_array_list) - len(self.ax.get_lines())
for ind, el in enumerate(
zip(
self.x_array_list[-diff:],
self.y_array_list[-diff:],
self.key_list[-diff:],
)
):
x, y, k = el
self.ax.plot(x, y, label=k, picker=5, **self.kwargs)
def _update_plot(self):
"""core method to update x-, y-offset sliders"""
x_offset_val = self.x_offset_slider.val
y_offset_val = self.y_offset_slider.val
# update matplotlib line data
lines = self.ax.get_lines()
for i, (l, x, y) in enumerate(
zip(lines, self.x_array_list, self.y_array_list)
):
xx = x + self.xdist * i * x_offset_val
yy = y + self.ydist * i * y_offset_val
l.set_data(xx, yy)
self.ax.relim()
self.ax.autoscale_view()
if self.unit:
xlabel, ylabel = self.unit
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.canvas.draw_idle()
def update_y_offset(self, val):
self._update_plot()
def update_x_offset(self, val):
self._update_plot()
    def on_plot_hover(self, event):
        """callback to show the legend when one of the curves is clicked"""
line = event.artist
name = line.get_label()
line.axes.legend(
[name], handlelength=0, handletextpad=0, fancybox=True
)
line.figure.canvas.draw_idle()
def clear(self):
self.key_list.clear()
self.x_array_list.clear()
self.y_array_list.clear()
self.ax.lines.clear()
self.canvas.draw_idle()
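if __name__ == "__main__":
    # A minimal usage sketch (illustrative): draw two offset curves on a fresh
    # figure; an interactive backend is assumed for the sliders to be usable.
    x = np.linspace(0, 10, 200)
    wf = Waterfall(unit=("x", "intensity"))
    wf.update(["sin", "cos"], [(x, np.sin(x)), (x, np.cos(x))])
    plt.show()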
| 30.367021
| 72
| 0.561219
|
abc864b1215c7b766d044d0dbe13ed32ea8afd06
| 1,157
|
py
|
Python
|
Day01-15/pratice_code/HY_Day_03.py
|
reic/groupLearning-Python-100-Days
|
91746e6ee3acf2dbf0e9d324f6c6ce3cb91ed131
|
[
"MIT"
] | 4
|
2020-05-21T06:50:52.000Z
|
2020-09-07T05:39:24.000Z
|
Day01-15/pratice_code/HY_Day_03.py
|
reic/groupLearning-Python-100-Days
|
91746e6ee3acf2dbf0e9d324f6c6ce3cb91ed131
|
[
"MIT"
] | 1
|
2020-05-24T07:26:56.000Z
|
2020-05-25T00:06:02.000Z
|
Day01-15/pratice_code/HY_Day_03.py
|
reic/groupLearning-Python-100-Days
|
91746e6ee3acf2dbf0e9d324f6c6ce3cb91ed131
|
[
"MIT"
] | 1
|
2020-11-05T13:03:42.000Z
|
2020-11-05T13:03:42.000Z
|
'''
Item : Python 100 Days
Time : 20200522
Branching structures: the if statement
# Indentation levels
# 4 spaces per level
'''
# Practice 1 - flat structure, piecewise function evaluation (preferred) -------------------------------------
x = float(input("x = "))
if x > 1 :
y = 3 * x - 5
elif x >= -1:
y = x + 2
else:
y = 5 * x + 3
print( "f (%.2f) = %.2f " % ( x , y ))
# Practice 2 - nested structure ------------------------------------------------
# Nesting: another branch structure can appear inside an if, elif, or else block
x = int(input( "x = "))
if x > 1 :
    y = 3 * x - 5
else :
    if x > -1 :
        y = x + 2
    else :
        y = 5 * x + 3
print( "f (%.2f) = %.2f " % ( x, y))
# Practice 3 - unit conversion -----------------------------------------------------
value = int(input("Enter a length: "))
unit = input("Enter the unit (in OR cm): ")
if unit =='in' :
    print(" %f inches = %f centimeters" % ( value ,value * 2.54))
elif unit == 'cm' :
    print( "%f centimeters = %f inches" % ( value, value / 2.54))
else:
    print("errrr...")
# Practice 4 - convert a score to a grade -----------------------------------------------------
score = int(input("Enter a score: "))
if score >= 90 :
    grade = "A"
elif score >= 80 :
    grade = "B"
elif score >= 70 :
    grade = "C"
elif score >= 60 :
    grade = "D"
else:
    grade = "E"
print("Grade: ", grade)
| 16.768116
| 67
| 0.370787
|
1573a218fa231af67cc812fd8c68c9ebfc76a11d
| 3,563
|
py
|
Python
|
pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL3Pass_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL3Pass_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | null | null | null |
pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL3Pass_test.py
|
mondO/pymtl3
|
9869dda28c01926cee6da94ebdeac2a210150c62
|
[
"BSD-3-Clause"
] | null | null | null |
#=========================================================================
# BehavioralRTLIRL3Pass_test.py
#=========================================================================
# Author : Peitian Pan
# Date : Feb 2, 2019
"""Test the level 3 behavioral RTLIR passes.
The L3 generation, L3 type check, and visualization passes are invoked. The
generation pass results are verified against a reference AST.
"""
from pymtl3.dsl.errors import VarNotDeclaredError
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIR import *
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRGenL3Pass import (
BehavioralRTLIRGenL3Pass,
)
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRTypeCheckL3Pass import (
BehavioralRTLIRTypeCheckL3Pass,
)
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRVisualizationPass import (
BehavioralRTLIRVisualizationPass,
)
from pymtl3.passes.rtlir.errors import PyMTLSyntaxError, PyMTLTypeError
from pymtl3.passes.rtlir.util.test_utility import do_test, expected_failure
from pymtl3.passes.testcases import (
Bits32Foo,
CaseBits32FooInBits32OutComp,
CaseBits32FooInstantiationComp,
CaseBits32FooKwargComp,
CaseBitsAttributeComp,
CaseConstStructInstComp,
CaseStructMissingAttributeComp,
)
def local_do_test( m ):
"""Check if generated behavioral RTLIR is the same as reference."""
if isinstance(m, type):
m = m.DUT()
m.elaborate()
m.apply( BehavioralRTLIRGenL3Pass() )
m.apply( BehavioralRTLIRTypeCheckL3Pass() )
m.apply( BehavioralRTLIRVisualizationPass() )
try:
ref = m._rtlir_test_ref
for blk in m.get_update_blocks():
upblk = m._pass_behavioral_rtlir_gen.rtlir_upblks[ blk ]
assert upblk == ref[ blk.__name__ ]
except AttributeError:
pass
#-------------------------------------------------------------------------
# Correct test cases
#-------------------------------------------------------------------------
def test_L3_struct_attr( do_test ):
a = CaseBits32FooInBits32OutComp.DUT()
a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
[Attribute( Base( a ), 'out' )], Attribute(
Attribute( Base( a ), 'in_' ), 'foo' ), True ) ] ) }
do_test( a )
def test_L3_struct_inst_kwargs( do_test ):
a = CaseBits32FooKwargComp.DUT()
a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
[Attribute( Base( a ), 'out' )], StructInst(
Bits32Foo, [ SizeCast( 32, Number( 42 ) ) ] ), True ) ] ) }
with expected_failure( PyMTLSyntaxError, 'keyword argument is not supported' ):
do_test( a )
def test_L3_struct_inst( do_test ):
a = CaseBits32FooInstantiationComp.DUT()
a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
[Attribute( Base( a ), 'out' )], StructInst(
Bits32Foo, [ SizeCast( 32, Number( 42 ) ) ] ), True ) ] ) }
do_test( a )
def test_L3_const_struct( do_test ):
a = CaseConstStructInstComp.DUT()
a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
[Attribute( Base( a ), 'out' )], SizeCast(32, Number(0)), True ) ] ) }
do_test( a )
#-------------------------------------------------------------------------
# PyMTL type errors
#-------------------------------------------------------------------------
def test_L3_vector_attr( do_test ):
with expected_failure( VarNotDeclaredError, 's.in_ does not have field "foo"' ):
do_test( CaseBitsAttributeComp )
def test_L3_struct_no_field( do_test ):
with expected_failure( VarNotDeclaredError, 's.in_ does not have field "bar"' ):
do_test( CaseStructMissingAttributeComp )
| 37.114583
| 82
| 0.629526
|
7aa5c4d5ed44f0d7149826c36f8dad418bc76007
| 85,451
|
py
|
Python
|
cryspy/A_functions_base/function_2_space_group.py
|
eandklahn/cryspy
|
a664cee1e1ffd5f23e54295a11e479d7d4cda7e5
|
[
"MIT"
] | null | null | null |
cryspy/A_functions_base/function_2_space_group.py
|
eandklahn/cryspy
|
a664cee1e1ffd5f23e54295a11e479d7d4cda7e5
|
[
"MIT"
] | null | null | null |
cryspy/A_functions_base/function_2_space_group.py
|
eandklahn/cryspy
|
a664cee1e1ffd5f23e54295a11e479d7d4cda7e5
|
[
"MIT"
] | null | null | null |
"""
Functions and constants to work with space groups.
List of constants:
-------------------
ACCESIBLE_BRAVAIS_TYPE
ACCESIBLE_IT_COORDINATE_SYSTEM_CODE
ACCESIBLE_LAUE_CLASS
ACCESIBLE_CENTRING_TYPE
ACCESIBLE_CRYSTAL_SYSTEM
ACCESIBLE_NAME_HM_SHORT
ACCESIBLE_NAME_SCHOENFLIES
ACCESIBLE_NAME_HALL_SHORT
ACCESIBLE_REFERENCE_SETTING
DEFAULT_REFERENCE_TABLE_IT_NUMBER_NAME_HALL_NAME_SCHOENFLIES_NAME_HM_SHORT_REFERENCE_SETTING_IT_COORDINATE_SYSTEM_CODE
D_CENTRING_TYPE_SHIFT - accessible list and shift
D_CRYSTAL_FAMILY_DESCRIPTION - accessible list and description
D_BRAVAIS_TYPE_CELL_CONSTRAINT_MODE_ABC - accessible list and description constraint_mode_abc
T_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM - relation between bravais_type, centring_type, crystal_system
List of functions:
-------------------
get_crystal_system_by_it_number(it_number:int)->str
get_default_it_coordinate_system_code_by_it_number(it_number:int)->str
get_it_number_by_name_hm_short(name:str)->int
get_it_number_by_name_schoenflies(name:str)->int
get_it_number_by_name_hall(name:str)->int
get_name_hm_short_by_it_number(it_number:int)->str
get_name_schoenflies_by_it_number(it_number:int)->str
get_name_hall_by_it_number(it_number:int)->str
"""
import os
from numpy import array, transpose, zeros
from fractions import Fraction
from cryspy.A_functions_base.function_1_strings import \
transform_string_to_r_b, transform_r_b_to_string
from typing import Tuple
F_ITABLES = os.path.join(os.path.dirname(__file__), "itables.txt")
F_WYCKOFF = os.path.join(os.path.dirname(__file__), "wyckoff.dat")
def read_el_cards():
"""
    Read information about space groups from the itables file into a list of cards (ldcard).
    Format of the entries in the file:
1 P1 Triclinic
choice: 1
centr: false
pcentr: 0, 0, 0
symmetry: X,Y,Z
2 P-1 Triclinic
...
"""
fid = open(F_ITABLES, "r")
lcontent = fid.readlines()
fid.close()
lcontent = [hh.strip() for hh in lcontent if hh.strip() != ""]
ldcard = []
dcard = None
for hh in lcontent:
lhelp = hh.split()
if lhelp[0].isdigit():
            if dcard is not None:
ldcard.append(dcard)
dcard = {"it_number": int(lhelp[0]), "name": lhelp[1], "singony": lhelp[2]}
else:
lhelp = hh.split(":")
if (lhelp[0].strip() in dcard.keys()):
dcard[lhelp[0].strip()].append(lhelp[1].strip())
else:
dcard[lhelp[0].strip()] = [lhelp[1].strip()]
ldcard.append(dcard)
return ldcard
EL_CARDS = read_el_cards()
def read_wyckoff():
with open(F_WYCKOFF, "r") as fid:
l_cont = fid.readlines()
l_numb_b, l_numb_e = [], []
for _i_line, _line in enumerate(l_cont):
l_h = _line.strip().split()
for _i, _ in enumerate(l_h):
if not (_.isdigit()):
break
if _i >= 4:
l_numb_b.append(_i_line)
if len(l_h) == 0:
l_numb_e.append(_i_line)
l_data = []
for _numb_b, _numb_e in zip(l_numb_b, l_numb_e):
l_param = l_cont[_numb_b].strip().split()[:5]
hm_full = ""
flag = False
for _char in l_cont[_numb_b].strip():
if _char.isalpha():
flag = True
if flag:
hm_full += _char
data = {"it_number": int(l_param[0]), "choice": int(l_param[1]), "centr_000": int(l_param[3] == 1),
"hm_full": hm_full.strip(), "wyckoff": []}
l_cont_2 = l_cont[(_numb_b + 1):_numb_e]
l_wyckoff_symop = []
l_d_card = []
d_card = None
for _line in l_cont_2:
l_h = _line.strip().split()
if l_h[0].isdigit():
if d_card is not None:
l_d_card.append(d_card)
d_card = {"multiplicity": int(l_h[0]), "letter": l_h[1], "site_symmetry": l_h[2], "symop": []}
else:
d_card["symop"].extend(l_h)
l_d_card.append(d_card)
data["wyckoff"].extend(l_d_card)
l_data.append(data)
return l_data
WYCKOFF = read_wyckoff()
def get_crystal_system_by_it_number(it_number: int) -> str:
if it_number is None:
return None
if (it_number >= 1) & (it_number <= 2):
res = "triclinic"
elif (it_number >= 3) & (it_number <= 15):
res = "monoclinic"
elif (it_number >= 16) & (it_number <= 74):
res = "orthorhombic"
elif (it_number >= 75) & (it_number <= 142):
res = "tetragonal"
elif (it_number >= 143) & (it_number <= 167):
res = "trigonal"
elif (it_number >= 168) & (it_number <= 194):
res = "hexagonal"
elif (it_number >= 195) & (it_number <= 230):
res = "cubic"
else:
res = None
return res
ACCESIBLE_IT_NUMBER_TRICLINIC_SYSTEM = tuple(range(1, 3))
ACCESIBLE_IT_NUMBER_MONOCLINIC_SYSTEM = tuple(range(3, 16))
ACCESIBLE_IT_NUMBER_ORTHORHOMBIC_SYSTEM = tuple(range(16, 75))
ACCESIBLE_IT_NUMBER_TETRAGONAL_SYSTEM = tuple(range(75, 143))
ACCESIBLE_IT_NUMBER_TRIGONAL_SYSTEM = tuple(range(143, 168))
ACCESIBLE_IT_NUMBER_HEXAGONAL_SYSTEM = tuple(range(168, 195))
ACCESIBLE_IT_NUMBER_CUBIC_SYSTEM = tuple(range(195, 231))
ACCESIBLE_IT_NUMBER_MONOCLINIC_SYSTEM_TRIPLE_CHOICE = (5, 7, 8, 9, 12, 13, 14, 15)
ACCESIBLE_IT_NUMBER_ORTHORHOMBIC_SYSTEM_DOUBLE_CHOICE = (48, 50, 59, 68, 70)
ACCESIBLE_IT_NUMBER_TETRAGONAL_SYSTEM_DOUBLE_CHOICE = (85, 86, 88, 125, 126, 129, 130, 133, 134, 137, 138, 141, 142)
ACCESIBLE_IT_NUMBER_TRIGONAL_SYSTEM_DOUBLE_AXES = (146, 148, 155, 160, 161, 166, 167)
ACCESIBLE_IT_NUMBER_CUBIC_SYSTEM_DOUBLE_CHOICE = (201, 203, 222, 224, 227, 228)
ACCESIBLE_IT_NUMBER = (ACCESIBLE_IT_NUMBER_TRICLINIC_SYSTEM +
ACCESIBLE_IT_NUMBER_MONOCLINIC_SYSTEM +
ACCESIBLE_IT_NUMBER_ORTHORHOMBIC_SYSTEM +
ACCESIBLE_IT_NUMBER_TETRAGONAL_SYSTEM +
ACCESIBLE_IT_NUMBER_TRIGONAL_SYSTEM +
ACCESIBLE_IT_NUMBER_HEXAGONAL_SYSTEM +
ACCESIBLE_IT_NUMBER_CUBIC_SYSTEM)
def get_default_it_coordinate_system_code_by_it_number(it_number: int) -> str:
crystal_system = get_crystal_system_by_it_number(it_number)
if crystal_system == "triclinic":
it_coordinate_system_code = None
elif crystal_system == "monoclinic":
it_coordinate_system_code = "b1"
elif crystal_system == "orthorhombic":
if it_number in ACCESIBLE_IT_NUMBER_ORTHORHOMBIC_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_code = "2abc"
else:
it_coordinate_system_code = "abc"
elif crystal_system == "tetragonal":
if it_number in ACCESIBLE_IT_NUMBER_TETRAGONAL_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_code = "2"
else:
it_coordinate_system_code = "1"
elif crystal_system == "trigonal":
if it_number in ACCESIBLE_IT_NUMBER_TRIGONAL_SYSTEM_DOUBLE_AXES:
it_coordinate_system_code = "h"
else:
it_coordinate_system_code = "r"
elif crystal_system == "hexagonal":
it_coordinate_system_code = "h"
elif crystal_system == "cubic":
if it_number in ACCESIBLE_IT_NUMBER_CUBIC_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_code = "2"
else:
it_coordinate_system_code = "1"
else:
it_coordinate_system_code = None
return it_coordinate_system_code
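# A brief illustration of the two lookups above (comments only):
#
#     get_crystal_system_by_it_number(14)                      # -> "monoclinic"
#     get_default_it_coordinate_system_code_by_it_number(14)   # -> "b1"
#     get_default_it_coordinate_system_code_by_it_number(227)  # -> "2" (cubic, double-choice group)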
def get_it_coordinate_system_codes_by_it_number(it_number: int) -> str:
crystal_system = get_crystal_system_by_it_number(it_number)
if crystal_system == "triclinic":
it_coordinate_system_codes = ()
elif crystal_system == "monoclinic":
it_coordinate_system_codes = (
"b1", "c1", "a1", "b2", "c2", "a2", "b3", "c3", "a3", "-b1", "-c1", "-a1", "-b2", "-c2", "-a2", "-b3", "-c3",
"-a3")
elif crystal_system == "orthorhombic":
if it_number in ACCESIBLE_IT_NUMBER_ORTHORHOMBIC_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_codes = ("1abc", "1ba-c", "1cab", "1-cba",
"1bca", "1a-cb", "2abc", "2ba-c", "2cab", "2-cba", "2bca", "2a-cb")
else:
it_coordinate_system_codes = ("abc", "ba-c", "cab", "-cba", "bca", "a-cb")
elif crystal_system == "tetragonal":
if it_number in ACCESIBLE_IT_NUMBER_TETRAGONAL_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_codes = ("2", "1")
else:
it_coordinate_system_codes = ("1",)
elif crystal_system == "trigonal":
if it_number in ACCESIBLE_IT_NUMBER_TRIGONAL_SYSTEM_DOUBLE_AXES:
it_coordinate_system_codes = ("h", "r")
else:
it_coordinate_system_codes = ("r",)
elif crystal_system == "hexagonal":
it_coordinate_system_codes = ("h",)
elif crystal_system == "cubic":
if it_number in ACCESIBLE_IT_NUMBER_CUBIC_SYSTEM_DOUBLE_CHOICE:
it_coordinate_system_codes = ("2", "1")
else:
it_coordinate_system_codes = ("1",)
else:
it_coordinate_system_codes = ()
return it_coordinate_system_codes
ACCESIBLE_IT_COORDINATE_SYSTEM_CODE = ("b1", "b2", "b3", "-b1", "-b2", "-b3", "c1", "c2", "c3", "-c1", "-c2", "-c3",
"a1", "a2", "a3", "-a1", "-a2", "-a3", "abc", "ba-c", "cab", "-cba", "bca",
"a-cb", "1abc", "1ba-c", "1cab", "1-cba",
"1bca", "1a-cb", "2abc", "2ba-c", "2cab", "2-cba", "2bca", "2a-cb", "1", "2",
"h", "r")
ACCESIBLE_CRYSTAL_SYSTEM = ("triclinic", "monoclinic", "orthorhombic", "tetragonal", "trigonal", "hexagonal", "cubic")
def get_it_coordinate_system_codes_by_crystal_system(crystal_system: str) -> str:
if crystal_system.startswith("tric"):
it_coordinate_system_codes = ()
elif crystal_system.startswith("m"):
it_coordinate_system_codes = ("b1", "b2", "b3", "-b1", "-b2", "-b3", "c1", "c2", "c3", "-c1", "-c2", "-c3",
"a1", "a2", "a3", "-a1", "-a2", "-a3")
elif crystal_system.startswith("o"):
it_coordinate_system_codes = ("abc", "ba-c", "cab", "-cba", "bca", "a-cb", "1abc", "1ba-c", "1cab", "1-cba",
"1bca", "1a-cb", "2abc", "2ba-c", "2cab", "2-cba", "2bca", "2a-cb")
elif crystal_system.startswith("te"):
it_coordinate_system_codes = ("1", "2")
elif crystal_system.startswith("trig"):
it_coordinate_system_codes = ("h", "r")
elif crystal_system.startswith("h"):
it_coordinate_system_codes = ("h",)
elif crystal_system.startswith("c"):
it_coordinate_system_codes = ("1", "2")
else:
it_coordinate_system_codes = ()
return it_coordinate_system_codes
ACCESIBLE_LAUE_CLASS = ("-1", "2/m", "mmm", "4/m", "4/mmm", "-3", "-3m", "6/m", "6/mmm", "m-3", "m-3m")
ACCESIBLE_CENTRING_TYPE = ("P", "A", "B", "C", "F", "I", "R", "Rrev", "H")
ACCESIBLE_NAME_HM_SHORT = ("P 1", "P -1", "P 2", "P 21", "C 2", "P m", "P c", "C m", "C c", "P 2/m", "P 21/m", "C 2/m",
"P 2/c", "P 21/c", "C 2/c", "P 2 2 2", "P 2 2 21", "P 21 21 2", "P 21 21 21", "C 2 2 21",
"C 2 2 2", "F 2 2 2", "I 2 2 2",
"I 21 21 21", "P m m 2", "P m c 21", "P c c 2", "P m a 2", "P c a 21", "P n c 2", "P m n 21",
"P b a 2", "P n a 21", "P n n 2",
"C m m 2", "C m c 21", "C c c 2", "A m m 2", "A e m 2", "A m a 2", "A e a 2", "F m m 2",
"F d d 2", "I m m 2", "I b a 2", "I m a 2",
"P m m m", "P n n n", "P c c m", "P b a n", "P m m a", "P n n a", "P m n a", "P c c a",
"P b a m", "P c c n", "P b c m", "P n n m",
"P m m n", "P b c n", "P b c a", "P n m a", "C m c m", "C m c e", "C m m m", "C c c m",
"C m m e", "C c c e", "F m m m", "F d d d",
"I m m m", "I b a m", "I b c a", "I m m a", "P 4", "P 41", "P 42", "P 43", "I 4", "I 41",
"P -4", "I -4", "P 4/m", "P 42/m", "P 4/n",
"P 42/n", "I 4/m", "I 41/a", "P 4 2 2", "P 4 21 2", "P 41 2 2", "P 41 21 2", "P 42 2 2",
"P 42 21 2", "P 43 2 2", "P 43 21 2", "I 4 2 2",
"I 41 2 2", "P 4 m m", "P 4 b m", "P 42 c m", "P 42 n m", "P 4 c c", "P 4 n c", "P 42 m c",
"P 42 b c", "I 4 m m", "I 4 c m", "I 41 m d",
"I 41 c d", "P -4 2 m", "P -4 2 c", "P -4 21 m", "P -4 21 c", "P -4 m 2", "P -4 c 2",
"P -4 b 2", "P -4 n 2", "I -4 m 2", "I -4 c 2",
"I -4 2 m", "I -4 2 d", "P 4/m m m", "P 4/m c c", "P 4/n b m", "P 4/n n c", "P 4/m b m",
"P 4/m n c", "P 4/n m m", "P 4/n c c", "P 42/m m c",
"P 42/m c m", "P 42/n b c", "P 42/n n m", "P 42/m b c", "P 42/m n m", "P 42/n m c",
"P 42/n c m", "I 4/m m m", "I 4/m c m", "I 41/a m d",
"I 41/a c d", "P 3", "P 31", "P 32", "R 3", "P -3", "R -3", "P 3 1 2", "P 3 2 1", "P 31 1 2",
"P 31 2 1", "P 32 1 2", "P 32 2 1", "R 3 2",
"P 3 m 1", "P 3 1 m", "P 3 c 1", "P 3 1 c", "R 3 m", "R 3 c", "P -3 1 m", "P -3 1 c",
"P -3 m 1", "P -3 c 1", "R -3 m", "R -3 c", "P 6", "P 61",
"P 65", "P 62", "P 64", "P 63", "P -6", "P 6/m ", "P 63/m", "P 6 2 2", "P 61 2 2",
"P 65 2 2", "P 62 2 2", "P 64 2 2", "P 63 2 2", "P 6 m m",
"P 6 c c", "P 63 c m", "P 63 m c", "P -6 m 2", "P -6 c 2", "P -6 2 m", "P -6 2 c",
"P 6/m m m", "P 6/m c c", "P 63/m c m", "P 63/m m c", "P 2 3",
"F 2 3", "I 2 3", "P 21 3", "I 21 3", "P m -3", "P n -3", "F m -3", "F d -3", "I m -3",
"P a -3", "I a -3", "P 4 3 2", "P 42 3 2", "F 4 3 2",
"F 41 3 2", "I 4 3 2", "P 43 3 2", "P 41 3 2", "I 41 3 2", "P -4 3 m", "F -4 3 m",
"I -4 3 m", "P -4 3 n", "F -4 3 c", "I -4 3 d", "P m -3 m",
"P n -3 n", "P m -3 n", "P n -3 m", "F m -3 m", "F m -3 c", "F d -3 m", "F d -3 c",
"I m -3 m", "I a -3 d")
ACCESIBLE_NAME_HM_FULL = ("P 1", "P -1", "P 2", "P 21", "C 2", "P m", "P c", "C m", "C c", "P 2/m", "P 21/m", "C 2/m",
"P 2/c", "P 21/c", "C 2/c", "P 2 2 2", "P 2 2 21", "P 21 21 2", "P 21 21 21", "C 2 2 21",
"C 2 2 2", "F 2 2 2", "I 2 2 2",
"I 21 21 21", "P m m 2", "P m c 21", "P c c 2", "P m a 2", "P c a 21", "P n c 2", "P m n 21",
"P b a 2", "P n a 21", "P n n 2",
"C m m 2", "C m c 21", "C c c 2", "A m m 2", "A e m 2", "A m a 2", "A e a 2", "F m m 2",
"F d d 2", "I m m 2", "I b a 2", "I m a 2",
"P m m m", "P n n n", "P c c m", "P b a n", "P m m a", "P n n a", "P m n a", "P c c a",
"P b a m", "P c c n", "P b c m", "P n n m",
"P m m n", "P b c n", "P b c a", "P n m a", "C m c m", "C m c e", "C m m m", "C c c m",
"C m m e", "C c c e", "F m m m", "F d d d",
"I m m m", "I b a m", "I b c a", "I m m a", "P 4", "P 41", "P 42", "P 43", "I 4", "I 41",
"P -4", "I -4", "P 4/m", "P 42/m", "P 4/n",
"P 42/n", "I 4/m", "I 41/a", "P 4 2 2", "P 4 21 2", "P 41 2 2", "P 41 21 2", "P 42 2 2",
"P 42 21 2", "P 43 2 2", "P 43 21 2", "I 4 2 2",
"I 41 2 2", "P 4 m m", "P 4 b m", "P 42 c m", "P 42 n m", "P 4 c c", "P 4 n c", "P 42 m c",
"P 42 b c", "I 4 m m", "I 4 c m", "I 41 m d",
"I 41 c d", "P -4 2 m", "P -4 2 c", "P -4 21 m", "P -4 21 c", "P -4 m 2", "P -4 c 2",
"P -4 b 2", "P -4 n 2", "I -4 m 2", "I -4 c 2",
"I -4 2 m", "I -4 2 d", "P 4/m m m", "P 4/m c c", "P 4/n b m", "P 4/n n c", "P 4/m b m",
"P 4/m n c", "P 4/n m m", "P 4/n c c", "P 42/m m c",
"P 42/m c m", "P 42/n b c", "P 42/n n m", "P 42/m b c", "P 42/m n m", "P 42/n m c",
"P 42/n c m", "I 4/m m m", "I 4/m c m", "I 41/a m d",
"I 41/a c d", "P 3", "P 31", "P 32", "R 3", "P -3", "R -3", "P 3 1 2", "P 3 2 1", "P 31 1 2",
"P 31 2 1", "P 32 1 2", "P 32 2 1", "R 3 2",
"P 3 m 1", "P 3 1 m", "P 3 c 1", "P 3 1 c", "R 3 m", "R 3 c", "P -3 1 m", "P -3 1 c",
"P -3 m 1", "P -3 c 1", "R -3 m", "R -3 c", "P 6", "P 61",
"P 65", "P 62", "P 64", "P 63", "P -6", "P 6/m ", "P 63/m", "P 6 2 2", "P 61 2 2", "P 65 2 2",
"P 62 2 2", "P 64 2 2", "P 63 2 2", "P 6 m m",
"P 6 c c", "P 63 c m", "P 63 m c", "P -6 m 2", "P -6 c 2", "P -6 2 m", "P -6 2 c",
"P 6/m m m", "P 6/m c c", "P 63/m c m", "P 63/m m c", "P 2 3",
"F 2 3", "I 2 3", "P 21 3", "I 21 3", "P m -3", "P n -3", "F m -3", "F d -3", "I m -3",
"P a -3", "I a -3", "P 4 3 2", "P 42 3 2", "F 4 3 2",
"F 41 3 2", "I 4 3 2", "P 43 3 2", "P 41 3 2", "I 41 3 2", "P -4 3 m", "F -4 3 m", "I -4 3 m",
"P -4 3 n", "F -4 3 c", "I -4 3 d", "P m -3 m",
"P n -3 n", "P m -3 n", "P n -3 m", "F m -3 m", "F m -3 c", "F d -3 m", "F d -3 c",
"I m -3 m", "I a -3 d")
ACCESIBLE_NAME_SCHOENFLIES = (
"C1.1", "Ci.1", "C2.1", "C2.2", "C2.3", "Cs.1", "Cs.2", "Cs.3", "Cs.4", "C2h.1", "C2h.2", "C2h.3", "C2h.4",
"C2h.5", "C2h.6", "D2.1", "D2.2", "D2.3", "D2.4", "D2.5", "D2.6", "D2.7", "D2.8", "D2.9", "C2v.1", "C2v.2", "C2v.3",
"C2v.4", "C2v.5",
"C2v.6", "C2v.7", "C2v.8", "C2v.9", "C2v.10", "C2v.11", "C2v.12", "C2v.13", "C2v.14", "C2v.15", "C2v.16", "C2v.17",
"C2v.18", "C2v.19",
"C2v.20", "C2v.21", "C2v.22", "D2h.1", "D2h.2", "D2h.3", "D2h.4", "D2h.5", "D2h.6", "D2h.7", "D2h.8", "D2h.9", "D2h.10",
"D2h.11", "D2h.12",
"D2h.13", "D2h.14", "D2h.15", "D2h.16", "D2h.17", "D2h.18", "D2h.19", "D2h.20", "D2h.21", "D2h.22", "D2h.23", "D2h.24",
"D2h.25", "D2h.26",
"D2h.27", "D2h.28", "C4.1", "C4.2", "C4.3", "C4.4", "C4.5", "C4.6", "S4.1", "S4.2", "C4h.1", "C4h.2", "C4h.3", "C4h.4",
"C4h.5", "C4h.6",
"D4.1", "D4.2", "D4.3", "D4.4", "D4.5", "D4.6", "D4.7", "D4.8", "D4.9", "D4.10", "C4v.1", "C4v.2", "C4v.3", "C4v.4",
"C4v.5", "C4v.6", "C4v.7",
"C4v.8", "C4v.9", "C4v.10", "C4v.11", "C4v.12", "D2d.1", "D2d.2", "D2d.3", "D2d.4", "D2d.5", "D2d.6", "D2d.7", "D2d.8",
"D2d.9", "D2d.10",
"D2d.11", "D2d.12", "D4h.1", "D4h.2", "D4h.3", "D4h.4", "D4h.5", "D4h.6", "D4h.7", "D4h.8", "D4h.9", "D4h.10", "D4h.11",
"D4h.12", "D4h.13",
"D4h.14", "D4h.15", "D4h.16", "D4h.17", "D4h.18", "D4h.19", "D4h.20", "C3.1", "C3.2", "C3.3", "C3.4", "C3i.1", "C3i.2",
"D3.1", "D3.2", "D3.3",
"D3.4", "D3.5", "D3.6", "D3.7", "C3v.1", "C3v.2", "C3v.3", "C3v.4", "C3v.5", "C3v.6", "D3d.1", "D3d.2", "D3d.3",
"D3d.4", "D3d.5", "D3d.6",
"C6.1", "C6.2", "C6.3", "C6.4", "C6.5", "C6.6", "C3h.1", "C6h.1", "C6h.2", "D6.1", "D6.2", "D6.3", "D6.4", "D6.5",
"D6.6", "C6v.1", "C6v.2",
"C6v.3", "C6v.4", "D3h.1", "D3h.2", "D3h.3", "D3h.4", "D6h.1", "D6h.2", "D6h.3", "D6h.4", "T.1", "T.2", "T.3", "T.4",
"T.5", "Th.1", "Th.2",
"Th.3", "Th.4", "Th.5", "Th.6", "Th.7", "O.1", "O.2", "O.3", "O.4", "O.5", "O.6", "O.7", "O.8", "Td.1", "Td.2", "Td.3",
"Td.4", "Td.5", "Td.6",
"Oh.1", "Oh.2", "Oh.3", "Oh.4", "Oh.5", "Oh.6", "Oh.7", "Oh.8", "Oh.9", "Oh.10")
ACCESIBLE_NAME_HALL_SHORT = (
"P 1", "-P 1", "P 2y", "P 2yb", "C 2y", "P -2y", "P -2yc", "C -2y", "C -2yc", "-P 2y", "-P 2yb", "-C 2y", "-P 2yc",
"-P 2ybc",
"-C 2yc", "P 2 2", "P 2c 2", "P 2 2ab", "P 2ac 2ab", "C 2c 2", "C 2 2", "F 2 2", "I 2 2", "I 2b 2c", "P 2 -2",
"P 2c -2", "P 2 -2c", "P 2 -2a", "P 2c -2ac",
"P 2 -2bc", "P 2ac -2", "P 2 -2ab", "P 2c -2n", "P 2 -2n", "C 2 -2", "C 2c -2", "C 2 -2c", "A 2 -2", "A 2 -2b",
"A 2 -2a", "A 2 -2ab", "F 2 -2",
"F 2 -2d", "I 2 -2", "I 2 -2c", "I 2 -2a", "-P 2 2", "-P 2ab 2bc", "-P 2 2c", "-P 2ab 2b", "-P 2a 2a", "-P 2a 2bc",
"-P 2ac 2", "-P 2a 2ac", "-P 2 2ab",
"-P 2ab 2ac", "-P 2c 2b", "-P 2 2n", "-P 2ab 2a", "-P 2n 2ab", "-P 2ac 2ab", "-P 2ac 2n", "-C 2c 2", "-C 2ac 2",
"-C 2 2", "-C 2 2c",
"-C 2a 2", "-C 2a 2ac", "-F 2 2", "-F 2uv 2vw", "-I 2 2", "-I 2 2c", "-I 2b 2c", "-I 2b 2", "P 4", "P 4w", "P 4c",
"P 4cw", "I 4", "I 4bw", "P -4", "I -4",
"-P 4", "-P 4c", "-P 4a", "-P 4bc", "-I 4", "-I 4ad", "P 4 2", "P 4ab 2ab", "P 4w 2c", "P 4abw 2nw", "P 4c 2",
"P 4n 2n", "P 4cw 2c", "P 4nw 2abw", "I 4 2",
"I 4bw 2bw", "P 4 -2", "P 4 -2ab", "P 4c -2c", "P 4n -2n", "P 4 -2c", "P 4 -2n", "P 4c -2", "P 4c -2ab", "I 4 -2",
"I 4 -2c", "I 4bw -2", "I 4bw -2c",
"P -4 2", "P -4 2c", "P -4 2ab", "P -4 2n", "P -4 -2", "P -4 -2c", "P -4 -2ab", "P -4 -2n", "I -4 -2", "I -4 -2c",
"I -4 2", "I -4 2bw", "-P 4 2",
"-P 4 2c", "-P 4a 2b", "-P 4a 2bc", "-P 4 2ab", "-P 4 2n", "-P 4a 2a", "-P 4a 2ac", "-P 4c 2", "-P 4c 2c", "-P 4ac 2b",
"-P 4ac 2bc", "-P 4c 2ab",
"-P 4n 2n", "-P 4ac 2a", "-P 4ac 2ac", "-I 4 2", "-I 4 2c", "-I 4bd 2", "-I 4bd 2c", "P 3", "P 31", "P 32", "R 3",
"-P 3", "-R 3",
"P 3 2", "P 3 2\"", "P 31 2 (0 0 4)", "P 31 2\"", "P 32 2 (0 0 2)", "P 32 2\"", "R 3 2\"", "P 3 -2\"", "P 3 -2",
"P 3 -2\"c", "P 3 -2c", "R 3 -2\"", "R 3 -2\"c",
"-P 3 2", "-P 3 2c", "-P 3 2\"", "-P 3 2\"c", "-R 3 2\"", "-R 3 2\"c", "P 6", "P 61", "P 65", "P 62", "P 64", "P 6c",
"P -6", "-P 6", "-P 6c", "P 6 2",
"P 61 2 (0 0 5)", "P 65 2 (0 0 1)", "P 62 2 (0 0 4)", "P 64 2 (0 0 2)", "P 6c 2c", "P 6 -2", "P 6 -2c", "P 6c -2",
"P 6c -2c", "P -6 2", "P -6c 2", "P -6 -2",
"P -6c -2c", "-P 6 2", "-P 6 2c", "-P 6c 2", "-P 6c 2c", "P 2 2 3", "F 2 2 3", "I 2 2 3", "P 2ac 2ab 3", "I 2b 2c 3",
"-P 2 2 3", "-P 2ab 2bc 3",
"-F 2 2 3", "-F 2uv 2vw 3", "-I 2 2 3", "-P 2ac 2ab 3", "-I 2b 2c 3", "P 4 2 3", "P 4n 2 3", "F 4 2 3", "F 4d 2 3",
"I 4 2 3", "P 4acd 2ab 3", "P 4bd 2ab 3",
"I 4bd 2c 3", "P -4 2 3", "F -4 2 3", "I -4 2 3", "P -4n 2 3", "F -4a 2 3", "I -4bd 2c 3", "-P 4 2 3", "-P 4a 2bc 3",
"-P 4n 2 3", "-P 4bc 2bc 3",
"-F 4 2 3", "-F 4a 2 3", "-F 4vw 2vw 3", "-F 4ud 2vw 3", "-I 4 2 3", "-I 4bd 2c 3")
ACCESIBLE_REFERENCE_SETTING = tuple(
[f"{str(_1).zfill(3):}: {_2:}" for _1, _2 in zip(range(1, 231), ACCESIBLE_NAME_HALL_SHORT)])
DEFAULT_REFERENCE_TABLE_IT_NUMBER_NAME_HALL_NAME_SCHOENFLIES_NAME_HM_SHORT_REFERENCE_SETTING_IT_COORDINATE_SYSTEM_CODE = tuple(
[
(_1, _2, _3, _4, _5, get_default_it_coordinate_system_code_by_it_number(_1)) for _1, _2, _3, _4, _5 in
zip(range(1, 231), ACCESIBLE_NAME_HALL_SHORT, ACCESIBLE_NAME_SCHOENFLIES, ACCESIBLE_NAME_HM_SHORT,
ACCESIBLE_REFERENCE_SETTING)
])
def get_it_number_by_name_hm_short(name: str) -> int:
if name in ACCESIBLE_NAME_HM_SHORT:
it_number = ACCESIBLE_NAME_HM_SHORT.index(name) + 1
else:
it_number = None
return it_number
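# Illustrative usage: get_it_number_by_name_hm_short("P 21/c") returns 14
# (the position of the symbol in ACCESIBLE_NAME_HM_SHORT plus one).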
def get_it_number_by_name_schoenflies(name: str) -> int:
if (name in ACCESIBLE_NAME_SCHOENFLIES):
it_number = ACCESIBLE_NAME_SCHOENFLIES.index(name) + 1
else:
it_number = None
return it_number
def get_it_number_by_name_hall(name: str) -> int:
if (name in ACCESIBLE_NAME_HALL_SHORT):
it_number = ACCESIBLE_NAME_HALL_SHORT.index(name) + 1
else:
it_number = None
return it_number
def get_name_hm_short_by_it_number(it_number: int) -> str:
if (it_number in ACCESIBLE_IT_NUMBER):
name = ACCESIBLE_NAME_HM_SHORT[it_number - 1]
else:
name = None
return name
def get_name_schoenflies_by_it_number(it_number: int) -> str:
if it_number in ACCESIBLE_IT_NUMBER:
name = ACCESIBLE_NAME_SCHOENFLIES[it_number - 1]
else:
name = None
return name
def get_name_hall_by_it_number(it_number: int) -> str:
if it_number in ACCESIBLE_IT_NUMBER:
name = ACCESIBLE_NAME_HALL_SHORT[it_number - 1]
else:
name = None
return name
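# Illustrative usage: get_name_hall_by_it_number(14) returns "-P 2ybc".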
# FIXME: this table should be checked; the entry for space group 2 duplicates the symbol given for space group 1.
REFERENCE_TABLE_TRICLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(1, None, "P 1 1 1"), (2, None, "P 1 1 1")
)
# from IT A Table 4.3.2.1
REFERENCE_TABLE_MONOCLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(3, "b1", "P 1 2 1"), (3, "-b1", "P 1 2 1"), (3, "c1", "P 1 1 2"), (3, "-c1", "P 1 1 2"), (3, "a1", "P 2 1 1"),
(3, "-a1", "P 2 1 1"),
(4, "b1", "P 1 21 1"), (4, "-b1", "P 1 21 1"), (4, "c1", "P 1 1 21"), (4, "-c1", "P 1 1 21"), (4, "a1", "P 21 1 1"),
(4, "-a1", "P 21 1 1"),
(5, "b1", "C 1 2 1"), (5, "-b1", "A 1 2 1"), (5, "c1", "A 1 1 2"), (5, "-c1", "B 1 1 2"), (5, "a1", "B 2 1 1"),
(5, "-a1", "C 2 1 1"),
(5, "b2", "A 1 2 1"), (5, "-b2", "C 1 2 1"), (5, "c2", "B 1 1 2"), (5, "-c2", "A 1 1 2"), (5, "a2", "C 2 1 1"),
(5, "-a2", "B 2 1 1"),
(5, "b3", "I 1 2 1"), (5, "-b3", "I 1 2 1"), (5, "c3", "I 1 1 2"), (5, "-c3", "I 1 1 2"), (5, "a3", "I 2 1 1"),
(5, "-a3", "I 2 1 1"),
(6, "b1", "P 1 m 1"), (6, "-b1", "P 1 m 1"), (6, "c1", "P 1 1 m"), (6, "-c1", "P 1 1 m"), (6, "a1", "P m 1 1"),
(6, "-a1", "P m 1 1"),
(7, "b1", "P 1 c 1"), (7, "-b1", "P 1 a 1"), (7, "c1", "P 1 1 a"), (7, "-c1", "P 1 1 b"), (7, "a1", "P b 1 1"),
(7, "-a1", "P c 1 1"),
(7, "b2", "P 1 n 1"), (7, "-b2", "P 1 n 1"), (7, "c2", "P 1 1 n"), (7, "-c2", "P 1 1 n"), (7, "a2", "P n 1 1"),
(7, "-a2", "P n 1 1"),
(7, "b3", "P 1 a 1"), (7, "-b3", "P 1 c 1"), (7, "c3", "P 1 1 b"), (7, "-c3", "P 1 1 a"), (7, "a3", "P c 1 1"),
(7, "-a3", "P b 1 1"),
(8, "b1", "C 1 m 1"), (8, "-b1", "A 1 m 1"), (8, "c1", "A 1 1 m"), (8, "-c1", "B 1 1 m"), (8, "a1", "B m 1 1"),
(8, "-a1", "C m 1 1"),
(8, "b2", "A 1 m 1"), (8, "-b2", "C 1 m 1"), (8, "c2", "B 1 1 m"), (8, "-c2", "A 1 1 m"), (8, "a2", "C m 1 1"),
(8, "-a2", "B m 1 1"),
(8, "b3", "I 1 m 1"), (8, "-b3", "I 1 m 1"), (8, "c3", "I 1 1 m"), (8, "-c3", "I 1 1 m"), (8, "a3", "I m 1 1"),
(8, "-a3", "I m 1 1"),
(9, "b1", "C 1 c 1"), (9, "-b1", "A 1 a 1"), (9, "c1", "A 1 1 a"), (9, "-c1", "B 1 1 b"), (9, "a1", "B b 1 1"),
(9, "-a1", "C c 1 1"),
(9, "b2", "A 1 n 1"), (9, "-b2", "C 1 n 1"), (9, "c2", "B 1 1 n"), (9, "-c2", "A 1 1 n"), (9, "a2", "C n 1 1"),
(9, "-a2", "B n 1 1"),
(9, "b3", "I 1 a 1"), (9, "-b3", "I 1 c 1"), (9, "c3", "I 1 1 b"), (9, "-c3", "I 1 1 a"), (9, "a3", "I c 1 1"),
(9, "-a3", "I b 1 1"),
(10, "b1", "P 1 2/m 1"), (10, "-b1", "P 1 2/m 1"), (10, "c1", "P 1 1 2/m"), (10, "-c1", "P 1 1 2/m"),
(10, "a1", "P 2/m 1 1"), (10, "-a1", "P 2/m 1 1"),
(11, "b1", "P 1 21/m 1"), (11, "-b1", "P 1 21/m 1"), (11, "c1", "P 1 1 21/m"), (11, "-c1", "P 1 1 21/m"),
(11, "a1", "P 21/m 1 1"), (11, "-a1", "P 21/m 1 1"),
(12, "b1", "C 1 2/m 1"), (12, "-b1", "A 1 2/m 1"), (12, "c1", "A 1 1 2/m"), (12, "-c1", "B 1 1 2/m"),
(12, "a1", "B 2/m 1 1"), (12, "-a1", "C 2/m 1 1"),
(12, "b2", "A 1 2/m 1"), (12, "-b2", "C 1 2/m 1"), (12, "c2", "B 1 1 2/m"), (12, "-c2", "A 1 1 2/m"),
(12, "a2", "C 2/m 1 1"), (12, "-a2", "B 2/m 1 1"),
(12, "b3", "I 1 2/m 1"), (12, "-b3", "I 1 2/m 1"), (12, "c3", "I 1 1 2/m"), (12, "-c3", "I 1 1 2/m"),
(12, "a3", "I 2/m 1 1"), (12, "-a3", "I 2/m 1 1"),
(13, "b1", "P 1 2/c 1"), (13, "-b1", "P 1 2/a 1"), (13, "c1", "P 1 1 2/a"), (13, "-c1", "P 1 1 2/b"),
(13, "a1", "P 2/b 1 1"), (13, "-a1", "P 2/c 1 1"),
(13, "b2", "P 1 2/n 1"), (13, "-b2", "P 1 2/n 1"), (13, "c2", "P 1 1 2/n"), (13, "-c2", "P 1 1 2/n"),
(13, "a2", "P 2/n 1 1"), (13, "-a2", "P 2/n 1 1"),
(13, "b3", "P 1 2/a 1"), (13, "-b3", "P 1 2/c 1"), (13, "c3", "P 1 1 2/b"), (13, "-c3", "P 1 1 2/a"),
(13, "a3", "P 2/c 1 1"), (13, "-a3", "P 2/b 1 1"),
(14, "b1", "P 1 21/c 1"), (14, "-b1", "P 1 21/a 1"), (14, "c1", "P 1 1 21/a"), (14, "-c1", "P 1 1 21/b"),
(14, "a1", "P 21/b 1 1"), (14, "-a1", "P 21/c 1 1"),
(14, "b2", "P 1 21/n 1"), (14, "-b2", "P 1 21/n 1"), (14, "c2", "P 1 1 21/n"), (14, "-c2", "P 1 1 21/n"),
(14, "a2", "P 21/n 1 1"), (14, "-a2", "P 21/n 1 1"),
(14, "b3", "P 1 21/a 1"), (14, "-b3", "P 1 21/c 1"), (14, "c3", "P 1 1 21/b"), (14, "-c3", "P 1 1 21/a"),
(14, "a3", "P 21/c 1 1"), (14, "-a3", "P 21/b 1 1"),
(15, "b1", "C 1 2/c 1"), (15, "-b1", "A 1 2/a 1"), (15, "c1", "A 1 1 2/a"), (15, "-c1", "B 1 1 2/b"),
(15, "a1", "B 2/b 1 1"), (15, "-a1", "C 2/c 1 1"),
(15, "b2", "A 1 2/n 1"), (15, "-b2", "C 1 2/n 1"), (15, "c2", "B 1 1 2/n"), (15, "-c2", "A 1 1 2/n"),
(15, "a2", "C 2/n 1 1"), (15, "-a2", "B 2/n 1 1"),
(15, "b3", "I 1 2/a 1"), (15, "-b3", "I 1 2/c 1"), (15, "c3", "I 1 1 2/b"), (15, "-c3", "I 1 1 2/a"),
(15, "a3", "I 2/c 1 1"), (15, "-a3", "I 2/b 1 1"))
# from IT A Table 4.3.2.1
REFERENCE_TABLE_ORTHORHOMBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(16, "abc", "P 2 2 2"), (16, "ba-c", "P 2 2 2"), (16, "cab", "P 2 2 2"), (16, "-cba", "P 2 2 2"),
(16, "bca", "P 2 2 2"), (16, "a-cb", "P 2 2 2"),
(17, "abc", "P 2 2 21"), (17, "ba-c", "P 2 2 21"), (17, "cab", "P 21 2 2"), (17, "-cba", "P 21 2 2"),
(17, "bca", "P 2 21 2"), (17, "a-cb", "P 2 21 2"),
(18, "abc", "P 21 21 2"), (18, "ba-c", "P 21 21 2"), (18, "cab", "P 2 21 21"), (18, "-cba", "P 2 21 21"),
(18, "bca", "P 21 2 21"), (18, "a-cb", "P 21 2 21"),
(19, "abc", "P 21 21 21"), (19, "ba-c", "P 21 21 21"), (19, "cab", "P 21 21 21"), (19, "-cba", "P 21 21 21"),
(19, "bca", "P 21 21 21"), (19, "a-cb", "P 21 21 21"),
(20, "abc", "C 2 2 21"), (20, "ba-c", "C 2 2 21"), (20, "cab", "A 21 2 2"), (20, "-cba", "A 21 2 2"),
(20, "bca", "B 2 21 2"), (20, "a-cb", "B 2 21 2"),
(21, "abc", "C 2 2 2"), (21, "ba-c", "C 2 2 2"), (21, "cab", "A 2 2 2"), (21, "-cba", "A 2 2 2"),
(21, "bca", "B 2 2 2"), (21, "a-cb", "B 2 2 2"),
(22, "abc", "F 2 2 2"), (22, "ba-c", "F 2 2 2"), (22, "cab", "F 2 2 2"), (22, "-cba", "F 2 2 2"),
(22, "bca", "F 2 2 2"), (22, "a-cb", "F 2 2 2"),
(23, "abc", "I 2 2 2"), (23, "ba-c", "I 2 2 2"), (23, "cab", "I 2 2 2"), (23, "-cba", "I 2 2 2"),
(23, "bca", "I 2 2 2"), (23, "a-cb", "I 2 2 2"),
(24, "abc", "I 21 21 21"), (24, "ba-c", "I 21 21 21"), (24, "cab", "I 21 21 21"), (24, "-cba", "I 21 21 21"),
(24, "bca", "I 21 21 21"), (24, "a-cb", "I 21 21 21"),
(25, "abc", "P m m 2"), (25, "ba-c", "P m m 2"), (25, "cab", "P 2 m m"), (25, "-cba", "P 2 m m"),
(25, "bca", "P m 2 m"), (25, "a-cb", "P m 2 m"),
(26, "abc", "P m c 21"), (26, "ba-c", "P c m 21"), (26, "cab", "P 21 m a"), (26, "-cba", "P 21 a m"),
(26, "bca", "P b 21 m"), (26, "a-cb", "P m 21 b"),
(27, "abc", "P c c 2"), (27, "ba-c", "P c c 2"), (27, "cab", "P 2 a a"), (27, "-cba", "P 2 a a"),
(27, "bca", "P b 2 b"), (27, "a-cb", "P b 2 b"),
(28, "abc", "P m a 2"), (28, "ba-c", "P b m 2"), (28, "cab", "P 2 m b"), (28, "-cba", "P 2 c m"),
(28, "bca", "P c 2 m"), (28, "a-cb", "P m 2 a"),
(29, "abc", "P c a 21"), (29, "ba-c", "P b c 21"), (29, "cab", "P 21 a b"), (29, "-cba", "P 21 c a"),
(29, "bca", "P c 21 b"), (29, "a-cb", "P b 21 a"),
(30, "abc", "P n c 2"), (30, "ba-c", "P c n 2"), (30, "cab", "P 2 n a"), (30, "-cba", "P 2 a n"),
(30, "bca", "P b 2 n"), (30, "a-cb", "P n 2 b"),
(31, "abc", "P m n 21"), (31, "ba-c", "P n m 21"), (31, "cab", "P 21 m n"), (31, "-cba", "P 21 n m"),
(31, "bca", "P n 21 m"), (31, "a-cb", "P m 21 n"),
(32, "abc", "P b a 2"), (32, "ba-c", "P b a 2"), (32, "cab", "P 2 c b"), (32, "-cba", "P 2 c b"),
(32, "bca", "P c 2 a"), (32, "a-cb", "P c 2 a"),
(33, "abc", "P n a 21"), (33, "ba-c", "P b n 21"), (33, "cab", "P 21 n b"), (33, "-cba", "P 21 c n"),
(33, "bca", "P c 21 n"), (33, "a-cb", "P n 21 a"),
(34, "abc", "P n n 2"), (34, "ba-c", "P n n 2"), (34, "cab", "P 2 n n"), (34, "-cba", "P 2 n n"),
(34, "bca", "P n 2 n"), (34, "a-cb", "P n 2 n"),
(35, "abc", "C m m 2"), (35, "ba-c", "C m m 2"), (35, "cab", "A 2 m m"), (35, "-cba", "A 2 m m"),
(35, "bca", "B m 2 m"), (35, "a-cb", "B m 2 m"),
(36, "abc", "C m c 21"), (36, "ba-c", "C c m 21"), (36, "cab", "A 21 m a"), (36, "-cba", "A 21 a m"),
(36, "bca", "B b 21 m"), (36, "a-cb", "B m 21 b"),
(37, "abc", "C c c 2"), (37, "ba-c", "C c c 2"), (37, "cab", "A 2 a a"), (37, "-cba", "A 2 a a"),
(37, "bca", "B b 2 b"), (37, "a-cb", "B b 2 b"),
(38, "abc", "A m m 2"), (38, "ba-c", "B m m 2"), (38, "cab", "B 2 m m"), (38, "-cba", "C 2 m m"),
(38, "bca", "C m 2 m"), (38, "a-cb", "A m 2 m"),
(39, "abc", "A e m 2"), (39, "ba-c", "B m e 2"), (39, "cab", "B 2 e m"), (39, "-cba", "C 2 m e"),
(39, "bca", "C m 2 e"), (39, "a-cb", "A e 2 m"),
(40, "abc", "A m a 2"), (40, "ba-c", "B b m 2"), (40, "cab", "B 2 m b"), (40, "-cba", "C 2 c m"),
(40, "bca", "C c 2 m"), (40, "a-cb", "A m 2 a"),
(41, "abc", "A e a 2"), (41, "ba-c", "B b e 2"), (41, "cab", "B 2 e b"), (41, "-cba", "C 2 c e"),
(41, "bca", "C c 2 e"), (41, "a-cb", "A e 2 a"),
(42, "abc", "F m m 2"), (42, "ba-c", "F m m 2"), (42, "cab", "F 2 m m"), (42, "-cba", "F 2 m m"),
(42, "bca", "F m 2 m"), (42, "a-cb", "F m 2 m"),
(43, "abc", "F d d 2"), (43, "ba-c", "F d d 2"), (43, "cab", "F 2 d d"), (43, "-cba", "F 2 d d"),
(43, "bca", "F d 2 d"), (43, "a-cb", "F d 2 d"),
(44, "abc", "I m m 2"), (44, "ba-c", "I m m 2"), (44, "cab", "I 2 m m"), (44, "-cba", "I 2 m m"),
(44, "bca", "I m 2 m"), (44, "a-cb", "I m 2 m"),
(45, "abc", "I b a 2"), (45, "ba-c", "I b a 2"), (45, "cab", "I 2 c b"), (45, "-cba", "I 2 c b"),
(45, "bca", "I c 2 a"), (45, "a-cb", "I c 2 a"),
(46, "abc", "I m a 2"), (46, "ba-c", "I b m 2"), (46, "cab", "I 2 m b"), (46, "-cba", "I 2 c m"),
(46, "bca", "I c 2 m"), (46, "a-cb", "I m 2 a"),
(47, "abc", "P m m m"), (47, "ba-c", "P m m m"), (47, "cab", "P m m m"), (47, "-cba", "P m m m"),
(47, "bca", "P m m m"), (47, "a-cb", "P m m m"),
(48, "1abc", "P n n n"), (48, "2abc", "P n n n"), (48, "1ba-c", "P n n n"), (48, "2ba-c", "P n n n"),
(48, "1cab", "P n n n"), (48, "2cab", "P n n n"),
(48, "1-cba", "P n n n"), (48, "2-cba", "P n n n"), (48, "1bca", "P n n n"), (48, "2bca", "P n n n"),
(48, "1a-cb", "P n n n"), (48, "2a-cb", "P n n n"),
(49, "abc", "P c c m"), (49, "ba-c", "P c c m"), (49, "cab", "P m a a"), (49, "-cba", "P m a a"),
(49, "bca", "P b m b"), (49, "a-cb", "P b m b"),
(50, "1abc", "P b a n"), (50, "2abc", "P b a n"), (50, "1ba-c", "P b a n"), (50, "2ba-c", "P b a n"),
(50, "1cab", "P n c b"), (50, "2cab", "P n c b"),
(50, "1-cba", "P n c b"), (50, "2-cba", "P n c b"), (50, "1bca", "P c n a"), (50, "2bca", "P c n a"),
(50, "1a-cb", "P c n a"), (50, "2a-cb", "P c n a"),
(51, "abc", "P m m a"), (51, "ba-c", "P m m b"), (51, "cab", "P b m m"), (51, "-cba", "P c m m"),
(51, "bca", "P m c m"), (51, "a-cb", "P m a m"),
(52, "abc", "P n n a"), (52, "ba-c", "P n n b"), (52, "cab", "P b n n"), (52, "-cba", "P c n n"),
(52, "bca", "P n c n"), (52, "a-cb", "P n a n"),
(53, "abc", "P m n a"), (53, "ba-c", "P n m b"), (53, "cab", "P b m n"), (53, "-cba", "P c n m"),
(53, "bca", "P n c m"), (53, "a-cb", "P m a n"),
(54, "abc", "P c c a"), (54, "ba-c", "P c c b"), (54, "cab", "P b a a"), (54, "-cba", "P c a a"),
(54, "bca", "P b c b"), (54, "a-cb", "P b a b"),
(55, "abc", "P b a m"), (55, "ba-c", "P b a m"), (55, "cab", "P m c b"), (55, "-cba", "P m c b"),
(55, "bca", "P c m a"), (55, "a-cb", "P c m a"),
(56, "abc", "P c c n"), (56, "ba-c", "P c c n"), (56, "cab", "P n a a"), (56, "-cba", "P n a a"),
(56, "bca", "P b n b"), (56, "a-cb", "P b n b"),
(57, "abc", "P b c m"), (57, "ba-c", "P c a m"), (57, "cab", "P m c a"), (57, "-cba", "P m a b"),
(57, "bca", "P b m a"), (57, "a-cb", "P c m b"),
(58, "abc", "P n n m"), (58, "ba-c", "P n n m"), (58, "cab", "P m n n"), (58, "-cba", "P m n n"),
(58, "bca", "P n m n"), (58, "a-cb", "P n m n"),
(59, "1abc", "P m m n"), (59, "2abc", "P m m n"), (59, "1ba-c", "P m m n"), (59, "2ba-c", "P m m n"),
(59, "1cab", "P n m m"), (59, "2cab", "P n m m"),
(59, "1-cba", "P n m m"), (59, "2-cba", "P n m m"), (59, "1bca", "P m n m"), (59, "2bca", "P m n m"),
(59, "1a-cb", "P m n m"), (59, "2a-cb", "P m n m"),
(60, "abc", "P b c n"), (60, "ba-c", "P c a n"), (60, "cab", "P n c a"), (60, "-cba", "P n a b"),
(60, "bca", "P b n a"), (60, "a-cb", "P c n b"),
(61, "abc", "P b c a"), (61, "ba-c", "P c a b"), (61, "cab", "P b c a"), (61, "-cba", "P c a b"),
(61, "bca", "P b c a"), (61, "a-cb", "P c a b"),
(62, "abc", "P n m a"), (62, "ba-c", "P m n b"), (62, "cab", "P b n m"), (62, "-cba", "P c m n"),
(62, "bca", "P m c n"), (62, "a-cb", "P n a m"),
(63, "abc", "C m c m"), (63, "ba-c", "C c m m"), (63, "cab", "A m m a"), (63, "-cba", "A m a m"),
(63, "bca", "B b m m"), (63, "a-cb", "B m m b"),
(64, "abc", "C m c e"), (64, "ba-c", "C c m e"), (64, "cab", "A e m a"), (64, "-cba", "A e a m"),
(64, "bca", "B b e m"), (64, "a-cb", "B m e b"),
(65, "abc", "C m m m"), (65, "ba-c", "C m m m"), (65, "cab", "A m m m"), (65, "-cba", "A m m m"),
(65, "bca", "B m m m"), (65, "a-cb", "B m m m"),
(66, "abc", "C c c m"), (66, "ba-c", "C c c m"), (66, "cab", "A m a a"), (66, "-cba", "A m a a"),
(66, "bca", "B b m b"), (66, "a-cb", "B b m b"),
(67, "abc", "C m m e"), (67, "ba-c", "C m m e"), (67, "cab", "A e m m"), (67, "-cba", "A e m m"),
(67, "bca", "B m e m"), (67, "a-cb", "B m e m"),
(68, "1abc", "C c c e"), (68, "2abc", "C c c e"), (68, "1ba-c", "C c c e"), (68, "2ba-c", "C c c e"),
(68, "1cab", "A e a a"), (68, "2cab", "A e a a"),
(68, "1-cba", "A e a a"), (68, "2-cba", "A e a a"), (68, "1bca", "B b e b"), (68, "2bca", "B b e b"),
(68, "1a-cb", "B b e b"), (68, "2a-cb", "B b e b"),
(69, "abc", "F m m m"), (69, "ba-c", "F m m m"), (69, "cab", "F m m m"), (69, "-cba", "F m m m"),
(69, "bca", "F m m m"), (69, "a-cb", "F m m m"),
(70, "1abc", "F d d d"), (70, "2abc", "F d d d"), (70, "1ba-c", "F d d d"), (70, "2ba-c", "F d d d"),
(70, "1cab", "F d d d"), (70, "2cab", "F d d d"),
(70, "1-cba", "F d d d"), (70, "2-cba", "F d d d"), (70, "1bca", "F d d d"), (70, "2bca", "F d d d"),
(70, "1a-cb", "F d d d"), (70, "2a-cb", "F d d d"),
(71, "abc", "I m m m"), (71, "ba-c", "I m m m"), (71, "cab", "I m m m"), (71, "-cba", "I m m m"),
(71, "bca", "I m m m"), (71, "a-cb", "I m m m"),
(72, "abc", "I b a m"), (72, "ba-c", "I b a m"), (72, "cab", "I m c b"), (72, "-cba", "I m c b"),
(72, "bca", "I c m a"), (72, "a-cb", "I c m a"),
(73, "abc", "I b c a"), (73, "ba-c", "I c a b"), (73, "cab", "I b c a"), (73, "-cba", "I c a b"),
(73, "bca", "I b c a"), (73, "a-cb", "I c a b"),
(74, "abc", "I m m a"), (74, "ba-c", "I m m b"), (74, "cab", "I b m m"), (74, "-cba", "I c m m"),
(74, "bca", "I m c m"), (74, "a-cb", "I m a m")
)
# from IT A Table 4.3.2.1
REFERENCE_TABLE_TETRAGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(79, "1", "I 4"), (80, "1", "I 41"), (87, "1", "I 4/m"), (88, "1", "I 41/a"), (88, "2", "I 41/a"),
(89, "1", "P 4 2 2"),
(90, "1", "P 4 21 2"), (91, "1", "P 41 2 2"), (92, "1", "P 41 21 2"), (93, "1", "P 42 2 2"), (94, "1", "P 42 21 2"),
(95, "1", "P 43 2 2"),
(96, "1", "P 43 21 2"), (97, "1", "I 4 2 2"), (98, "1", "I 41 2 2"), (99, "1", "P 4 m m"), (100, "1", "P 4 b m"),
(101, "1", "P 42 c m"),
(102, "1", "P 42 n m"), (103, "1", "P 4 c c"), (104, "1", "P 4 n c"), (105, "1", "P 42 m c"),
(106, "1", "P 42 b c"), (107, "1", "I 4 m m"),
(108, "1", "I 4 c e"), (109, "1", "I 41 m d"), (110, "1", "I 41 c d"), (111, "1", "P -4 2 m"),
(112, "1", "P -4 2 c"), (113, "1", "P -4 21 m"),
(114, "1", "P -4 21 c"), (115, "1", "P -4 m 2"), (116, "1", "P -4 c 2"), (117, "1", "P -4 b 2"),
(118, "1", "P -4 n 2"), (119, "1", "I -4 m 2"),
(120, "1", "I -4 c 2"), (121, "1", "I -4 2 m"), (122, "1", "I -4 2 d"), (123, "1", "P 4/m 2/m 2/m"),
(124, "1", "P 4/m 2/c 2/c"), (125, "1", "P 4/n 2/b 2/m"),
(125, "2", "P 4/n 2/b 2/m"), (126, "1", "P 4/n 2/n 2/c"), (126, "2", "P 4/n 2/n 2/c"), (127, "1", "P 4/m 21/b 2/m"),
(128, "1", "P 4/m 21/n 2/c"), (129, "1", "P 4/n 21/m 2/m"),
(129, "2", "P 4/n 21/m 2/m"), (130, "1", "P 4/n 21/c 2/c"), (130, "2", "P 4/n 21/c 2/c"),
(131, "1", "P 42/m 2/m 2/c"), (132, "1", "P 42/m 2/c 2/m"), (133, "1", "P 42/n 2/b 2/c"),
(133, "2", "P 42/n 2/b 2/c"), (134, "1", "P 42/n 2/n 2/m"), (134, "2", "P 42/n 2/n 2/m"),
(135, "1", "P 42/m 21/b 2/c"), (136, "1", "P 42/m 21/n 2/m"), (137, "1", "P 42/n 21/m 2/c"),
(137, "2", "P 42/n 21/m 2/c"), (138, "1", "P 42/n 21/c 2/m"), (138, "2", "P 42/n 21/c 2/m"),
(139, "1", "I 4/m 21/m 2/m"), (140, "1", "I 4/m 2/c 2/m"), (141, "1", "I 41/a 2/m 2/d"),
(141, "2", "I 41/a 2/m 2/d"), (142, "1", "I 41/a 2/c 2/d"), (142, "2", "I 41/a 2/c 2/d")
)
# from IT A Table 4.3.2.1
REFERENCE_TABLE_TRIGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(146, "r", "R 3"), (146, "h", "R 3"), (148, "r", "R -3"), (148, "h", "R -3"), (149, "r", "P 3 1 2"),
(150, "r", "P 3 2 1"),
(151, "r", "P 31 1 2"), (152, "r", "P 31 2 1"), (153, "r", "P 32 1 2"), (154, "r", "P 32 2 1"), (155, "r", "R 3 2"),
(155, "h", "R 3 2"),
(156, "r", "P 3 m 1"), (157, "r", "P 3 1 m"), (158, "r", "P 3 c 1"), (159, "r", "P 3 1 c"), (160, "r", "R 3 m"),
(160, "h", "R 3 m"),
(161, "r", "R 3 c"), (161, "h", "R 3 c"), (162, "r", "P -3 1 2/m"), (163, "r", "P -3 1 2/c"),
(164, "r", "P -3 2/m 1"), (165, "r", "P -3 2/c 1"),
(166, "r", "R -3 2/m"), (166, "h", "R -3 2/m"), (167, "r", "R -3 2/c"), (167, "h", "R -3 2/c")
)
# from IT A Table 4.3.2.1
REFERENCE_TABLE_HEXAGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(177, "h", "P 6 2 2"), (178, "h", "P 61 2 2"), (179, "h", "P 65 2 2"), (180, "h", "P 62 2 2"),
(181, "h", "P 64 2 2"), (182, "h", "P 63 2 2"),
(183, "h", "P 6 m m"), (184, "h", "P 6 c c"), (185, "h", "P 63 c m"), (186, "h", "P 63 m c"),
(187, "h", "P -6 m 2"), (188, "h", "P -6 c 2"),
(189, "h", "P -6 2 m"), (190, "h", "P -6 2 c"), (191, "h", "P 6/m 2/m 2/m"), (192, "h", "P 6/m 2/c 2/c"),
(193, "h", "P 63/m 2/c 2/m"), (194, "h", "P 63/m 2/m 2/c")
)
REFERENCE_TABLE_CUBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
(196, "1", "F 2 3"), (197, "1", "I 2 3"), (199, "1", "I 21 3"), (202, "1", "F 2/m -3"), (203, "1", "F 2/d -3"),
(203, "2", "F 2/d -3"),
(204, "1", "I 2/m -3"), (206, "1", "I 21/a -3"), (207, "1", "P 4 3 2"), (208, "1", "P 42 3 2"),
(209, "1", "F 4 3 2"), (210, "1", "F 41 3 2"),
(211, "1", "I 4 3 2"), (212, "1", "P 43 3 2"), (213, "1", "P 41 3 2"), (214, "1", "I 41 3 2"),
(215, "1", "P -4 3 m"), (216, "1", "F -4 3 m"),
(217, "1", "I -4 3 m"), (218, "1", "P -4 3 n"), (219, "1", "F -4 3 c"), (220, "1", "I -4 3 d"),
(221, "1", "P 4/m -3 2/m"), (222, "1", "P 4/n -3 2/n"),
(222, "2", "P 4/n -3 2/n"), (223, "1", "P 42/m -3 2/n"), (224, "1", "P 42/n -3 2/m"), (224, "2", "P 42/n -3 2/m"),
(225, "1", "F 4/m -3 2/m"), (226, "1", "F 4/m -3 2/c"),
(227, "1", "F 41/d -3 2/m"), (227, "2", "F 41/d -3 2/m"), (228, "1", "F 41/d -3 2/n"), (228, "2", "F 41/d -3 2/n"),
(229, "1", "I 4/m -3 2/m"), (230, "1", "I 41/a -3 2/d")
)
REFERENCE_TABLE_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED = (
REFERENCE_TABLE_TRICLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED+
REFERENCE_TABLE_MONOCLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED +
REFERENCE_TABLE_ORTHORHOMBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED +
REFERENCE_TABLE_TETRAGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED +
REFERENCE_TABLE_TRIGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED +
REFERENCE_TABLE_HEXAGONAL_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED +
REFERENCE_TABLE_CUBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED)
ACCESIBLE_NAME_HM_EXTENDED = frozenset([_[2] for _ in REFERENCE_TABLE_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED])
def get_it_number_it_coordinate_system_codes_by_name_hm_extended(name: str) -> Tuple[int, Tuple[str]]:
flag = True
it_number = None
it_coordinate_system_codes = []
for _it_number, _it_coordinate_system_code, _name in REFERENCE_TABLE_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED:
if name == _name:
if it_number is not None:
flag &= it_number == _it_number
it_number = _it_number
it_coordinate_system_codes.append(_it_coordinate_system_code)
if not (flag):
print(f"For some reason for hm_name_extended \"{name:}\" it_number is not unique")
return it_number, tuple(it_coordinate_system_codes)
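# Illustrative usage: get_it_number_it_coordinate_system_codes_by_name_hm_extended("C 1 2/c 1")
# returns (15, ("b1",)), since that extended symbol occurs once in the reference table.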
def get_name_hm_extended_by_it_number_it_coordinate_system_code(it_number: int, it_coordinate_system_code) -> str:
name_hm_extended = None
for _it_number, _it_coordinate_system_code, _name in REFERENCE_TABLE_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED:
if ((it_number == _it_number) & (it_coordinate_system_code == _it_coordinate_system_code)):
name_hm_extended = _name
break
return name_hm_extended
# IT A Table 12.3.4.1. Standard space-group symbols
REFERENCE_TABLE_TRICLINIC_IT_NUMBER_NAME_HM_FULL = (
(1, "P 1"), (2, "P -1")
)
REFERENCE_TABLE_MONOCLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_FULL = (
(3, "b1", "P 1 2 1"), (3, "c1", "P 1 1 2"), (4, "b1", "P 1 21 1"), (4, "c1", "P 1 1 21"),
(5, "b1", "C 1 2 1"), (5, "c1", "A 1 1 2"), (5, "-c1", "B 1 1 2"), (6, "b1", "P 1 m 1"), (6, "c1", "P 1 1 m"),
(7, "b1", "P 1 c 1"),
(7, "c1", "P 1 1 a"), (7, "-c1", "P 1 1 b"), (8, "b1", "C 1 m 1"), (8, "c1", "A 1 1 m"), (8, "-c1", "B 1 1 m"),
(9, "b1", "C 1 c 1"),
(9, "c1", "A 1 1 a"), (9, "-c1", "B 1 1 b"), (10, "b1", "P 1 2/m 1"), (10, "c1", "P 1 1 2/m"),
(11, "b1", "P 1 21/m 1"), (11, "c1", "P 1 1 21/m"),
(12, "b1", "C 1 2/m 1"), (12, "c1", "A 1 1 2/m"), (12, "-c1", "B 1 1 2/m"), (13, "b1", "P 1 2/c 1"),
(13, "c1", "P 1 1 2/a"), (13, "-c1", "P 1 1 2/b"),
(14, "b1", "P 1 21/c 1"), (14, "c1", "P 1 1 21/a"), (14, "-c1", "P 1 1 21/b"), (15, "b1", "C 1 2/c 1"),
(15, "c1", "A 1 1 2/a"), (15, "-c1", "B 1 1 2/b"))
REFERENCE_TABLE_ORTHORHOMBIC_IT_NUMBER_NAME_HM_FULL = (
(16, "P 2 2 2"), (17, "P 2 2 21"), (18, "P 21 21 2"), (19, "P 21 21 21"), (20, "C 2 2 21"), (21, "C 2 2 2"),
(22, "F 2 2 2"), (23, "I 2 2 2"), (24, "I 21 21 21"), (25, "P m m 2"), (26, "P m c 21"), (27, "P c c 2"),
(28, "P m a 2"), (29, "P c a 21"), (30, "P n c 2"), (31, "P m n 21"), (32, "P b a 2"), (33, "P n a 21"),
(34, "P n n 2"), (35, "C m m 2"), (36, "C m c 21"), (37, "C c c 2"), (38, "A m m 2"), (39, "A e m 2"),
(40, "A m a 2"), (41, "A e a 2"), (42, "F m m 2"), (43, "F d d 2"), (44, "I m m 2"), (45, "I b a 2"),
(46, "I m a 2"), (47, "P 2/m 2/m 2/m"), (48, "P 2/n 2/n 2/n"), (49, "P 2/c 2/c 2/m"), (50, "P 2/b 2/a 2/n"),
(51, "P 21/m 2/m 2/a"),
(52, "P 2/n 21/n 2/a"), (53, "P 2/m 2/n 21/a"), (54, "P 21/c 2/c 2/a"), (55, "P 21/b 21/a 2/m"),
(56, "P 21/c 21/c 2/n"), (57, "P 2/b 21/c 21/m"),
(58, "P 21/n 21/n 2/m"), (59, "P 21/m 21/m 2/n"), (60, "P 21/b 2/c 21/n"), (61, "P 21/b 21/c 21/a"),
(62, "P 21/n 21/m 21/a"), (63, "C 2/m 2/c 21/m"),
(64, "C 2/m 2/c 21/e"), (65, "C 2/m 2/m 2/m"), (66, "C 2/c 2/c 2/m"), (67, "C 2/m 2/m 2/e"), (68, "C 2/c 2/c 2/e"),
(69, "F 2/m 2/m 2/m"),
(70, "F 2/d 2/d 2/d"), (71, "I 2/m 2/m 2/m"), (72, "I 2/b 2/a 2/m"), (73, "I 21/b 21/c 21/a"),
(74, "I 21/m 21/m 21/a")
)
REFERENCE_TABLE_TETRAGONAL_IT_NUMBER_NAME_HM_FULL = (
(75, "P 4"), (76, "P 41"), (77, "P 42"), (78, "P 43"), (79, "I 4"), (80, "I 41"),
(81, "P -4"), (82, "I -4"), (83, "P 4/m"), (84, "P 42/m"), (85, "P 4/n"), (86, "P 42/n"),
(87, "I 4/m"), (88, "I 41/a"), (89, "P 4 2 2"), (90, "P 4 21 2"), (91, "P 41 2 2"), (92, "P 41 21 2"),
(93, "P 42 2 2"), (94, "P 42 21 2"), (95, "P 43 2 2"), (96, "P 43 21 2"), (97, "I 4 2 2"), (98, "I 41 2 2"),
(99, "P 4 m m"), (100, "P 4 b m"), (101, "P 42 c m"), (102, "P 42 n m"), (103, "P 4 c c"), (104, "P 4 n c"),
(105, "P 42 m c"), (106, "P 42 b c"), (107, "I 4 m m"), (108, "I 4 c m"), (109, "I 41 m d"), (110, "I 41 c d"),
(111, "P -4 2 m"), (112, "P -4 2 c"), (113, "P -4 21 m"), (114, "P -4 21 c"), (115, "P -4 m 2"), (116, "P -4 c 2"),
(117, "P -4 b 2"), (118, "P -4 n 2"), (119, "I -4 m 2"), (120, "I -4 c 2"), (121, "I -4 2 m"), (122, "I -4 2 d"),
(123, "P 4/m 2/m 2/m"), (124, "P 4/m 2/c 2/c"), (125, "P 4/n 2/b 2/m"), (126, "P 4/n 2/n 2/c"),
(127, "P 4/m 21/b 2/m"), (128, "P 4/m 21/n 2/c"),
(129, "P 4/n 21/m 2/m"), (130, "P 4/n 21/c 2/c"), (131, "P 42/m 2/m 2/c"), (132, "P 42/m 2/c 2/m"),
(133, "P 42/n 2/b 2/c"), (134, "P 42/n 2/n 2/m"),
(135, "P 42/m 21/b 2/c"), (136, "P 42/m 21/n 2/m"), (137, "P 42/n 21/m 2/c"), (138, "P 42/n 21/c 2/m"),
(139, "I 4/m 21/m 2/m"), (140, "I 4/m 2/c 2/m"),
(141, "I 41/a 2/m 2/d"), (142, "I 41/a 2/c 2/d")
)
REFERENCE_TABLE_TRIGONAL_IT_NUMBER_NAME_HM_FULL = (
(143, "P 3"), (144, "P 31"), (145, "P 32"), (146, "R 3"), (147, "P -3"), (148, "R -3"),
(149, "P 3 1 2"), (150, "P 3 2 1"), (151, "P 31 1 2"), (152, "P 31 2 1"), (153, "P 32 1 2"), (154, "P 32 2 1"),
(155, "R 3 2"), (156, "P 3 m 1"), (157, "P 3 1 m"), (158, "P 3 c 1"), (159, "P 3 1 c"), (160, "R 3 m"),
(161, "R 3 c"), (162, "P -3 1 2/m"), (163, "P -3 1 2/c"), (164, "P -3 2/m 1"), (165, "P -3 2/c 1"),
(166, "R -3 2/m"),
(167, "R -3 2/c")
)
REFERENCE_TABLE_HEXAGONAL_IT_NUMBER_NAME_HM_FULL = (
(168, "P 6"), (169, "P 61"), (170, "P 65"), (171, "P 62"), (172, "P 64"), (173, "P 63"),
(174, "P -6"), (175, "P 6/m "), (176, "P 63/m"), (177, "P 6 2 2"), (178, "P 61 2 2"), (179, "P 65 2 2"),
(180, "P 62 2 2"), (181, "P 64 2 2"), (182, "P 63 2 2"), (183, "P 6 m m"), (184, "P 6 c c"), (185, "P 63 c m"),
(186, "P 63 m c"), (187, "P -6 m 2"), (188, "P -6 c 2"), (189, "P -6 2 m"), (190, "P -6 2 c"),
(191, "P 6/m 2/m 2/m"), (192, "P 6/m 2/c 2/c"), (193, "P 63/m 2/c 2/m"), (194, "P 63/m 2/m 2/c")
)
REFERENCE_TABLE_CUBIC_IT_NUMBER_NAME_HM_FULL = (
(195, "P 23"), (196, "F 23"), (197, "I 23"), (198, "P 21 3"), (199, "I 21 3"),
(200, "P 2/m -3"), (201, "P 2/n -3"), (202, "F 2/m -3"), (203, "F 2/d -3"), (204, "I 2/m -3"), (205, "P 21/a -3"),
(206, "I 21/a -3"),
(207, "P 4 3 2"), (208, "P 42 3 2"), (209, "F 4 3 2"), (210, "F 41 3 2"), (211, "I 4 3 2"), (212, "P 43 3 2"),
(213, "P 41 3 2"), (214, "I 41 3 2"),
(215, "P -4 3 m"), (216, "F -4 3 m"), (217, "I -4 3 m"), (218, "P -4 3 n"), (219, "F -4 3 c"), (220, "I -4 3 d"),
(221, "P 4/m -3 2/m"), (222, "P 4/n -3 2/n"), (223, "P 42/m -3 2/n"), (224, "P 42/n -3 2/m"), (225, "F 4/m -3 2/m"),
(226, "F 4/m -3 2/c"),
(227, "F 41/d -3 2/m"), (228, "F 41/d -3 2/c"), (229, "I 4/m -3 2/m"), (230, "I 41/a -3 2/d")
)
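# Note: the monoclinic settings are kept in the three-column table
# REFERENCE_TABLE_MONOCLINIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_FULL above and are therefore
# not part of the two-column concatenation below.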
REFERENCE_TABLE_IT_NUMBER_NAME_HM_FULL = (REFERENCE_TABLE_TRICLINIC_IT_NUMBER_NAME_HM_FULL +
REFERENCE_TABLE_ORTHORHOMBIC_IT_NUMBER_NAME_HM_FULL +
REFERENCE_TABLE_TETRAGONAL_IT_NUMBER_NAME_HM_FULL +
REFERENCE_TABLE_TRIGONAL_IT_NUMBER_NAME_HM_FULL +
REFERENCE_TABLE_HEXAGONAL_IT_NUMBER_NAME_HM_FULL +
REFERENCE_TABLE_CUBIC_IT_NUMBER_NAME_HM_FULL)
ACCESIBLE_NAME_HM_FULL = frozenset([_[1] for _ in REFERENCE_TABLE_IT_NUMBER_NAME_HM_FULL])
def get_it_number_by_name_hm_full(name: str) -> int:
_l = [_it_number for _it_number, _name in REFERENCE_TABLE_IT_NUMBER_NAME_HM_FULL if (name == _name)]
if len(_l) == 0:
it_number = None
else:
it_number = _l[0]
return it_number
def get_name_hm_full_by_it_number(it_number: int) -> str:
_l = [_name for _it_number, _name in REFERENCE_TABLE_IT_NUMBER_NAME_HM_FULL if (it_number == _it_number)]
if len(_l) == 0:
name_hm_full = None
else:
name_hm_full = _l[0]
return name_hm_full
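# Illustrative usage: get_name_hm_full_by_it_number(62) returns "P 21/n 21/m 21/a";
# monoclinic numbers (3-15) return None because they are tabulated separately (see note above).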
REFERENCE_TABLE_CENTRING_TYPE_SHIFT = (
("P", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),)),
("A", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(0, 2), Fraction(1, 2), Fraction(1, 2)))),
("B", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(1, 2), Fraction(0, 2), Fraction(1, 2)))),
("C", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(1, 2), Fraction(1, 2), Fraction(0, 2)))),
("F", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(0, 2), Fraction(1, 2), Fraction(1, 2)),
(Fraction(1, 2), Fraction(0, 2), Fraction(1, 2)),
(Fraction(1, 2), Fraction(1, 2), Fraction(0, 2)))),
("I", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(1, 2), Fraction(1, 2), Fraction(1, 2)))),
("R", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(2, 3)))),
("Rrev", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(1, 3), Fraction(2, 3), Fraction(1, 3)),
(Fraction(2, 3), Fraction(1, 3), Fraction(2, 3)))),
("H", ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)),
(Fraction(2, 3), Fraction(1, 3), Fraction(0, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(0, 3))))
)
ACCESIBLE_CENTRING_TYPE = frozenset([_[0] for _ in REFERENCE_TABLE_CENTRING_TYPE_SHIFT])
def get_shift_by_centring_type(centring_type: str):
shift = ()
for _1, _2 in REFERENCE_TABLE_CENTRING_TYPE_SHIFT:
if _1 == centring_type:
shift = _2
break
return shift
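# Illustrative usage: get_shift_by_centring_type("I") returns
# ((Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)), (Fraction(1, 2), Fraction(1, 2), Fraction(1, 2))).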
def get_centring_type_by_name_hm_extended(hm_extended: str) -> str:
centring_type = hm_extended[0]  # note: taking the first character only is not correct for the Rrev centring type
if not (centring_type in ACCESIBLE_CENTRING_TYPE):
centring_type = None
return centring_type
ACCESIBLE_LATTICE_TYPE = ("P", "C", "I", "F", "R")
def get_lattice_type_by_name_hm_short(hm_short: str) -> str:
lattice_type = hm_short[0]
if not (lattice_type in ACCESIBLE_LATTICE_TYPE):
lattice_type = None
return lattice_type
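# Note: symbols starting with "A" or "B" (e.g. "A m m 2") are not covered by ACCESIBLE_LATTICE_TYPE,
# so this function returns None for them.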
REFERENCE_TABLE_PATTERSON_NAME_HM_LATTICE_TYPE_LAUE_CLASS = (
("P -1", "P", "-1"), ("P 2/m", "P", "2/m"), ("C 2/m", "C", "2/m"), ("P m m m", "P", "mmm"), ("C m m m", "C", "mmm"),
("I m m m", "I", "mmm"),
("F m m m", "F", "mmm"), ("P 4/m", "P", "4/m"), ("I 4/m", "I", "4/m"), ("P 4/m m m", "P", "4/mmm"),
("I 4/m m m", "I", "4/mmm"), ("P -3", "P", "-3"),
("R -3", "R", "-3"), ("P -3 m 1", "P", "-3m1"), ("R -3 m", "R", "-3m"), ("P -3 1 m", "P", "-31m"),
("P 6/m", "P", "6/m"), ("P 6/m m m", "P", "6/mmm"),
("P m -3", "P", "m-3"), ("I m -3", "I", "m-3"), ("F m -3", "F", "m-3"), ("P m -3 m", "P", "m-3m"),
("I m -3 m", "I", "m-3m"), ("F m -3 m", "F", "m-3m")
)
ACCESIBLE_PATTERSON_NAME_HM = frozenset([_[0] for _ in REFERENCE_TABLE_PATTERSON_NAME_HM_LATTICE_TYPE_LAUE_CLASS])
def get_patterson_name_hm_by_lattice_type_laue_class(lattice_type: str, laue_class: str) -> str:
patterson_name_hm = None
for _1, _2, _3 in REFERENCE_TABLE_PATTERSON_NAME_HM_LATTICE_TYPE_LAUE_CLASS:
if ((_2 == lattice_type) & (_3 == laue_class)):
patterson_name_hm = _1
break
return patterson_name_hm
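# Illustrative usage: get_patterson_name_hm_by_lattice_type_laue_class("P", "2/m") returns "P 2/m".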
REFERENCE_TABLE_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM = (
("aP", "P", "triclinic"),
("mP", "P", "monoclinic"),
("mS", "A", "monoclinic"),
("mS", "B", "monoclinic"),
("mS", "C", "monoclinic"),
("oP", "P", "orthorhombic"),
("oS", "A", "orthorhombic"),
("oS", "B", "orthorhombic"),
("oS", "C", "orthorhombic"),
("oI", "I", "orthorhombic"),
("oF", "F", "orthorhombic"),
("tP", "P", "tetragonal"),
("tI", "I", "tetragonal"),
("hP", "P", "hexagonal"),
("hP", "P", "trigonal"), # FIXME: not sure
("hR", "R", "trigonal"),
("hR", "Rrev", "trigonal"),
("hR", "H", "trigonal"),
("cP", "P", "cubic"),
("cI", "I", "cubic"),
("cF", "F", "cubic")
)
ACCESIBLE_BRAVAIS_TYPE = frozenset([_[0] for _ in REFERENCE_TABLE_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM])
ACCESIBLE_CRYSTAL_SYSTEM = frozenset([_[2] for _ in REFERENCE_TABLE_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM])
def get_bravais_type_by_centring_type_crystal_system(centring_type: str, crystal_system: str) -> str:
bravais_type = None
for _bravais_type, _centring_type, _crystal_system in REFERENCE_TABLE_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM:
if ((_centring_type == centring_type) & (_crystal_system == crystal_system)):
bravais_type = _bravais_type
return bravais_type
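# Illustrative usage: get_bravais_type_by_centring_type_crystal_system("C", "monoclinic") returns "mS".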
def get_bravais_types_by_crystal_system(crystal_system: str) -> str:
if crystal_system.startswith("tric"):
bravais_types = ("aP",)
elif crystal_system.startswith("m"):
bravais_types = ("mP", "mS")
elif crystal_system.startswith("o"):
bravais_types = ("oP", "oS", "oI", "oF")
elif crystal_system.startswith("te"):
bravais_types = ("tI", "tF")
elif crystal_system.startswith("h"):
bravais_types = ("hP",)
elif crystal_system.startswith("trig"):
bravais_types = ("hR",)
elif crystal_system.startswith("c"):
bravais_types = ("cP", "cI", "cF")
else:
bravais_types = ()
return bravais_types
def get_centring_types_by_bravais_type(_bravais_type: str) -> str:
if _bravais_type.endswith("P"):
centring_types = ("P",)
elif _bravais_type.endswith("I"):
centring_types = ("I",)
elif _bravais_type.endswith("F"):
centring_types = ("F",)
elif _bravais_type.endswith("S"):
centring_types = ("A", "B", "C")
elif _bravais_type.endswith("R"):
centring_types = ("R", "Rrev", "H",)
return centring_types
def get_crystal_system_by_bravais_type(_bravais_type: str) -> str:
crystal_system = None
if _bravais_type.startswith("a"):
crystal_system = "triclinic"
elif _bravais_type.startswith("m"):
crystal_system = "monoclinic"
elif _bravais_type.startswith("o"):
crystal_system = "orthorhombic"
elif _bravais_type.startswith("t"):
crystal_system = "tetragonal"
elif _bravais_type == "hP":
crystal_system = "hexagonal"
elif _bravais_type == "hR":
crystal_system = "trigonal"
elif _bravais_type.startswith("c"):
crystal_system = "cubic"
return crystal_system
def get_type_hm(_name: str) -> Tuple[str]:
l_res = []
if _name in ACCESIBLE_NAME_HM_SHORT:
l_res.append("short")
if _name in ACCESIBLE_NAME_HM_FULL:
l_res.append("full")
if _name in ACCESIBLE_NAME_HM_EXTENDED:
l_res.append("extended")
return tuple(l_res)
def get_notation(_name: str) -> str:
res = None
if len(get_type_hm(_name)) != 0: res = "Hermann-Mauguin"
if _name in ACCESIBLE_NAME_HALL_SHORT: res = "Hall"
if _name in ACCESIBLE_NAME_SCHOENFLIES: res = "Schoenflies"
return res
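# Illustrative usage: get_notation("-P 2ybc") returns "Hall" and get_notation("C2h.5") returns "Schoenflies".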
# IT A: Table 8.3.5.1. Sequence of generators for the crystal classes
# The space-group generators differ from those listed here by their glide or screw
# components. The generator 1 is omitted, except for crystal class 1. The
# subscript of a symbol denotes the characteristic direction of that operation,
# where necessary. The subscripts z, y, 110, 1-10, 10-1 and 111 refer to the
# directions [001], [010], [110], [1-10], [10-1] and [111], respectively. For mirror
# reflections m, the ‘direction of m’ refers to the normal to the mirror plane. The
# subscripts may be likewise interpreted as Miller indices of that plane
# Hermann–Mauguin symbol of crystal class
# Generators Gi (sequence left to right)
REFERENCE_TABLE_POINT_GROUP_HM_SYMBOL_GENERATORS = (
("1", ("1",)), ("-1", ("-1",)), ("2", ("2",)), ("m", ("m",)), ("2/m", (2, -1)), ("222", ("2z", "2y")),
("mm2", ("2z", "my")),
("mmm", ("2z", "2y", "-1")), ("4", ("2z", "4")), ("-4", ("2z", "-4")), ("4/m", ("2z", "4", "-1")),
("422", ("2z", "4", "2y")),
("4mm", ("2z", "4", "my")), ("-42m", ("2z", "-4", "2y")), ("-4m2", ("2z", "-4", "my")),
("4/mmm", ("2z", "4", "2y", "-1")), ("3", ("3",)),
("-3", ("3", "-1")), ("321", ("3", "2110")), ("321:r", ("3111", "210-1")), ("312", ("3", "21-10")),
("3m1", ("3", "m110")),
("3m1:r", ("3111", "m10-1")),
("31m", ("3", "m1-10")),
("-3m1", ("3", "2110", "-1")),
("-3m1:r", ("3111", "210-1", -1)),
("-31m", ("3", "21-10", "-1")),
("6", ("3", "2z")),
("-6", ("3", "mz")),
("6-m", ("3", "2z", "-1")),
("622", ("3", "2z", "2110")),
("6mm", ("3", "2z", "m110")),
("-6m2", ("3", "mz", "m110")),
("-62m", ("3", "mz", "2110")),
("6/mmm", ("3", "2z", "2110", "-1")),
("23", ("2z", "2y", "3111")),
("m-3", ("2z", "2y", "3111", "-1")),
("432", ("2z", "2y", "3111", "2110")),
("-43m", ("2z", "2y", "3111", "m1-10")),
("m-3m", ("2z", "2y", "3111", "2110", "-1"))
)
def get_generators_by_point_group_hm(name: str) -> Tuple[str]:
generators = ()
for _1, _2 in REFERENCE_TABLE_POINT_GROUP_HM_SYMBOL_GENERATORS:
if _1 == name:
generators = _2
break
return generators
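# Illustrative usage: get_generators_by_point_group_hm("mmm") returns ("2z", "2y", "-1").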
# IT A: Table 10.1.2.4. Names and symbols of the 32 crystal classes
REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES = (
("-1", "-1", "1", "1", "C1"),
("-1", "-1", "-1", "-1", "Ci"),
("2/m", "2/m", "2", "2", "C2"),
("2/m", "2/m", "m", "m", "Cs"),
("2/m", "2/m", "2/m", "2/m", "C2h"),
("mmm", "2/m2/m2/m", "222", "222", "D2"),
("mmm", "2/m2/m2/m", "mm2", "mm2", "C2v"),
("mmm", "2/m2/m2/m", "mmm", "2/m2/m2/m", "D2h"),
("4/m", "4/m", "4", "4", "C4"),
("4/m", "4/m", "-4", "-4", "S4"),
("4/m", "4/m", "4/m", "4/m", "C4h"),
("4/mmm", "4/m2/m2/m", "422", "422", "D4"),
("4/mmm", "4/m2/m2/m", "4mm", "4mm", "C4v"),
("4/mmm", "4/m2/m2/m", "-42m", "-42m", "D2d"),
("4/mmm", "4/m2/m2/m", "4/mmm", "4/m2/m2/m", "D4h"),
("-3", "-3", "3", "3", "C3"),
("-3", "-3", "-3", "-3", "C3i"),
("-3m", "-32/m", "32", "32", "D3"),
("-3m", "-32/m", "3m", "3m", "C3v"),
("-3m", "-32/m", "-3m", "-32/m", "D3d"),
("6/m", "6/m", "6", "6", "C6"),
("6/m", "6/m", "-6", "-6", "C3h"),
("6/m", "6/m", "6/m", "6/m", "C6h"),
("6/mmm", "6/m2/m2/m", "622", "622", "D6"),
("6/mmm", "6/m2/m2/m", "6mm", "6mm", "D6v"),
("6/mmm", "6/m2/m2/m", "-62m", "-62m", "D3h"),
("6/mmm", "6/m2/m2/m", "6/mmm", "6/m2/m2/m", "D6h"),
("m-3", "2/m-3", "23", "23", "T"),
("m-3", "2/m-3", "m-3", "2/m-3", "Th"),
("m-3m", "4/m-32/m", "432", "432", "O"),
("m-3m", "4/m-32/m", "-43m", "-43m", "Td"),
("m-3m", "4/m-32/m", "m-3m", "4/m-32/m", "Oh")
)
ACCESIBLE_LAUE_CLASS_FULL = frozenset(
[_[1] for _ in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES])
ACCESIBLE_POINT_GROUP_SYMBOL_SHORT = frozenset(
[_[2] for _ in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES])
ACCESIBLE_POINT_GROUP_SYMBOL_FULL = frozenset(
[_[3] for _ in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES])
def get_laue_class_by_name_schoenflies(name: str) -> str:
laue_class = None
symb = name.split(".")[0]
for _1, _2, _3, _4, _5 in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES:
if _5 == symb:
laue_class = _1
break
return laue_class
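# Illustrative usage: get_laue_class_by_name_schoenflies("C2h.5") returns "2/m".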
def get_point_group_hm_full_by_name_schoenflies(name: str) -> str:
point_group_hm_full = None
symb = name.split(".")[0]
for _1, _2, _3, _4, _5 in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES:
if _5 == symb:
point_group_hm_full = _4
break
return point_group_hm_full
def get_point_group_hm_short_by_name_schoenflies(name: str) -> str:
point_group_hm_short = None
symb = name.split(".")[0]
for _1, _2, _3, _4, _5 in REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES:
if _5 == symb:
point_group_hm_short = _3
break
return point_group_hm_short
def get_name_schoenfliess_by_laue_class(laue_class: str) -> Tuple[str]:
l_symb = [_5 for _1, _2, _3, _4, _5 in
REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES if _1 == laue_class]
l_res = []
for symb in l_symb:
for _name_schoenflies in ACCESIBLE_NAME_SCHOENFLIES:
_symb = _name_schoenflies.split(".")[0]
if symb == _symb:
l_res.append(_name_schoenflies)
return tuple(l_res)
def get_name_schoenfliess_by_point_group_hm_short(point_group: str) -> Tuple[str]:
l_symb = [_5 for _1, _2, _3, _4, _5 in
REFERENCE_TABLE_LAUE_CLASS_SHORT_FULL_POINT_GROUP_HM_SYMBOL_SHORT_FULL_SCHOENFLIES if _3 == point_group]
l_res = []
for symb in l_symb:
for _name_schoenflies in ACCESIBLE_NAME_SCHOENFLIES:
_symb = _name_schoenflies.split(".")[0]
if symb == _symb:
l_res.append(_name_schoenflies)
return tuple(l_res)
def get_centrosymmetry_by_name_hall(name: str) -> bool:
centrosymmetry = name.startswith("-")
if not (name in ACCESIBLE_NAME_HALL_SHORT):
centrosymmetry = None
return centrosymmetry
def separate_notation_it_coordinate_system_code(name: str):
l_h = name.strip().split(":")
notation = l_h[0].strip()
if notation.isdigit():
notation = int(notation)
if len(l_h) == 1:
it_coordinate_system_code = None
else:
it_coordinate_system_code = l_h[1].strip()
if not (it_coordinate_system_code in ACCESIBLE_IT_COORDINATE_SYSTEM_CODE):
it_coordinate_system_code = None
return notation, it_coordinate_system_code
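# Illustrative usage: separate_notation_it_coordinate_system_code("15:b2") returns (15, "b2"),
# while separate_notation_it_coordinate_system_code("P 21/c") returns ("P 21/c", None).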
def get_symop_pcentr_multiplicity_letter_site_symmetry_coords_xyz_2(
it_number: int, it_coordinate_system_code: str):
"""
FIXME: check how this works for space group 166.
The crystal system is expected to be trigonal or hexagonal.
"""
crystal_system = get_crystal_system_by_it_number(it_number)
if it_coordinate_system_code is None:
choice = "1"
elif "3" in it_coordinate_system_code:
choice = "3"
elif "2" in it_coordinate_system_code:
choice = "2"
elif "1" in it_coordinate_system_code:
choice = "1"
elif "h" in it_coordinate_system_code:
# FIXME: IT SHOULD BE CHECKED
# if crystal_system.startswith("trigonal"):
# choice = "2"
# else: # hexagonal
choice = "1"
elif "r" in it_coordinate_system_code:
choice = "1"
else:
choice = "1"
symop, p_centr = None, None
for _el_card in EL_CARDS:
if ((_el_card["it_number"] == it_number) & (_el_card["choice"][0] == choice)):
symop = tuple(_el_card["symmetry"])
p_centr = array([Fraction(_).limit_denominator(10) for _ in _el_card["pcentr"][0].split(",")],
dtype=Fraction)
break
_s_name, _choice = get_transform_pp_abc_choice_by_it_number_it_coordinate_system_code(it_number,
it_coordinate_system_code)
Q, p = transform_string_to_r_b(_s_name, ("a", "b", "c"))
P = transpose(Q)
q = -1 * mult_matrix_vector(Q, p)
p_centr_new = p_centr + q
symop_2 = [transform_symop_operation_xyz_by_pp_abc(_symop, P, p) for _symop in symop]
for _el_card in WYCKOFF:
if ((_el_card["it_number"] == it_number) & (_el_card["choice"] == int(choice))):
wyckoff = _el_card["wyckoff"]
break
l_multiplicity = [_h["multiplicity"] for _h in wyckoff]
l_letter = [_h["letter"] for _h in wyckoff]
l_site_symmetry = [_h["site_symmetry"] for _h in wyckoff]
l_coord_xyz = [_h["symop"] for _h in wyckoff]
l_coord_xyz_2 = [[transform_symop_operation_xyz_by_pp_abc(_coord_xyz, P, p) for _coord_xyz in coord_xyz] for
coord_xyz in l_coord_xyz]
return symop_2, p_centr_new, l_multiplicity, l_letter, l_site_symmetry, l_coord_xyz_2
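# The function below applies the IT A change-of-basis rule to a symmetry operation (W, w):
# W' = Q W P and w' = Q (W p + w) + q, with Q taken as the transpose of P and q = -Q p
# (see the TODO inside the function about the transpose/inverse assumption).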
def transform_symop_operation_xyz_by_pp_abc(symop_operation_xyz: str, P, p) -> str:
Q = transpose(P)  # TODO: this assumes Q^T == Q**-1, which is not guaranteed in general.
q = -1 * mult_matrix_vector(Q, p)
r_xyz, b_xyz = transform_string_to_r_b(symop_operation_xyz, ("x", "y", "z"))
QW = mult_matrixes(Q, r_xyz)
QWP = mult_matrixes(QW, P)
QWp = mult_matrix_vector(QW, p)
Qw = mult_matrix_vector(Q, b_xyz)
r_new = QWP
b_new = QWp + Qw + q
_s = transform_r_b_to_string(r_new, b_new, ("x", "y", "z"))
return _s
def transform_symop_operation_xyz_by_Qq_xyz(symop_operation_xyz: str, Q, q) -> str:
P = transpose(Q)  # TODO: assumes Q^T == Q**-1 (note: the matrix Q is transposed, not the vector q).
p = -1 * mult_matrix_vector(P, q)
_s = transform_symop_operation_xyz_by_pp_abc(symop_operation_xyz, P, p)
return _s
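# Minimal 3x3 linear-algebra helpers: mult_matrix_vector keeps exact Fraction arithmetic when both
# operands are Fractions (and falls back to float otherwise); mult_matrixes multiplies two 3x3 matrices.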
def mult_matrix_vector(a, v):
cond_1 = isinstance(v[0], Fraction)
cond_2 = isinstance(a[0, 0], Fraction)
if (cond_1 & cond_2):
p_0 = a[0, 0]*v[0] + a[0, 1]*v[1] + a[0, 2]*v[2]
p_1 = a[1, 0]*v[0] + a[1, 1]*v[1] + a[1, 2]*v[2]
p_2 = a[2, 0]*v[0] + a[2, 1]*v[1] + a[2, 2]*v[2]
b = array([p_0, p_1, p_2], dtype=Fraction)
else:
p_0 = float(a[0, 0])*float(v[0]) + float(a[0, 1])*float(v[1]) + float(a[0, 2])*float(v[2])
p_1 = float(a[1, 0])*float(v[0]) + float(a[1, 1])*float(v[1]) + float(a[1, 2])*float(v[2])
p_2 = float(a[2, 0])*float(v[0]) + float(a[2, 1])*float(v[1]) + float(a[2, 2])*float(v[2])
b = array([p_0, p_1, p_2], dtype=float)
return b
def mult_matrixes(a, b):
c = 0. * a
for _i in range(3):
for _j in range(3):
c[_i, _j] = sum(a[_i, :] * b[:, _j])
return c
def auto_choose_it_coordinate_system_code(it_number:int, it_coordinate_system_codes:list)->str:
if len(it_coordinate_system_codes) == 0:
it_coordinate_system_code = None
elif len(it_coordinate_system_codes) > 1:
print(f"Several values of it_coordinate_system_code have been defined:")
print_long_list(it_coordinate_system_codes)
default_i_c_s_c = get_default_it_coordinate_system_code_by_it_number(it_number)
if default_i_c_s_c in it_coordinate_system_codes:
it_coordinate_system_code = default_i_c_s_c
print(f"The default value has been choosen:'{it_coordinate_system_code:}'.")
else:
l_1 = [_ for _ in it_coordinate_system_codes if not ("-" in _)]
if len(l_1) != 0:
_choice = l_1[0]
else:
_choice = it_coordinate_system_codes[0]
it_coordinate_system_code = _choice
print(f"The \"{it_coordinate_system_code:}\" has been choosen.")
else:
it_coordinate_system_code = it_coordinate_system_codes[0]
return it_coordinate_system_code
def get_transform_pp_abc_choice_by_it_number_it_coordinate_system_code(it_number: int,
it_coordinate_system_code: str) -> Tuple:
# TODO: not sure about -b1, c1, -c1, a1, -a1
if it_coordinate_system_code in ("b1", "b2", "b3", "abc", "1abc", "2abc", "1", "2", "h", "r", None):
transform_pp_abc = "a,b,c"
elif it_coordinate_system_code in ("-a1", "-a2", "-a3", "ba-c", "1ba-c", "2ba-c"):
transform_pp_abc = "b,a,-c"
elif it_coordinate_system_code in ("c1", "c2", "c3", "cab", "1cab", "2cab"):
transform_pp_abc = "c,a,b"
elif it_coordinate_system_code in ("-b1", "-b2", "-b3", "-cba", "1-cba", "2-cba"):
transform_pp_abc = "-c,b,a"
elif it_coordinate_system_code in ("a1", "a2", "a3", "bca", "1bca", "2bca"):
transform_pp_abc = "b,c,a"
elif it_coordinate_system_code in ("-c1", "-c2", "-c3", "a-cb", "1bca", "2a-cb"):
transform_pp_abc = "a,-c,b"
if it_coordinate_system_code is None:
choice = 1
elif "2" in it_coordinate_system_code:
choice = 2
elif "h" in it_coordinate_system_code:
crystal_system = get_crystal_system_by_it_number(it_number)
if crystal_system.startswith("trigonal"):
choice = 2
else: # hexagonal
choice = 1
elif "3" in it_coordinate_system_code:
choice = 3
else:
choice = 1
return transform_pp_abc, choice
def print_long_list(ll):
ls_out, s_line = [], []
max_size = max([len(str(_)) for _ in ll])
length_size = 80
number_per_line = int(length_size // max_size)
_i = Fraction(1, number_per_line)
for _ in ll:
s_line.append(str(_).rjust(max_size))
if _i.denominator == 1:
ls_out.append(", ".join(s_line))
s_line = []
_i += Fraction(1, number_per_line)
ls_out.append(", ".join(s_line))
print("\n".join(ls_out).rstrip())
return
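# The helpers below (devide, one_line, is_solution_a_b, is_good_for_mask) perform a small
# Gaussian-elimination style check used to decide whether the linear system r.x = b (mod 1)
# has a solution for fractional coordinates.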
def devide(l_a, b, dev):
if dev is not None:
l_a_o = [a / dev for a in l_a]
b_o = b / dev
else:
l_a_o = [a for a in l_a]
b_o = b
return l_a_o, b_o
def one_line(l_a, b, l_ind_exclude):
l_ind_non_zeros = []
for i_a, a in enumerate(l_a):
flag_1 = not (i_a in l_ind_exclude)
flag_2 = (a != 0)
if (flag_1 & flag_2):
l_ind_non_zeros.append(i_a)
ind_1, dev_1 = None, None
if len(l_ind_non_zeros) != 0:
ind_1 = l_ind_non_zeros[0]
dev_1 = l_a[ind_1]
l_a_o, b_o = devide(l_a, b, dev_1)
return l_a_o, b_o, ind_1, dev_1
def is_solution_a_b(ll_a, l_b):
if all([b == 0 for b in l_b]):
return True
l_ind_exclude = []
l_a_in_1, b_in_1 = ll_a[0], (l_b[0])%1
l_a_in_2, b_in_2 = ll_a[1], (l_b[1])%1
l_a_in_3, b_in_3 = ll_a[2], (l_b[2])%1
l_a_1, b_1, ind_1, dev_1 = one_line(l_a_in_1, b_in_1, l_ind_exclude)
if ind_1 is not None:
val_2 = l_a_in_2[ind_1]
l_a_in_2 = [_1 - val_2 * _2 for _1, _2 in zip(l_a_in_2, l_a_1)]
b_in_2 = (b_in_2 - val_2 * b_1) % 1
val_3 = l_a_in_3[ind_1]
l_a_in_3 = [_1 - val_3 * _2 for _1, _2 in zip(l_a_in_3, l_a_1)]
b_in_3 = (b_in_3 - val_3 * b_1) % 1
l_ind_exclude.append(ind_1)
elif b_in_1 != 0:
return False
l_a_2, b_2, ind_2, dev_2 = one_line(l_a_in_2, b_in_2, l_ind_exclude)
if ind_2 is not None:
val_3 = l_a_in_3[ind_2]
l_a_in_3 = [_1 - val_3 * _2 for _1, _2 in zip(l_a_in_3, l_a_2)]
b_in_3 = (b_in_3 - val_3 * b_2) % 1
l_ind_exclude.append(ind_2)
elif b_in_2 != 0:
return False
l_a_3, b_3, ind_3, dev_3 = one_line(l_a_in_3, b_in_3, l_ind_exclude)
if ind_3 is not None:
l_ind_exclude.append(ind_3)
elif b_in_3 != 0:
return False
return True
def is_good_for_mask(r, b, fract_x, fract_y, fract_z):
b_1 = array([(fract_x - b[0]) % 1, (fract_y - b[1]) % 1, (fract_z - b[2]) % 1], dtype=Fraction)
flag_1 = is_solution_a_b(r, b_1)
return flag_1
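# Hedged usage sketch (not part of the original module): the helpers above run a
# small Gaussian-elimination style test, modulo 1 and with exact Fractions, of
# whether the linear system A * x = b (mod 1) admits a solution.
def _example_is_solution_a_b():
    from fractions import Fraction
    identity = [[Fraction(int(i == j)) for j in range(3)] for i in range(3)]
    zeros = [[Fraction(0)] * 3 for _ in range(3)]
    # An identity matrix is solvable for any right-hand side ...
    assert is_solution_a_b(identity, [Fraction(1, 2)] * 3)
    # ... while an all-zero matrix with a non-zero right-hand side is not.
    assert not is_solution_a_b(zeros, [Fraction(1, 2), Fraction(0), Fraction(0)])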
# if __name__ == "__main__":
# print("List of functions: ")
# print("List of constants: ")
# def print_parameters_by_it_number_it_coordinate_system_code(it_number: int, it_coordinate_system_code=None):
# bravais_type, laue_class, patterson_name_hm, centring_type, crystal_system = None, None, None, None, None
# name_hm_extended, name_hm_full, name_hm_short = None, None, None
# name_hall, name_schoenflies, point_group_hm = None, None, None
# lattice_type = None
# generators = ()
# symop, pcentr, l_multiplicity, l_letter, l_site_symmetry, l_coord_xyz_2 = get_symop_pcentr_multiplicity_letter_site_symmetry_coords_xyz_2(
# it_number, it_coordinate_system_code)
# crystal_system = get_crystal_system_by_it_number(it_number)
# if it_coordinate_system_code is not None:
# it_c_s_c = it_coordinate_system_code
# else:
# it_c_s_c = get_default_it_coordinate_system_code_by_it_number(it_number)
# name_hm_extended = get_name_hm_extended_by_it_number_it_coordinate_system_code(it_number, it_c_s_c)
# if (name_hm_extended is not None):
# centring_type = get_centring_type_by_name_hm_extended(name_hm_extended)
# if ((centring_type is not None) & (crystal_system is not None)):
# bravais_type = get_bravais_type_by_centring_type_crystal_system(centring_type, crystal_system)
# name_hm_short = get_name_hm_short_by_it_number(it_number)
# if (name_hm_short is not None):
# lattice_type = get_lattice_type_by_name_hm_short(name_hm_short)
# hm_full = get_name_hm_full_by_it_number(it_number)
# name_hall = get_name_hall_by_it_number(it_number)
# if name_hall is not None:
# centrosymmetry = get_centrosymmetry_by_name_hall(name_hall)
# name_schoenflies = get_name_schoenflies_by_it_number(it_number)
# if name_schoenflies is not None:
# laue_class = get_laue_class_by_name_schoenflies(name_schoenflies)
# point_group_hm = get_point_group_hm_short_by_name_schoenflies(name_schoenflies)
# if point_group_hm is not None:
# generators = get_generators_by_point_group_hm(point_group_hm)
# if ((lattice_type is not None) & (laue_class is not None)):
# patterson_name_hm = get_patterson_name_hm_by_lattice_type_laue_class(lattice_type, laue_class)
# print(70 * "-")
# print("SPACE GROUP")
# width_left, width_right = 30, 40
# print(f"IT_number: ".rjust(width_left) + f"{it_number:}".ljust(width_right))
# if name_hm_extended is not None: print(
# "Name H-M extended: ".rjust(width_left) + f"\"{name_hm_extended:}\"".ljust(width_right))
# if name_hm_full is not None: print(
# f"Name H-M full: ".rjust(width_left) + f"\"{name_hm_full:}\"".ljust(width_right))
# if name_hm_short is not None: print(
# f"Name H-M short: ".rjust(width_left) + f"\"{name_hm_short:}\"".ljust(width_right))
# if name_hall is not None: print(f"Name Hall short: ".rjust(width_left) + f"\"{name_hall:}\"".ljust(width_right))
# if name_schoenflies is not None: print(
# f"Name Schoenflies: ".rjust(width_left) + f"\"{name_schoenflies:}\"".ljust(width_right))
# print(f"IT_coordinate_system_code: ".rjust(width_left) + f"\"{it_c_s_c:}\"".ljust(width_right))
# print()
# if point_group_hm is not None: print(
# f"Point group H-M: ".rjust(width_left) + f"\"{point_group_hm:}\"".ljust(width_right))
# if laue_class is not None: print(f"Laue class: ".rjust(width_left) + f"\"{laue_class:}\"".ljust(width_right))
# if patterson_name_hm is not None: print(
# f"Patterson name H-M: ".rjust(width_left) + f"\"{patterson_name_hm:}\"".ljust(width_right))
# if centring_type is not None: print(
# f"Centring type: ".rjust(width_left) + f"\"{centring_type:}\"".ljust(width_right))
# if bravais_type is not None: print(
# f"Bravais type: ".rjust(width_left) + f"\"{bravais_type:}\"".ljust(width_right))
# if crystal_system is not None: print(
# f"Crystal system: ".rjust(width_left) + f"\"{crystal_system:}\"".ljust(width_right))
# print()
# if centrosymmetry is not None: print(
# f"Centrosymmetry: ".rjust(width_left) + f"{'Yes' if centrosymmetry else 'No':}".ljust(width_right))
# if generators != (): print(
# f"Generators: ".rjust(width_left) + ", ".join([f"\"{_}\"" for _ in generators]).ljust(width_right))
# if symop is not None:
# print("Symop: ") # pcentr
# print_long_list([f"\"{_:}\"" for _ in symop])
# print("Multiplicity letter syte_symmetry coord_xyz")
# for _1, _2, _3, _4 in zip(l_multiplicity, l_letter, l_site_symmetry, l_coord_xyz_2):
# print(f"{_1:} {_2:} {_3:} {('(' + '), ('.join(_4) + ')'):}")
# return
# def dialog():
# answ = input(
# "Introduce space group notation (IT_number, H-M, Hall, Schoenflies) \nand it_coordinate_system_code (if it is needed)\n(example: '8:-b2')\n..... ")
# notation, it_coordinate_system_code = separate_notation_it_coordinate_system_code(answ)
# flag_print = True
# if notation in ACCESIBLE_IT_NUMBER:
# it_number = notation
# else:
# res = get_notation(notation)
# if "Hall" == res:
# it_number = get_it_number_by_name_hall(notation)
# elif "Schoenflies" == res:
# it_number = get_it_number_by_name_schoenflies(notation)
# elif "Hermann-Mauguin" == res:
# res_2 = get_type_hm(notation)
# if "extended" in res_2:
# it_number, it_coordinate_system_codes = get_it_number_it_coordinate_system_codes_by_name_hm_extended(
# notation)
# if (not (it_coordinate_system_code in it_coordinate_system_codes)):
# it_coordinate_system_code = auto_choose_it_coordinate_system_code(it_number,
# it_coordinate_system_codes)
# elif "full" in res_2:
# it_number = get_it_number_by_name_hm_full(notation)
# elif "short" in res_2:
# it_number = get_it_number_by_name_hm_short(notation)
# else:
# print(f"Notation \"{notation:}\" is not found")
# flag_print = False
# else:
# print(f"Notation \"{notation:}\" is not found")
# flag_print = False
# if flag_print:
# it_coordinate_system_codes = get_it_coordinate_system_codes_by_it_number(it_number)
# if (not (it_coordinate_system_code in it_coordinate_system_codes)):
# it_coordinate_system_code = auto_choose_it_coordinate_system_code(it_number, it_coordinate_system_codes)
# print_parameters_by_it_number_it_coordinate_system_code(it_number, it_coordinate_system_code)
# return flag_print
# flag_print = True
# while flag_print:
# print(70 * "-")
# print(70 * "-")
# flag_print = dialog()
# """
# print("\nACCESIBLE_BRAVAIS_TYPE: ")
# print_long_list(ACCESIBLE_BRAVAIS_TYPE)
# print("\nACCESIBLE_LAUE_CLASS: ")
# print_long_list(ACCESIBLE_LAUE_CLASS)
# print("\nACCESIBLE_IT_COORDINATE_SYSTEM_CODE: ")
# print_long_list(ACCESIBLE_IT_COORDINATE_SYSTEM_CODE)
# print("\nACCESIBLE_CENTRING_TYPE: ")
# print_long_list(ACCESIBLE_CENTRING_TYPE)
# print("\nACCESIBLE_CRYSTAL_SYSTEM: ")
# print_long_list(ACCESIBLE_CRYSTAL_SYSTEM)
# print("\nACCESIBLE_NAME_HM_SHORT: ")
# print_long_list(ACCESIBLE_NAME_HM_SHORT)
# print("\nACCESIBLE_NAME_HM_FULL")
# print_long_list(ACCESIBLE_NAME_HM_FULL)
# print("\nACCESIBLE_NAME_HM_EXTENDED")
# print_long_list(ACCESIBLE_NAME_HM_EXTENDED)
# print("\nACCESIBLE_NAME_SCHOENFLIES: ")
# print_long_list(ACCESIBLE_NAME_SCHOENFLIES)
# print("\nACCESIBLE_NAME_HALL_SHORT: ")
# print_long_list(ACCESIBLE_NAME_HALL_SHORT)
# print("\nACCESIBLE_REFERENCE_SETTING: ")
# print_long_list(ACCESIBLE_REFERENCE_SETTING)
# print("\nDEFAULT_REFERENCE_TABLE_IT_NUMBER_NAME_HALL_NAME_SCHOENFLIES_NAME_HM_SHORT_REFERENCE_SETTING_IT_COORDINATE_SYSTEM_CODE: ")
# print_long_list(DEFAULT_REFERENCE_TABLE_IT_NUMBER_NAME_HALL_NAME_SCHOENFLIES_NAME_HM_SHORT_REFERENCE_SETTING_IT_COORDINATE_SYSTEM_CODE)
# print("\nREFERENCE_TABLE_ORTHORHOMBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED: ")
# print_long_list(REFERENCE_TABLE_ORTHORHOMBIC_IT_COORDINATE_SYSTEM_CODE_NAME_HM_EXTENDED)
# print("\nD_CENTRING_TYPE_SHIFT: ")
# print(D_CENTRING_TYPE_SHIFT)
# print("\nD_CRYSTAL_FAMILY_DESCRIPTION: ")
# print(D_CRYSTAL_FAMILY_DESCRIPTION)
# print("\nD_BRAVAIS_TYPE_CELL_CONSTRAINT_MODE_ABC: ")
# print(D_BRAVAIS_TYPE_CELL_CONSTRAINT_MODE_ABC)
# print("\nT_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM: ")
# print_long_list(T_BRAVAIS_TYPE_CENTRING_TYPE_CRYSTAL_SYSTEM)
# """
# FUNCTIONS = [
# transs,
# calc_GCF
# ]
| 51.107057
| 161
| 0.509438
|
7f46e7dc23fddfcaaeab0865926b982ff47f6918
| 25,720
|
py
|
Python
|
libraries/second_processes.py
|
MickyHCorbett/MorfLess
|
9761197d7767c250cc27262e1ab41adf21c59333
|
[
"MIT"
] | null | null | null |
libraries/second_processes.py
|
MickyHCorbett/MorfLess
|
9761197d7767c250cc27262e1ab41adf21c59333
|
[
"MIT"
] | null | null | null |
libraries/second_processes.py
|
MickyHCorbett/MorfLess
|
9761197d7767c250cc27262e1ab41adf21c59333
|
[
"MIT"
] | null | null | null |
# secondary process functions - e.g. creating lists files
from libraries import constants as ct
from libraries import globals as gb
from libraries import schematics as sch
from libraries import html_elements as he
from libraries import string_processes as sp
from libraries import lists as ls
import json
def pcom_process_postlist(postlist_info,postlist,settings,list_meta,fileroot):
postlist_constant = ct.PCOM_NO_ENTRY
processed = False
if postlist_info:
postlist_constant = ''
entry_end = '},'
array_end = ']'
for ind,info in enumerate(postlist_info):
list_end = False
if info['content'] == ct.PCOM_SETTINGS_TYPE_POSTS:
# this section produces an array of post objects
list_of_posts = ls.pcom_create_posts_pages_array(postlist['posts'],'post')
# order most recent first
list_of_posts = ls.pcom_order_postlist_posts(list_of_posts)
# separate sticky posts from non sticky
list_of_posts,list_of_stickies = ls.pcom_find_sticky_posts_by_meta(list_of_posts)
else:
# this section produces a list of postnames
list_of_posts,list_of_stickies = ls.pcom_find_sticky_posts(info['content'],info['manual_sticky'])
if ind == 0:
postlist_constant += 'window._postlist_' + fileroot.replace('-','_') + ' = {' + ct.NL
postlist_constant += ct.T1 + "identifier: '" + sch.PM_POST_LIST_IDENTIFIER + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_element: '" + sch.POSTLIST_PAGINATION + "'," + ct.NL
postlist_constant += ct.T1 + "page_numbers_selected_class: '" + ct.PCOM_PAGE_NUMBERS_CURRENT_CLASS + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_class: '" + ct.PCOM_POSTLIST_PAGINATION_CLASS + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_selector_id: '" + sch.PM_POSTLIST_PAGINATION_SELECTOR_IDENT + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_number_sub: '" + sch.PM_POSTLIST_PAGINATION_NUMBER + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_number_ident: '" + sch.PM_POSTLIST_PAGINATION_IDENT + "'," + ct.NL
postlist_constant += ct.T1 + 'entries: [' + ct.NL
postlist_constant += ct.T1 + '{' + ct.NL
postlist_constant += ct.T2 + 'posts_per_page: ' + info['ppp'] + ',' + ct.NL
postlist_constant += ct.T2 + 'posts: ['+ ct.NL
# add non sticky posts
for ind2,entry in enumerate(list_of_posts):
if info['content'] == ct.PCOM_SETTINGS_TYPE_POSTS:
post = entry
else:
post = ls.pcom_find_post(postlist,entry)
if ind2 == (len(list_of_posts)-1):
list_end = True
if post['postname'] != ct.PCOM_NO_ENTRY:
if list_of_stickies:
entry_html = he.pcom_create_post_list_entry(post,settings,list_meta,list_end,ignore_meta=True)
else:
entry_html = he.pcom_create_post_list_entry(post,settings,list_meta,list_end)
postlist_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
if list_end:
postlist_constant += ct.T2 + '],' + ct.NL
            # add sticky posts
postlist_constant += ct.T2 + 'sticky: ['+ ct.NL
if list_of_stickies:
list_end = False
for ind3,entry in enumerate(list_of_stickies):
if info['content'] == ct.PCOM_SETTINGS_TYPE_POSTS:
post = entry
else:
post = ls.pcom_find_post(postlist,entry)
if ind3 == (len(list_of_stickies)-1):
list_end = True
if post['postname'] != ct.PCOM_NO_ENTRY:
entry_html = he.pcom_create_post_list_entry(post,settings,list_meta,list_end,manual_sticky=True)
postlist_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
if list_end:
postlist_constant += ct.T2 + ']' + ct.NL
else:
postlist_constant += ct.T2 + ']' + ct.NL
if ind == (len(postlist_info)-1):
entry_end = '}'
postlist_constant += ct.T1 + entry_end + ct.NL
processed = True
# close list
postlist_constant += ct.T1 + ']' + ct.NL + '};'
return postlist_constant,processed
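# Hedged sketch (illustrative only) of the JS constant assembled above; the
# actual identifier/class strings come from the schematics and constants
# modules, and `entries` holds the rendered postlist block(s):
#
#   window._postlist_my_page = {
#       identifier: '...',
#       pagination_element: '...',
#       ...,
#       entries: [
#           {
#               posts_per_page: 5,
#               posts: [ /* rendered post entries */ ],
#               sticky: [ /* rendered sticky entries */ ]
#           }
#       ]
#   };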
# template postlist
def pcom_create_sub_template_backlink(type,settings):
back_link_text = ''
back_link = ''
if settings['template_sub_header_back_link_text'][type] != ct.PCOM_JSON_LOAD_ERROR:
back_link_text = settings['template_sub_header_back_link_text'][type].rstrip().lstrip()
back_link_text = sp.pcom_replace_quotes(back_link_text)
back_link_template = sch.PM_SUB_TEMPLATE_BACK_LINK
back_link_url = "/" + sp.pcom_create_template_fileroot(type,settings) + "/"
if back_link_text:
back_link = '\\' +ct.NL
back_link += back_link_template.replace(sch.PM_POSTLIST_TEMPLATE_BACKLINK_NAME,back_link_text)
back_link = back_link.replace(sch.PM_POSTLIST_TEMPLATE_BACKLINK,back_link_url)
back_link = sp.pcom_add_3tabs_to_content_line(back_link)
return back_link
def pcom_create_sub_template_title(type,settings,sub):
sub_title = ''
back_link = ''
if type != ct.PCOM_SETTINGS_TYPE_POSTS:
if settings['template_sub_header_text'][type] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_sub_header_text'][type] + ' ' + sub
back_link = pcom_create_sub_template_backlink(type,settings)
else:
if settings['template_main_header_text'][type] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_main_header_text'][type]
return sub_title,back_link
def pcom_determine_post_list_from_type(postlist,archive,settings,list_meta,type,sub):
list_of_posts = []
if type == ct.PCOM_SETTINGS_TYPE_POSTS:
list_of_posts = ls.pcom_create_posts_pages_array(postlist['posts'],'post')
if type == ct.PCOM_SETTINGS_TYPE_CATEGORIES:
list_of_posts = ls.pcom_find_sub_list(postlist['posts'],[],type,sub,'post')
if type == ct.PCOM_SETTINGS_TYPE_AUTHORS:
# get posts and pages
list_of_posts = ls.pcom_find_sub_list(postlist['posts'],list_meta['authors']['authors'],type,sub,'',True)
if type == ct.PCOM_SETTINGS_TYPE_ARCHIVE:
list_of_posts = ls.pcom_find_sub_list_archive(archive,postlist,sub,'post')
return list_of_posts
def pcom_process_template_postlist(postlist,archive,type,settings,list_meta,fileroot,sub=''):
processed = False
postlist_constant = ''
sub_title = ''
back_link = ''
list_of_posts = pcom_determine_post_list_from_type(postlist,archive,settings,list_meta,type,sub)
sub_title,back_link = pcom_create_sub_template_title(type,settings,sub)
# order most recent first
list_of_posts = ls.pcom_order_postlist_posts(list_of_posts)
# separate sticky posts from non sticky
list_of_posts,list_of_stickies = ls.pcom_find_sticky_posts_by_meta(list_of_posts)
postlist_constant += 'window._postlist_' + fileroot.replace('-','_') + ' = {' + ct.NL
postlist_constant += ct.T1 + "identifier: '" + sch.PM_POST_LIST_TEMPLATE_IDENTIFIER + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_element: '" + sch.POSTLIST_PAGINATION + "'," + ct.NL
postlist_constant += ct.T1 + "page_numbers_selected_class: '" + ct.PCOM_PAGE_NUMBERS_CURRENT_CLASS + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_class: '" + ct.PCOM_POSTLIST_PAGINATION_CLASS + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_selector_id: '" + sch.PM_POSTLIST_PAGINATION_SELECTOR_IDENT + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_number_sub: '" + sch.PM_POSTLIST_PAGINATION_NUMBER + "'," + ct.NL
postlist_constant += ct.T1 + "pagination_number_ident: '" + sch.PM_POSTLIST_PAGINATION_IDENT + "'," + ct.NL
postlist_constant += ct.T1 + 'sub_title: "' + sub_title + '",' + ct.NL
postlist_constant += ct.T1 + "back_link: '" + back_link + "'," + ct.NL
postlist_constant += ct.T1 + "header: '" + sch.PM_TEMPLATE_HEADER_FORMAT + "'," + ct.NL
postlist_constant += ct.T1 + 'entries: [' + ct.NL
postlist_constant += ct.T1 + '{' + ct.NL
postlist_constant += ct.T2 + 'posts_per_page: ' + str(settings['posts_per_page']) + ',' + ct.NL
postlist_constant += ct.T2 + 'posts: ['+ ct.NL
if list_of_posts:
list_end = False
for ind2,post in enumerate(list_of_posts):
if ind2 == (len(list_of_posts)-1):
list_end = True
if post['postname'] != ct.PCOM_NO_ENTRY:
entry_html = he.pcom_create_post_list_entry(post,settings,list_meta,list_end)
postlist_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
postlist_constant += ct.T2 + '],' + ct.NL
    # add sticky posts
postlist_constant += ct.T2 + 'sticky: ['+ ct.NL
if list_of_stickies:
list_end = False
for ind3,post in enumerate(list_of_stickies):
if ind3 == (len(list_of_stickies)-1):
list_end = True
if post['postname'] != ct.PCOM_NO_ENTRY:
entry_html = he.pcom_create_post_list_entry(post,settings,list_meta,list_end)
postlist_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
if list_end:
postlist_constant += ct.T2 + ']' + ct.NL
else:
postlist_constant += ct.T2 + ']' + ct.NL
postlist_constant += ct.T1 + '}' + ct.NL
processed = True
# close list
postlist_constant += ct.T1 + ']' + ct.NL + '};'
return postlist_constant,processed
# create main list page of template categories, authors etc
def pcom_process_template_list_info(list,settings,base_url,fileroot):
processed = False
list_constant = ''
sub_title = ''
info = pcom_create_template_info_references(list,base_url,settings)
if base_url == ct.PCOM_SETTINGS_TYPE_CATEGORIES:
if settings['template_main_header_text'][base_url] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_main_header_text'][base_url]
if base_url == ct.PCOM_SETTINGS_TYPE_AUTHORS:
if settings['template_main_header_text'][base_url] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_main_header_text'][base_url]
list_constant += 'window._postlist_' + fileroot + ' = {' + ct.NL
list_constant += ct.T1 + "identifier: '" + sch.PM_POST_LIST_TEMPLATE_IDENTIFIER + "'," + ct.NL
list_constant += ct.T1 + "pagination_element: '" + sch.POSTLIST_PAGINATION + "'," + ct.NL
list_constant += ct.T1 + "page_numbers_selected_class: '" + ct.PCOM_PAGE_NUMBERS_CURRENT_CLASS + "'," + ct.NL
list_constant += ct.T1 + "pagination_class: '" + ct.PCOM_POSTLIST_PAGINATION_CLASS + "'," + ct.NL
list_constant += ct.T1 + "pagination_selector_id: '" + sch.PM_POSTLIST_PAGINATION_SELECTOR_IDENT + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_sub: '" + sch.PM_POSTLIST_PAGINATION_NUMBER + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_ident: '" + sch.PM_POSTLIST_PAGINATION_IDENT + "'," + ct.NL
list_constant += ct.T1 + "sub_title: '" + sub_title + "'," + ct.NL
list_constant += ct.T1 + "back_link: ''," + ct.NL
list_constant += ct.T1 + "header: '" + sch.PM_TEMPLATE_HEADER_FORMAT + "'," + ct.NL
list_constant += ct.T1 + 'entries: [' + ct.NL
list_constant += ct.T1 + '{' + ct.NL
list_constant += ct.T2 + 'posts_per_page: ' + str(settings['posts_per_page']) + ',' + ct.NL
list_constant += ct.T2 + 'posts: ['+ ct.NL
if list:
list_end = False
for ind2,entry in enumerate(list):
if ind2 == (len(list)-1):
list_end = True
if entry['name'] != ct.PCOM_NO_ENTRY:
entry_html = he.pcom_create_info_list_entry(entry,info[ind2]['url'],settings,list_end)
list_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
list_constant += ct.T2 + '],' + ct.NL
list_constant += ct.T2 + 'sticky: []'+ ct.NL
list_constant += ct.T1 + '}' + ct.NL
processed = True
# close list
list_constant += ct.T1 + ']' + ct.NL + '};'
return list_constant,info,processed
def pcom_process_archive_info(archive,settings,base_url,base_name,fileroot):
processed = False
list_constant = ''
sub_title = ''
list = archive['created']
info = pcom_create_archive_info_references(list,base_url,settings)
if settings['template_main_header_text'][base_url] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_main_header_text'][base_url]
list_constant += 'window._postlist_' + fileroot + ' = {' + ct.NL
list_constant += ct.T1 + "identifier: '" + sch.PM_POST_LIST_TEMPLATE_IDENTIFIER + "'," + ct.NL
list_constant += ct.T1 + "pagination_element: '" + sch.POSTLIST_PAGINATION + "'," + ct.NL
list_constant += ct.T1 + "page_numbers_selected_class: '" + ct.PCOM_PAGE_NUMBERS_CURRENT_CLASS + "'," + ct.NL
list_constant += ct.T1 + "pagination_class: '" + ct.PCOM_POSTLIST_PAGINATION_CLASS + "'," + ct.NL
list_constant += ct.T1 + "pagination_selector_id: '" + sch.PM_POSTLIST_PAGINATION_SELECTOR_IDENT + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_sub: '" + sch.PM_POSTLIST_PAGINATION_NUMBER + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_ident: '" + sch.PM_POSTLIST_PAGINATION_IDENT + "'," + ct.NL
list_constant += ct.T1 + "sub_title: '" + sub_title + "'," + ct.NL
list_constant += ct.T1 + "back_link: ''," + ct.NL
list_constant += ct.T1 + "header: '" + sch.PM_TEMPLATE_HEADER_FORMAT + "'," + ct.NL
list_constant += ct.T1 + 'entries: [' + ct.NL
list_constant += ct.T1 + '{' + ct.NL
list_constant += ct.T2 + 'posts_per_page: 9999,' + ct.NL
list_constant += ct.T2 + 'posts: ['+ ct.NL
if list:
list_constant += "'" + ct.JS_ESCAPE + ct.NL
list_constant += ct.T3 + sch.PM_POST_OPEN_ONLY + ct.JS_ESCAPE + ct.NL
for ind2,entry in enumerate(list):
entry_html = he.pcom_create_archive_entry(entry,base_name,settings)
list_constant += sp.pcom_add_3tabs_to_content_line(entry_html)
list_constant += ct.T3 + sch.PM_POST_CLOSE + "'" + ct.NL
list_constant += ct.T2 + '],' + ct.NL
list_constant += ct.T2 + 'sticky: []'+ ct.NL
list_constant += ct.T1 + '}' + ct.NL
processed = True
# close list
list_constant += ct.T1 + ']' + ct.NL + '};'
return list_constant,info,processed
# create category, author reference dictionary
def pcom_update_template_meta(template_content,info,no_meta=False):
title_meta = ''
desc_meta = ''
js_meta = ''
constant_meta = ''
if template_content:
if not no_meta:
title_meta = ' - ' + info['title']
if info['description']:
desc_meta = ' - ' + info['description'].replace('"','').replace(ct.PCOM_META_IGNORE_QUOTES,'')
js_meta = info['sub_js_root']
constant_meta = info['sub_fileroot']
template_content = template_content.replace(sch.PM_TEMPLATE_TITLE_REPLACEMENT,title_meta)
template_content = template_content.replace(sch.PM_TEMPLATE_DESCRIPTION_REPLACEMENT,desc_meta)
template_content = template_content.replace(sch.PM_TEMPLATE_JS_NAME,js_meta)
template_content = template_content.replace(sch.PM_TEMPLATE_CONSTANT_NAME,constant_meta)
return template_content
def pcom_create_template_info_references(list,base_string,settings):
references = []
for entry in list:
sub_js_constant_root = entry['name'].lower().replace("'","-").replace(' ','-')
full_js_root = base_string.lower() + '-' + sub_js_constant_root
filename = ct.PCOM_POSTLIST_CONSTANT_NAME_BASE + full_js_root + '.js'
sub_fileroot = '_' + entry['name'].lower().replace(' ','_').replace("'","_").replace('-','_')
fileroot = base_string.lower() + sub_fileroot
base_name = sp.pcom_create_template_fileroot(base_string,settings)
url = base_name + '/' + sub_js_constant_root + "/"
s3url = url + 'index.html'
test_html = base_name + '-' + sub_js_constant_root + ".html"
title = sp.pcom_replace_quotes(entry['name'])
info = {'title': entry['name'],
'description': entry['description'],
'sub_js_root': ('-' + sub_js_constant_root),
'full_js_root': full_js_root,
'sub_fileroot': sub_fileroot,
'fileroot': fileroot,
'js_filename': filename,
'test_html': test_html,
'url': ("/" + url),
's3url': s3url,
'js_constant': '',
'template_content':''}
references.append(info)
return references
def pcom_create_archive_info_references(list,base_string,settings):
references = []
for entry in list:
sub_js_constant_root = entry['name']
full_js_root = base_string.lower() + '-' + sub_js_constant_root
filename = ct.PCOM_POSTLIST_CONSTANT_NAME_BASE + full_js_root + '.js'
sub_fileroot = '_' + entry['fileroot']
fileroot = base_string.lower() + sub_fileroot
base_name = sp.pcom_create_template_fileroot(base_string,settings)
url = base_name + '/' + sub_js_constant_root + "/"
s3url = url + 'index.html'
test_html = base_name + '-' + sub_js_constant_root + ".html"
title = entry['name']
info = {'title': entry['name'],
'description': '',
'sub_js_root': ('-' + sub_js_constant_root),
'full_js_root': full_js_root,
'sub_fileroot': sub_fileroot,
'fileroot': fileroot,
'js_filename': filename,
'test_html': test_html,
'url': ("/" + url),
's3url': s3url,
'js_constant': '',
'template_content':''}
references.append(info)
return references
# Pagination
def pcom_process_pagination(postlist,pg_name,fileroot,info):
pagination_constant = ''
processed = False
if info:
# manual refs
if info['next_ref'] or info['prev_ref']:
links,found = ls.pcom_find_manual_pagination(postlist,info['next_ref'],info['prev_ref'])
else:
links,found = ls.pcom_find_post_pagination(postlist,info['postname'],info['type'])
pagination,links_created = he.pcom_create_pagination_link(links)
if found:
pagination_constant = 'window._pagination_' + fileroot.replace('-','_') + ' = {' + ct.NL
pagination_constant += ct.T1 + 'pagination: ' + pagination
pagination_constant += '};'
processed = True
return pagination_constant,processed
# --- PROCESS PAGES section
def pcom_process_posts_page(postlist,archive,settings,list_meta,log,template_content):
info_out = {'template_content': '',
's3url': '',
'posts_name': '',
'posts_js_name': '',
'posts_js_constant': '',
'processed': False}
post_type = ct.PCOM_SETTINGS_TYPE_POSTS
posts_js = ct.PCOM_POSTLIST_CONSTANT_NAME_BASE + post_type + '.js'
posts_name = sp.pcom_create_template_fileroot(post_type,settings)
log['template_names'].append("Posts template base name: " + posts_name)
if template_content:
template_content = pcom_update_template_meta(template_content,{},no_meta=True)
# create postlist js
fileroot = post_type
posts_constant,processed = pcom_process_template_postlist(postlist,archive,post_type,settings,list_meta,fileroot)
if processed:
info_out['template_content'] = template_content
info_out['posts_js_name'] = posts_js
info_out['posts_js_constant'] = posts_constant
info_out['posts_name'] = posts_name
info_out['s3url'] = posts_name + "/index.html"
info_out['processed'] = True
return info_out,log
def pcom_process_info_base_pages(info_list,base_type,template_content,postlist,archive,settings,list_meta,log):
info_out = {'template_content': '',
's3url': '',
'base_name': '',
'js_name': '',
'js_constant': '',
'processed': False}
base_sub_info = []
js_name = ct.PCOM_POSTLIST_CONSTANT_NAME_BASE + base_type + '.js'
base_name = sp.pcom_create_template_fileroot(base_type,settings)
log['template_names'].append(base_type + " template base name: " + base_name)
if template_content:
main_content = pcom_update_template_meta(template_content,{},no_meta=True)
# create base info js
if base_type == ct.PCOM_SETTINGS_TYPE_ARCHIVE:
js_constant,base_sub_info,processed = \
pcom_process_archive_info(archive,settings,base_type,base_name,base_type)
else:
js_constant,base_sub_info,processed = \
pcom_process_template_list_info(info_list,settings,base_type,base_type)
if processed:
info_out['template_content'] = main_content
info_out['js_name'] = js_name
info_out['js_constant'] = js_constant
info_out['base_name'] = base_name
info_out['s3url'] = base_name + "/index.html"
info_out['processed'] = True
for ind,info in enumerate(base_sub_info):
sub_content = pcom_update_template_meta(template_content,info)
sub_js_constant,processed = \
pcom_process_template_postlist(postlist,archive,base_type,settings,list_meta,info['fileroot'],sub=info['title'])
base_sub_info[ind]['template_content'] = sub_content
base_sub_info[ind]['js_constant'] = sub_js_constant
return info_out,base_sub_info,log
# search config
def pcom_process_search_config(settings):
list_constant = ''
sub_title = ''
if settings['template_main_header_text'][ct.PCOM_SETTINGS_TYPE_SEARCH] != ct.PCOM_JSON_LOAD_ERROR:
sub_title = settings['template_main_header_text'][ct.PCOM_SETTINGS_TYPE_SEARCH]
list_constant += 'window._search_config = {' + ct.NL
list_constant += ct.T1 + "api: '" + settings['search_api_url'] + "'," + ct.NL
list_constant += ct.T1 + "content_ident: '" + sch.PM_SEARCH_CONTENT_IDENTIFIER + "'," + ct.NL
list_constant += ct.T1 + "header_ident: '" + sch.PM_SEARCH_QUERY_IDENTIFIER + "'," + ct.NL
list_constant += ct.T1 + "pagination_element: '" + sch.POSTLIST_PAGINATION + "'," + ct.NL
list_constant += ct.T1 + "page_numbers_selected_class: '" + ct.PCOM_PAGE_NUMBERS_CURRENT_CLASS + "'," + ct.NL
list_constant += ct.T1 + "pagination_ident: '" + sch.PM_SEARCH_PAGINATION_IDENTIFIER + "'," + ct.NL
list_constant += ct.T1 + "pagination_selector_id: '" + sch.PM_POSTLIST_PAGINATION_SELECTOR_IDENT + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_sub: '" + sch.PM_POSTLIST_PAGINATION_NUMBER + "'," + ct.NL
list_constant += ct.T1 + "pagination_number_ident: '" + sch.PM_POSTLIST_PAGINATION_IDENT + "'," + ct.NL
list_constant += ct.T1 + "sub_title: '" + sub_title + "'," + ct.NL
list_constant += ct.T1 + 'posts_per_page: ' + str(settings['posts_per_page']) + ct.NL
list_constant += '};'
return list_constant
def pcom_create_search_response(search_content,postlist,settings,list_meta):
# json data with js formatting for elements
list_data = {'entries': [], 'sticky': []}
if search_content:
for ind2,entry in enumerate(search_content):
# check post list
post = ls.pcom_find_post(postlist,entry['name'])
if post['postname'] != ct.PCOM_NO_ENTRY:
entry_html = he.pcom_create_search_post_list_entry(post,settings,list_meta,ignore_meta=True)
entry_html = sp.pcom_add_3tabs_to_content_line(entry_html)
# create json compliant data
entry_html = json.dumps(entry_html,indent=4)
list_data['entries'].append(entry_html)
# check template search content
post = ls.pcom_find_template_search_content(settings,entry['name'])
# print(json.dumps(post))
if post['name'] != ct.PCOM_NO_ENTRY:
url = sp.pcom_create_template_search_content_url(entry['name'],settings)
entry_html = he.pcom_create_template_search_list_entry(post,url,settings)
entry_html = sp.pcom_add_3tabs_to_content_line(entry_html)
# create json compliant data
entry_html = json.dumps(entry_html,indent=4)
list_data['entries'].append(entry_html)
return list_data
def pcom_search_content(search_content,search_term):
results = []
if search_term and search_content:
for ind,entry in enumerate(search_content):
search_term = search_term.lower().replace("'",ct.JS_APOS_REPLACE)
search_content[ind]['count'] = entry['content'].lower().count(search_term)
# order
search_content_ordered = sorted(search_content, key=lambda entry: entry['count'],reverse=True)
for entry in search_content_ordered:
entry_name = entry['name'].replace('.content','')
if entry['count'] > 0:
searched = {'name': entry_name, 'count': entry['count']}
results.append(searched)
return results
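# Hedged usage sketch (not part of the original module); the entry names and
# contents below are made up for illustration.
def _example_pcom_search_content():
    content = [
        {'name': 'first-post.content', 'content': 'Tea and more tea'},
        {'name': 'second-post.content', 'content': 'Coffee only'},
    ]
    # Entries are ranked by the number of matches, zero-match entries are
    # dropped and the '.content' suffix is stripped from the returned names.
    assert pcom_search_content(content, 'tea') == [{'name': 'first-post', 'count': 2}]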
| 41.550889
| 131
| 0.642729
|
010c53eb6e8d1948dcdcbc8a622e5af5598c5dc9
| 3,011
|
py
|
Python
|
examples/basic_operations/pause_ad.py
|
wfansh/google-ads-python
|
f94228abd210b0f7e69eadea6df7b60404a1e676
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/pause_ad.py
|
wfansh/google-ads-python
|
f94228abd210b0f7e69eadea6df7b60404a1e676
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/pause_ad.py
|
wfansh/google-ads-python
|
f94228abd210b0f7e69eadea6df7b60404a1e676
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example pauses an ad."""
import argparse
import sys
from google.api_core import protobuf_helpers
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, ad_group_id, ad_id):
ad_group_ad_service = client.get_service("AdGroupAdService")
ad_group_ad_operation = client.get_type("AdGroupAdOperation")
ad_group_ad = ad_group_ad_operation.update
ad_group_ad.resource_name = ad_group_ad_service.ad_group_ad_path(
customer_id, ad_group_id, ad_id
)
    ad_group_ad.status = client.enums.AdGroupAdStatusEnum.PAUSED
client.copy_from(
ad_group_ad_operation.update_mask,
protobuf_helpers.field_mask(None, ad_group_ad._pb),
)
ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(
customer_id=customer_id, operations=[ad_group_ad_operation]
)
print(
f"Paused ad group ad {ad_group_ad_response.results[0].resource_name}."
)
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v8")
parser = argparse.ArgumentParser(
description=("Pauses an ad in the specified customer's ad group.")
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-a", "--ad_group_id", type=str, required=True, help="The ad group ID."
)
parser.add_argument(
"-i", "--ad_id", type=str, required=True, help="The ad ID."
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id, args.ad_group_id, args.ad_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f' Error with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
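# Hedged usage note: with a valid google-ads.yaml in the home directory, the
# example above would typically be run as (all IDs are placeholders):
#     python pause_ad.py -c 1234567890 -a 2345678901 -i 3456789012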
| 35.011628
| 79
| 0.696114
|
92a4cd138513303317f477cf2bd344622d260fcd
| 16,471
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20181101/get_virtual_network_gateway_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20181101/get_virtual_network_gateway_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20181101/get_virtual_network_gateway_connection.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayConnectionResult',
'AwaitableGetVirtualNetworkGatewayConnectionResult',
'get_virtual_network_gateway_connection',
]
@pulumi.output_type
class GetVirtualNetworkGatewayConnectionResult:
"""
A common class for general resource information
"""
def __init__(__self__, authorization_key=None, connection_protocol=None, connection_status=None, connection_type=None, egress_bytes_transferred=None, enable_bgp=None, etag=None, express_route_gateway_bypass=None, id=None, ingress_bytes_transferred=None, ipsec_policies=None, local_network_gateway2=None, location=None, name=None, peer=None, provisioning_state=None, resource_guid=None, routing_weight=None, shared_key=None, tags=None, tunnel_connection_status=None, type=None, use_policy_based_traffic_selectors=None, virtual_network_gateway1=None, virtual_network_gateway2=None):
if authorization_key and not isinstance(authorization_key, str):
raise TypeError("Expected argument 'authorization_key' to be a str")
pulumi.set(__self__, "authorization_key", authorization_key)
if connection_protocol and not isinstance(connection_protocol, str):
raise TypeError("Expected argument 'connection_protocol' to be a str")
pulumi.set(__self__, "connection_protocol", connection_protocol)
if connection_status and not isinstance(connection_status, str):
raise TypeError("Expected argument 'connection_status' to be a str")
pulumi.set(__self__, "connection_status", connection_status)
if connection_type and not isinstance(connection_type, str):
raise TypeError("Expected argument 'connection_type' to be a str")
pulumi.set(__self__, "connection_type", connection_type)
if egress_bytes_transferred and not isinstance(egress_bytes_transferred, float):
raise TypeError("Expected argument 'egress_bytes_transferred' to be a float")
pulumi.set(__self__, "egress_bytes_transferred", egress_bytes_transferred)
if enable_bgp and not isinstance(enable_bgp, bool):
raise TypeError("Expected argument 'enable_bgp' to be a bool")
pulumi.set(__self__, "enable_bgp", enable_bgp)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if express_route_gateway_bypass and not isinstance(express_route_gateway_bypass, bool):
raise TypeError("Expected argument 'express_route_gateway_bypass' to be a bool")
pulumi.set(__self__, "express_route_gateway_bypass", express_route_gateway_bypass)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ingress_bytes_transferred and not isinstance(ingress_bytes_transferred, float):
raise TypeError("Expected argument 'ingress_bytes_transferred' to be a float")
pulumi.set(__self__, "ingress_bytes_transferred", ingress_bytes_transferred)
if ipsec_policies and not isinstance(ipsec_policies, list):
raise TypeError("Expected argument 'ipsec_policies' to be a list")
pulumi.set(__self__, "ipsec_policies", ipsec_policies)
if local_network_gateway2 and not isinstance(local_network_gateway2, dict):
raise TypeError("Expected argument 'local_network_gateway2' to be a dict")
pulumi.set(__self__, "local_network_gateway2", local_network_gateway2)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peer and not isinstance(peer, dict):
raise TypeError("Expected argument 'peer' to be a dict")
pulumi.set(__self__, "peer", peer)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if routing_weight and not isinstance(routing_weight, int):
raise TypeError("Expected argument 'routing_weight' to be a int")
pulumi.set(__self__, "routing_weight", routing_weight)
if shared_key and not isinstance(shared_key, str):
raise TypeError("Expected argument 'shared_key' to be a str")
pulumi.set(__self__, "shared_key", shared_key)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tunnel_connection_status and not isinstance(tunnel_connection_status, list):
raise TypeError("Expected argument 'tunnel_connection_status' to be a list")
pulumi.set(__self__, "tunnel_connection_status", tunnel_connection_status)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if use_policy_based_traffic_selectors and not isinstance(use_policy_based_traffic_selectors, bool):
raise TypeError("Expected argument 'use_policy_based_traffic_selectors' to be a bool")
pulumi.set(__self__, "use_policy_based_traffic_selectors", use_policy_based_traffic_selectors)
if virtual_network_gateway1 and not isinstance(virtual_network_gateway1, dict):
raise TypeError("Expected argument 'virtual_network_gateway1' to be a dict")
pulumi.set(__self__, "virtual_network_gateway1", virtual_network_gateway1)
if virtual_network_gateway2 and not isinstance(virtual_network_gateway2, dict):
raise TypeError("Expected argument 'virtual_network_gateway2' to be a dict")
pulumi.set(__self__, "virtual_network_gateway2", virtual_network_gateway2)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> Optional[str]:
"""
The authorizationKey.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="connectionProtocol")
def connection_protocol(self) -> Optional[str]:
"""
Connection protocol used for this connection
"""
return pulumi.get(self, "connection_protocol")
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> str:
"""
Virtual network Gateway connection status. Possible values are 'Unknown', 'Connecting', 'Connected' and 'NotConnected'.
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> str:
"""
        Gateway connection type. Possible values are: 'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient'.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="egressBytesTransferred")
def egress_bytes_transferred(self) -> float:
"""
The egress bytes transferred in this connection.
"""
return pulumi.get(self, "egress_bytes_transferred")
@property
@pulumi.getter(name="enableBgp")
def enable_bgp(self) -> Optional[bool]:
"""
EnableBgp flag
"""
return pulumi.get(self, "enable_bgp")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteGatewayBypass")
def express_route_gateway_bypass(self) -> Optional[bool]:
"""
Bypass ExpressRoute Gateway for data forwarding
"""
return pulumi.get(self, "express_route_gateway_bypass")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ingressBytesTransferred")
def ingress_bytes_transferred(self) -> float:
"""
The ingress bytes transferred in this connection.
"""
return pulumi.get(self, "ingress_bytes_transferred")
@property
@pulumi.getter(name="ipsecPolicies")
def ipsec_policies(self) -> Optional[Sequence['outputs.IpsecPolicyResponse']]:
"""
The IPSec Policies to be considered by this connection.
"""
return pulumi.get(self, "ipsec_policies")
@property
@pulumi.getter(name="localNetworkGateway2")
def local_network_gateway2(self) -> Optional['outputs.LocalNetworkGatewayResponse']:
"""
The reference to local network gateway resource.
"""
return pulumi.get(self, "local_network_gateway2")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peer(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference to peerings resource.
"""
return pulumi.get(self, "peer")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the VirtualNetworkGatewayConnection resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the VirtualNetworkGatewayConnection resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="routingWeight")
def routing_weight(self) -> Optional[int]:
"""
The routing weight.
"""
return pulumi.get(self, "routing_weight")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> Optional[str]:
"""
The IPSec shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tunnelConnectionStatus")
def tunnel_connection_status(self) -> Sequence['outputs.TunnelConnectionHealthResponse']:
"""
Collection of all tunnels' connection health status.
"""
return pulumi.get(self, "tunnel_connection_status")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="usePolicyBasedTrafficSelectors")
def use_policy_based_traffic_selectors(self) -> Optional[bool]:
"""
Enable policy-based traffic selectors.
"""
return pulumi.get(self, "use_policy_based_traffic_selectors")
@property
@pulumi.getter(name="virtualNetworkGateway1")
def virtual_network_gateway1(self) -> 'outputs.VirtualNetworkGatewayResponse':
"""
The reference to virtual network gateway resource.
"""
return pulumi.get(self, "virtual_network_gateway1")
@property
@pulumi.getter(name="virtualNetworkGateway2")
def virtual_network_gateway2(self) -> Optional['outputs.VirtualNetworkGatewayResponse']:
"""
The reference to virtual network gateway resource.
"""
return pulumi.get(self, "virtual_network_gateway2")
class AwaitableGetVirtualNetworkGatewayConnectionResult(GetVirtualNetworkGatewayConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayConnectionResult(
authorization_key=self.authorization_key,
connection_protocol=self.connection_protocol,
connection_status=self.connection_status,
connection_type=self.connection_type,
egress_bytes_transferred=self.egress_bytes_transferred,
enable_bgp=self.enable_bgp,
etag=self.etag,
express_route_gateway_bypass=self.express_route_gateway_bypass,
id=self.id,
ingress_bytes_transferred=self.ingress_bytes_transferred,
ipsec_policies=self.ipsec_policies,
local_network_gateway2=self.local_network_gateway2,
location=self.location,
name=self.name,
peer=self.peer,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
routing_weight=self.routing_weight,
shared_key=self.shared_key,
tags=self.tags,
tunnel_connection_status=self.tunnel_connection_status,
type=self.type,
use_policy_based_traffic_selectors=self.use_policy_based_traffic_selectors,
virtual_network_gateway1=self.virtual_network_gateway1,
virtual_network_gateway2=self.virtual_network_gateway2)
def get_virtual_network_gateway_connection(resource_group_name: Optional[str] = None,
virtual_network_gateway_connection_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayConnectionResult:
"""
A common class for general resource information
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_connection_name: The name of the virtual network gateway connection.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayConnectionName'] = virtual_network_gateway_connection_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20181101:getVirtualNetworkGatewayConnection', __args__, opts=opts, typ=GetVirtualNetworkGatewayConnectionResult).value
return AwaitableGetVirtualNetworkGatewayConnectionResult(
authorization_key=__ret__.authorization_key,
connection_protocol=__ret__.connection_protocol,
connection_status=__ret__.connection_status,
connection_type=__ret__.connection_type,
egress_bytes_transferred=__ret__.egress_bytes_transferred,
enable_bgp=__ret__.enable_bgp,
etag=__ret__.etag,
express_route_gateway_bypass=__ret__.express_route_gateway_bypass,
id=__ret__.id,
ingress_bytes_transferred=__ret__.ingress_bytes_transferred,
ipsec_policies=__ret__.ipsec_policies,
local_network_gateway2=__ret__.local_network_gateway2,
location=__ret__.location,
name=__ret__.name,
peer=__ret__.peer,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
routing_weight=__ret__.routing_weight,
shared_key=__ret__.shared_key,
tags=__ret__.tags,
tunnel_connection_status=__ret__.tunnel_connection_status,
type=__ret__.type,
use_policy_based_traffic_selectors=__ret__.use_policy_based_traffic_selectors,
virtual_network_gateway1=__ret__.virtual_network_gateway1,
virtual_network_gateway2=__ret__.virtual_network_gateway2)
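# Hedged usage sketch (not part of the generated SDK): inside a running Pulumi
# program the function above can be called directly; the resource group and
# connection names below are placeholders.
def _example_get_virtual_network_gateway_connection():
    result = get_virtual_network_gateway_connection(
        resource_group_name="example-rg",
        virtual_network_gateway_connection_name="example-connection",
    )
    return result.connection_status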
| 43.459103
| 584
| 0.689697
|
87e616d76cd779507b22c724d0d80c434d8cfb6f
| 4,562
|
py
|
Python
|
var/spack/repos/builtin/packages/bzip2/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/bzip2/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2018-07-06T19:11:46.000Z
|
2018-07-06T19:12:28.000Z
|
var/spack/repos/builtin/packages/bzip2/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bzip2(Package):
"""bzip2 is a freely available, patent free high-quality data
compressor. It typically compresses files to within 10% to 15%
of the best available techniques (the PPM family of statistical
compressors), whilst being around twice as fast at compression
and six times faster at decompression."""
homepage = "https://sourceware.org/bzip2/"
url = "https://sourceware.org/pub/bzip2/bzip2-1.0.8.tar.gz"
# The server is sometimes a bit slow to respond
fetch_options = {'timeout': 60}
version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269', fetch_options=fetch_options)
version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b', fetch_options=fetch_options)
version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd', fetch_options=fetch_options)
variant('shared', default=True, description='Enables the build of shared libraries.')
depends_on('diffutils', type='build')
# override default implementation
@property
def libs(self):
shared = '+shared' in self.spec
return find_libraries(
'libbz2', root=self.prefix, shared=shared, recursive=True
)
def patch(self):
# bzip2 comes with two separate Makefiles for static and dynamic builds
# Tell both to use Spack's compiler wrapper instead of GCC
filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')
filter_file(
r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'
)
# The Makefiles use GCC flags that are incompatible with PGI
if self.compiler.name == 'pgi':
filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')
filter_file('-Wall -Winline', '-Minform=inform', 'Makefile-libbz2_so') # noqa
# Patch the link line to use RPATHs on macOS
if 'darwin' in self.spec.architecture:
v = self.spec.version
v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))
kwargs = {'ignore_absent': False, 'backup': False, 'string': True}
mf = FileFilter('Makefile-libbz2_so')
mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa
.format(v2, v3),
'$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa
.format(v1, v2, v3, v3),
**kwargs)
mf.filter(
'$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa
'$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'
.format(v3), **kwargs)
mf.filter(
'rm -f libbz2.so.{0}'.format(v2),
'rm -f libbz2.{0}.dylib'.format(v2), **kwargs)
mf.filter(
'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),
'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),
**kwargs)
def install(self, spec, prefix):
# Build the dynamic library first
if '+shared' in spec:
make('-f', 'Makefile-libbz2_so')
# Build the static library and everything else
make()
make('install', 'PREFIX={0}'.format(prefix))
if '+shared' in spec:
install('bzip2-shared', join_path(prefix.bin, 'bzip2'))
v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))
if 'darwin' in self.spec.architecture:
lib = 'libbz2.dylib'
lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)
for v in (v1, v2, v3))
else:
lib = 'libbz2.so'
lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)
for v in (v1, v2, v3))
install(lib3, join_path(prefix.lib, lib3))
with working_dir(prefix.lib):
for l in (lib, lib1, lib2):
symlink(lib3, l)
with working_dir(prefix.bin):
force_remove('bunzip2', 'bzcat')
symlink('bzip2', 'bunzip2')
symlink('bzip2', 'bzcat')
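# Hedged usage note: with this package file available to a Spack instance, a
# typical build is driven from the command line, e.g.
#     spack install bzip2            # default build, +shared
#     spack install bzip2 ~shared    # static-only build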
| 42.240741
| 171
| 0.581105
|
cf550d38058014d0a73c53d26fd36d030369d8e3
| 688
|
py
|
Python
|
api/citations/serializers.py
|
fabmiz/osf.io
|
8d86af3f0a6e5388bd5b18383e68e27b65a66247
|
[
"Apache-2.0"
] | 1
|
2015-10-02T18:35:53.000Z
|
2015-10-02T18:35:53.000Z
|
api/citations/serializers.py
|
fabmiz/osf.io
|
8d86af3f0a6e5388bd5b18383e68e27b65a66247
|
[
"Apache-2.0"
] | 18
|
2020-03-24T15:26:02.000Z
|
2022-03-08T21:30:39.000Z
|
api/citations/serializers.py
|
fabmiz/osf.io
|
8d86af3f0a6e5388bd5b18383e68e27b65a66247
|
[
"Apache-2.0"
] | 1
|
2019-07-16T00:14:49.000Z
|
2019-07-16T00:14:49.000Z
|
from rest_framework import serializers as ser
from api.base.serializers import JSONAPISerializer, DateByVersion
class CitationSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'title',
'short_title',
'summary',
'id'
])
id = ser.CharField(source='_id', required=True)
title = ser.CharField(max_length=200)
date_parsed = DateByVersion(read_only=True, help_text='Datetime the csl file was last parsed')
short_title = ser.CharField(max_length=500)
summary = ser.CharField(max_length=200)
def get_absolute_url(self, obj):
return obj.get_absolute_url()
class Meta:
type_ = 'citation-styles'
| 27.52
| 98
| 0.694767
|
92694057f51d90e6481e437275a49d182ee09358
| 1,309
|
py
|
Python
|
platform/mcu/atsamd5x_e5x/atsam_binadder_crc32.py
|
ruoranluomu/AliOS-Things
|
d0f3431bcacac5b61645e9beb231a0a53be8078b
|
[
"Apache-2.0"
] | 4
|
2019-11-22T04:28:29.000Z
|
2021-07-06T10:45:10.000Z
|
platform/mcu/atsamd5x_e5x/atsam_binadder_crc32.py
|
ruoranluomu/AliOS-Things
|
d0f3431bcacac5b61645e9beb231a0a53be8078b
|
[
"Apache-2.0"
] | 1
|
2019-04-02T10:03:10.000Z
|
2019-04-02T10:03:10.000Z
|
platform/mcu/atsamd5x_e5x/atsam_binadder_crc32.py
|
ruoranluomu/AliOS-Things
|
d0f3431bcacac5b61645e9beb231a0a53be8078b
|
[
"Apache-2.0"
] | 6
|
2019-08-30T09:43:03.000Z
|
2021-04-05T04:20:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import datetime
import sys
import argparse
import logging
import os
import binascii
import struct
#
# MAIN top level application entry point
#
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true')
parser.add_argument('--infile', action='store')
args = parser.parse_args()
if args.debug:
debug_level = logging.DEBUG
else:
debug_level = logging.ERROR
FORMAT = '%(asctime)s :: %(levelname)s :: %(name)s :: %(funcName)s :: %(message)s'
logging.basicConfig(level=debug_level, format=FORMAT)
log = logging.getLogger(__name__)
infile = os.path.abspath(args.infile)
outfile = infile[:-4] + "_crc" + infile[-4:]
log.info("IN file: %s", infile)
log.info("OUT file: %s", outfile)
buf = open(args.infile,'rb').read()
while len(buf) % 4 > 0:
buf += struct.pack('<B', 0xFF)
log.info("Padding 0xFF")
log.info("File Length : %s", len(buf))
crc32 = (binascii.crc32(buf) & 0xFFFFFFFF)
log.info("Computed crc : %s",crc32)
print("Computed CRC 0x{0:08X}".format(crc32))
out = open(outfile,'wb')
out.write(buf)
record = struct.pack('<L',crc32)
out.write(record)
out.close()
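    # Hedged usage note: the script pads the input to a 4-byte boundary with
    # 0xFF bytes, appends the little-endian CRC32 of the padded image and
    # writes the result next to the input, e.g.
    #     python atsam_binadder_crc32.py --infile app.bin   ->  app_crc.bin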
| 24.698113
| 86
| 0.628724
|
2a7b76d594e57a7f0bdbc11a71c75c921af67bde
| 12,435
|
py
|
Python
|
nipype/interfaces/mrtrix/tracking.py
|
sebastientourbier/nipype_lts5
|
3b9718d154443574cc6a5d0bbd76ccf7964e6a45
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/mrtrix/tracking.py
|
sebastientourbier/nipype_lts5
|
3b9718d154443574cc6a5d0bbd76ccf7964e6a45
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/mrtrix/tracking.py
|
sebastientourbier/nipype_lts5
|
3b9718d154443574cc6a5d0bbd76ccf7964e6a45
|
[
"BSD-3-Clause"
] | 1
|
2020-02-19T13:47:05.000Z
|
2020-02-19T13:47:05.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
from nipype.utils.filemanip import split_filename
import os, os.path as op
class Tracks2ProbInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
desc='tract file')
template_file = File(exists=True, argstr='-template %s', position=1,
desc='an image file to be used as a template for the output (the output image will have the same transform and field of view)')
voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=2, minlen=3, maxlen=3,
desc='Three comma-separated numbers giving the size of each voxel in mm.')
colour = traits.Bool(argstr='-colour', position=3, desc="add colour to the output image according to the direction of the tracks.")
fraction = traits.Bool(argstr='-fraction', position=3, desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.")
output_datatype = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-datatype %s', position=2,
desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"') #, usedefault=True)
resample = traits.Float(argstr='-resample %d', position=3,
units='mm', desc='resample the tracks at regular intervals using Hermite interpolation. If omitted, the program will select an appropriate interpolation factor automatically.')
out_filename = File(genfile=True, argstr='%s', position= -1, desc='output data file')
class Tracks2ProbOutputSpec(TraitedSpec):
tract_image = File(exists=True, desc='Output tract count or track density image')
class Tracks2Prob(CommandLine):
"""
Convert a tract file into a map of the fraction of tracks to enter
each voxel - also known as a tract density image (TDI) - in MRtrix's
image format (.mif). This can be viewed using MRview or converted to
Nifti using MRconvert.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> tdi = mrt.Tracks2Prob()
>>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck'
>>> tdi.inputs.colour = True
>>> tdi.run() # doctest: +SKIP
"""
_cmd = 'tracks2prob'
input_spec=Tracks2ProbInputSpec
output_spec=Tracks2ProbOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tract_image'] = op.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '_TDI.mif'
class StreamlineTrackInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the image containing the source data.' \
'The type of data required depends on the type of tracking as set in the preceding argument. For DT methods, ' \
'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.')
seed_file = File(exists=True, argstr='-seed %s', mandatory=True, desc='seed file')
seed_spec = traits.List(traits.Int, desc='seed specification in voxels and radius (x y z r)',
argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='voxels')
include_file = File(exists=True, argstr='-include %s', mandatory=False, desc='inclusion file')
include_spec = traits.List(traits.Int, desc='inclusion specification in voxels and radius (x y z r)',
argstr='-include %s', minlen=4, maxlen=4, sep=',', units='voxels')
exclude_file = File(exists=True, argstr='-exclude %s', mandatory=False, desc='exclusion file')
exclude_spec = traits.List(traits.Int, desc='exclusion specification in voxels and radius (x y z r)',
argstr='-exclude %s', minlen=4, maxlen=4, sep=',', units='voxels')
mask_file = File(exists=True, argstr='-mask %s', mandatory=False, desc='mask file. Only tracks within mask.')
mask_spec = traits.List(traits.Int, desc='Mask specification in voxels and radius (x y z r). Tracks will be terminated when they leave the ROI.',
argstr='-mask %s', minlen=4, maxlen=4, sep=',', units='voxels')
gradient_encoding_file = File(exists=True, argstr='-grad %s', mandatory=False,
desc='Gradient encoding, supplied as a 4xN text file with each line in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix')
inputmodel = traits.Enum('DT_STREAM', 'DT_PROB','SD_PROB', 'SD_STREAM',
argstr='%s', desc='input model type', usedefault=True, position=-3)
stop = traits.Bool(argstr='-stop', desc="stop track as soon as it enters any of the include regions.")
do_not_precompute = traits.Bool(argstr='-noprecomputed', desc="Turns off precomputation of the legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4.")
unidirectional = traits.Bool(argstr='-unidirectional', desc="Track from the seed point in one direction only (default is to track in both directions).")
no_mask_interpolation = traits.Bool(argstr='-nomaskinterp', desc="Turns off trilinear interpolation of mask images.")
step_size = traits.Float(argstr='-step %s', units='mm',
desc="Set the step size of the algorithm in mm (default is 0.2).")
minimum_radius_of_curvature = traits.Float(argstr='-curvature %s', units='mm',
desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)")
desired_number_of_tracks = traits.Int(argstr='-number %d', desc='Sets the desired number of tracks.' \
'The program will continue to generate tracks until this number of tracks have been selected and written to the output file' \
'(default is 100 for *_STREAM methods, 1000 for *_PROB methods).')
maximum_number_of_tracks = traits.Int(argstr='-maxnum %d', desc='Sets the maximum number of tracks to generate.' \
"The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached" \
'(default is 100 x number).')
minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
maximum_tract_length = traits.Float(argstr='-length %s', units='mm',
desc="Sets the maximum length of any track in millimeters (default is 200 mm).")
cutoff_value = traits.Float(argstr='-cutoff %s', units='NA',
desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).")
initial_cutoff_value = traits.Float(argstr='-initcutoff %s', units='NA',
desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).")
initial_direction = traits.List(traits.Int, desc='Specify the initial tracking direction as a vector',
argstr='-initdirection %s', minlen=2, maxlen=2, units='voxels')
out_file = File(argstr='%s', position= -1, genfile=True, desc='output data file')
class StreamlineTrackOutputSpec(TraitedSpec):
tracked = File(exists=True, desc='output file containing reconstructed tracts')
class StreamlineTrack(CommandLine):
"""
Performs tractography using one of the following models:
'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream',
Where 'dt' stands for diffusion tensor, 'sd' stands for spherical
deconvolution, and 'prob' stands for probabilistic.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> strack = mrt.StreamlineTrack()
>>> strack.inputs.inputmodel = 'SD_PROB'
>>> strack.inputs.in_file = 'data.Bfloat'
>>> strack.inputs.seed_file = 'seed_mask.nii'
>>> strack.run() # doctest: +SKIP
"""
_cmd = 'streamtrack'
input_spec = StreamlineTrackInputSpec
output_spec = StreamlineTrackOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tracked'] = op.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '_tracked.tck'
class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec):
gradient_encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=-2,
desc='Gradient encoding, supplied as a 4xN text file with each line in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix')
class DiffusionTensorStreamlineTrack(StreamlineTrack):
"""
Specialized interface to StreamlineTrack. This interface is used for
streamline tracking from diffusion tensor data, and calls the MRtrix
function 'streamtrack' with the option 'DT_STREAM'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> dtstrack = mrt.DiffusionTensorStreamlineTrack()
>>> dtstrack.inputs.in_file = 'data.Bfloat'
>>> dtstrack.inputs.seed_file = 'seed_mask.nii'
>>> dtstrack.run() # doctest: +SKIP
"""
input_spec = DiffusionTensorStreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "DT_STREAM"
return super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs)
class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec(StreamlineTrackInputSpec):
maximum_number_of_trials = traits.Int(argstr='-trials %s', units='mm',
desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).")
class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
"""
Performs probabilistic tracking using spherically deconvolved data
Specialized interface to StreamlineTrack. This interface is used for
probabilistic tracking from spherically deconvolved data, and calls
the MRtrix function 'streamtrack' with the option 'SD_PROB'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack()
>>> sdprobtrack.inputs.in_file = 'data.Bfloat'
>>> sdprobtrack.inputs.seed_file = 'seed_mask.nii'
>>> sdprobtrack.run() # doctest: +SKIP
"""
input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "SD_PROB"
return super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
"""
Performs streamline tracking using spherically deconvolved data
Specialized interface to StreamlineTrack. This interface is used for
streamline tracking from spherically deconvolved data, and calls
the MRtrix function 'streamtrack' with the option 'SD_STREAM'
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack()
>>> sdtrack.inputs.in_file = 'data.Bfloat'
>>> sdtrack.inputs.seed_file = 'seed_mask.nii'
>>> sdtrack.run() # doctest: +SKIP
"""
input_spec = StreamlineTrackInputSpec
def __init__(self, command=None, **inputs):
inputs["inputmodel"] = "SD_STREAM"
return super(SphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
| 52.468354
| 230
| 0.690149
|
5ced2b6ce34a01cbc4cbbd77236cad99a9527e5d
| 8,032
|
py
|
Python
|
sepa_sctinst/sct_inst_interbank.py
|
lquastana/sepa-sctinst
|
066458b8a58712a564520829b0f27caf1397f4ea
|
[
"Apache-2.0"
] | 2
|
2021-08-22T03:20:13.000Z
|
2022-03-24T00:07:20.000Z
|
sepa_sctinst/sct_inst_interbank.py
|
lquastana/sepa-sctinst
|
066458b8a58712a564520829b0f27caf1397f4ea
|
[
"Apache-2.0"
] | 13
|
2021-08-22T03:03:08.000Z
|
2021-09-01T21:29:16.000Z
|
sepa_sctinst/sct_inst_interbank.py
|
lquastana/sepa-sctinst
|
066458b8a58712a564520829b0f27caf1397f4ea
|
[
"Apache-2.0"
] | null | null | null |
from sepa_sctinst.participant import Participant
import xml.etree.ElementTree as ET
from datetime import datetime,date
import random
from faker import Faker
SERVICE_LEVEL_CODE = 'SEPA'
LOCAL_INSTRUMENT = 'INST'
CHARGE_BEARER='SLEV'
CURRENCY='EUR'
class GroupHeader:
"""A class to represent the group header in interbank SCTInst message
Set of characteristics shared by all individual transactions included in the message.
"""
message_identification:str
"""Message Identification assigned by the
instructing party, and sent to the next party in the
chain to unambiguously identify the message."""
creation_datetime:datetime
"""Date and time at which the message was created."""
interbank_sttlmt_date:date
"""Date on which the amount of money ceases to be
available to the agent that owes it and when the
amount of money becomes available to the agent
to which it is due."""
sttlmt_method:str
"""Method used to settle the (batch of) payment
instructions.Only CLRG, INGA and INDA are allowed"""
def __init__(self,
message_identification:str,
creation_datetime:datetime,
interbank_sttlmt_date:date,
sttlmt_method:str
):
"""Initializes a group header object
"""
self.message_identification = message_identification
self.creation_datetime = creation_datetime
self.interbank_sttlmt_date = interbank_sttlmt_date
self.sttlmt_method = sttlmt_method
class Transaction:
"""A class to represent a transaction in interbank SCTInst message
"""
beneficiary:Participant
"""Beneficiary informations as :class:`sepa_sctinst.participant.Participant`"""
amount:float
"""The amount of the SCT Inst in Euro """
end_to_end_id:str
"""Original End To End Identification. Unique identification, as assigned by the original
initiating party """
tx_id:str
"""Original Transaction Identification. Unique identification, as assigned by the original
first instructing agent """
acceptance_datetime:datetime
"""Point in time when the payment order from the
initiating party meets the processing conditions of
the account servicing agent."""
reference:str
"""Reference information provided by the creditor to
allow the identification of the underlying
documents."""
remittance_information:str
"""Remittance information"""
def __init__(self,beneficiary:Participant,amount:float,end_to_end_id:str,tx_id:str,acceptance_datetime:datetime,reference:str,remittance_information:str):
"""Initializes a transaction object
"""
self.beneficiary = beneficiary
self.amount = amount
self.tx_id = tx_id
self.end_to_end_id = end_to_end_id
self.acceptance_datetime = acceptance_datetime
self.reference = reference
self.remittance_information = remittance_information
class SCTInst:
"""A class to represent a SCTInst interbank message
"""
group_header:GroupHeader
""":class:`sepa_sctinst.sct_inst.GroupHeader` object shared by all individual transactions included in the message. """
originator:Participant
"""Originator :class:`sepa_sctinst.participant.Participant` object that initiates the payment. """
transaction:Transaction
""":class:`sepa_sctinst.sct_inst.Transaction` object give information about the transaction. """
def __init__(self,group_header:GroupHeader,originator:Participant,transaction:Transaction):
"""Initializes a SCTInst object
"""
self.group_header = group_header
self.originator = originator
self.transaction = transaction
@staticmethod
def random():
"""Generate random SCTInst object
Returns :class:`sepa_sctinst.sct_inst_interbank.SCTInst` object with random value
"""
fake = Faker()
group_header = GroupHeader(fake.bothify(text='MSGID?????????'),fake.date_time(),fake.date_object(),'CLRG')
originator = Participant(fake.lexify(text='????',letters='ABCDEFGRHIJKL') + fake.bank_country() + 'PPXXX',
fake.iban(),fake.name())
beneficiary = Participant(fake.lexify(text='????',letters='ABCDEFGRHIJKL') + fake.bank_country() + 'PPXXX',
fake.iban(),fake.name())
transaction = Transaction(beneficiary,
str(round(random.uniform(1,2), 2)),
fake.bothify(text='ENDTOEND?????????'),
fake.bothify(text='TXID?????????'),
fake.date_time(),
fake.bothify(text='REF?????????'),
fake.bothify(text='REMINF?????????'))
return SCTInst(group_header,originator,transaction)
def to_xml(self):
""" Generate message as XML Document
Returns a string as XML document
"""
root = ET.Element("Document")
root.set('xmlns',"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02")
root_fito = ET.SubElement(root, "FIToFICstmrCdtTrf")
self.xml_header(root_fito)
self.xml_transaction(root_fito)
ET.ElementTree(root)
return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')
def xml_transaction(self, root_fito):
cdt_tx = ET.SubElement(root_fito, "CdtTrfTxInf")
cdt_tx_pmt = ET.SubElement(cdt_tx, "PmtId")
cdt_tx_pmt_e2e = ET.SubElement(cdt_tx_pmt, "EndToEndId")
cdt_tx_pmt_e2e.text = self.transaction.end_to_end_id
cdt_tx_pmt_id = ET.SubElement(cdt_tx_pmt, "TxId")
cdt_tx_pmt_id.text = self.transaction.tx_id
cdt_tx_pmt_amt = ET.SubElement(cdt_tx, "IntrBkSttlmAmt")
cdt_tx_pmt_amt.set('Ccy',CURRENCY)
cdt_tx_pmt_amt.text = str(self.transaction.amount)
cdt_tx_pmt_acceptance_datetime = ET.SubElement(cdt_tx, "AccptncDtTm")
cdt_tx_pmt_acceptance_datetime.text = self.transaction.acceptance_datetime.isoformat()
cdt_tx_pmt_chrbr = ET.SubElement(cdt_tx, "ChrgBr")
cdt_tx_pmt_chrbr.text = CHARGE_BEARER
Participant.to_xml(self,cdt_tx,self.transaction,'Dbtr')
Participant.to_xml(self,cdt_tx,self.transaction,'Cdtr')
def xml_header(self, root_fito):
grp_header = ET.SubElement(root_fito, "GrpHdr")
header_id = ET.SubElement(grp_header, "MsgId")
header_id.text = str(self.group_header.message_identification)
header_cre_dt_tm = ET.SubElement(grp_header, "CreDtTm")
header_cre_dt_tm.text = self.group_header.creation_datetime.isoformat()
header_nb_txs = ET.SubElement(grp_header, "NbOfTxs")
header_nb_txs.text = '1'
header_tt_amount = ET.SubElement(grp_header, "TtlIntrBkSttlmAmt")
header_tt_amount.set('Ccy','EUR')
header_tt_amount.text = str(self.transaction.amount)
header_sttlm_dt = ET.SubElement(grp_header, "IntrBkSttlmDt")
header_sttlm_dt.text = self.group_header.interbank_sttlmt_date.isoformat()
header_sttlm = ET.SubElement(grp_header, "SttlmInf")
header_sttlm_mdt = ET.SubElement(header_sttlm, "SttlmMtd")
header_sttlm_mdt.text = self.group_header.sttlmt_method
header_pmt_tp = ET.SubElement(grp_header, "PmtTpInf")
header_pmt_tp_svc = ET.SubElement(header_pmt_tp, "SvcLvl")
header_pmt_tp_svc_cd = ET.SubElement(header_pmt_tp_svc, "Cd")
header_pmt_tp_svc_cd.text = SERVICE_LEVEL_CODE
header_pmt_tp_lcl_inst = ET.SubElement(header_pmt_tp, "LclInstrm")
header_pmt_tp_lcl_inst_cd = ET.SubElement(header_pmt_tp_lcl_inst, "Cd")
header_pmt_tp_lcl_inst_cd.text = LOCAL_INSTRUMENT
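# --- Added sketch (not part of the original module): building a random interbank
# SCTInst message and rendering its pacs.008 XML. It only uses the random()
# helper defined above; all values are Faker-generated placeholders.
if __name__ == '__main__':
    sample_message = SCTInst.random()
    print(sample_message.to_xml())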
| 39.762376
| 158
| 0.661604
|
abd70c06800f8774a0c530cf34c9929fb3e09dd9
| 1,348
|
py
|
Python
|
wxAnimation/decoders/flif_animation_decoder/filebuf.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | 2
|
2020-03-23T11:29:56.000Z
|
2021-11-24T22:10:07.000Z
|
wxAnimation/decoders/flif_animation_decoder/filebuf.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | null | null | null |
wxAnimation/decoders/flif_animation_decoder/filebuf.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Helper for getting buffers from file objects
from __future__ import unicode_literals, division
import mmap
class FileBuffer(object):
def __init__(self, fileobj):
self.file = fileobj
try:
self.fileno = self.file.fileno()
except OSError:
self.fileno = -1
if self.fileno != -1:# and self.fd.seekable(): # Python 2.x doesn't have seekable()
# size
self.file.seek(0, 2)
self.size = self.file.tell()
self.buffer = mmap.mmap(self.fileno, self.size, access=mmap.ACCESS_READ)
self.type = "mmap"
elif hasattr(self.file, "getbuffer"): # BytesIO
self.buffer = self.file.getbuffer()
self.size = len(self.buffer)
self.type = "buffer"
else:
self.buffer = self.file.read()
self.size = len(self.buffer)
self.type = "bytes"
def close(self):
if self.type == "mmap":
self.file.close()
elif self.type == "bytes":
del self.buffer
elif self.type == "buffer":
self.buffer = None
else:
raise RuntimeError("Unknown FileBuffer type %s" % self.type)
def __enter__(self):
return self
def __exit__(self, t, e, tb):
self.close()
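# --- Added sketch (not part of the original module): reading the first bytes of a
# file through FileBuffer. 'example.flif' is a placeholder path; depending on the
# file object, the underlying buffer is an mmap, a memoryview or a bytes object.
def _filebuffer_demo(path='example.flif'):
    with open(path, 'rb') as f:
        with FileBuffer(f) as fb:
            print(fb.type, fb.size, bytes(fb.buffer[:4]))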
| 28.083333
| 91
| 0.551187
|
9544e1dfc575953cbbbbe83a3df8ac301df21467
| 1,381
|
py
|
Python
|
src/OCR/GeneratedImages/DataModule.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | 2
|
2020-12-21T02:02:13.000Z
|
2021-11-09T06:25:36.000Z
|
src/OCR/GeneratedImages/DataModule.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | 1
|
2021-11-09T06:25:36.000Z
|
2021-11-18T08:35:35.000Z
|
src/OCR/GeneratedImages/DataModule.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | null | null | null |
import os
from typing import Tuple, Union
import torch
from src.OCR.GeneratedImages.model.Image import TypedImageWithText
from src.base import IO
from src.base.data import ImagesDataModule, ImageDataset
class GeneratedImagesDataModule(ImagesDataModule):
def __init__(
self,
path: str,
batch_size: int,
multi_core: bool = True,
cuda: bool = torch.cuda.is_available(),
shuffle: bool = True,
precision: int = 32,
image_file_glob: str = '**/*.jpg',
target_size: Union[float, Tuple[int, int]] = None,
language_file: str = 'languages.json',
**kwargs
):
chars, self.languages, noise = IO.load_languages_file(path, language_file)
super().__init__(path, batch_size, chars, multi_core, cuda, shuffle, precision, image_file_glob, noise,
target_size, **kwargs)
def _make_dataset(self, stage):
return ImageDataset(
path=self.path,
load_fn=self.load_fn,
encode_fn=self.vocab.encode_text,
image_file_glob=os.path.join(stage, self.image_file_glob),
precision=self.precision,
target_size=self.target_size
)
def load_fn(self, path) -> TypedImageWithText:
return TypedImageWithText.load(path, self.languages)
| 33.682927
| 111
| 0.623461
|
19f1ac84e443ad3bbbb9d28e90c84c39a3b73ed1
| 320
|
py
|
Python
|
App/ocr_dispatcher/apps.py
|
JulesVautier/rabbitmq-ocr
|
5e5e30145fc4420be690ce1242ddda54d74ee1f7
|
[
"MIT"
] | null | null | null |
App/ocr_dispatcher/apps.py
|
JulesVautier/rabbitmq-ocr
|
5e5e30145fc4420be690ce1242ddda54d74ee1f7
|
[
"MIT"
] | null | null | null |
App/ocr_dispatcher/apps.py
|
JulesVautier/rabbitmq-ocr
|
5e5e30145fc4420be690ce1242ddda54d74ee1f7
|
[
"MIT"
] | null | null | null |
import sys
import django
from django.apps import AppConfig
class OcrDispatcherConfig(AppConfig):
name = 'ocr_dispatcher'
def ready(self):
if 'runserver' not in sys.argv:
return True
from .rpc_listener import ListenerRpc
listener = ListenerRpc()
listener.start()
| 17.777778
| 45
| 0.659375
|
8f572b34a79d7ecf040da9b011dc10acab805e4a
| 1,574
|
py
|
Python
|
dojo/management/commands/fix_0120.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,772
|
2018-01-22T23:32:15.000Z
|
2022-03-31T14:49:33.000Z
|
dojo/management/commands/fix_0120.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 3,461
|
2018-01-20T19:12:28.000Z
|
2022-03-31T17:14:39.000Z
|
dojo/management/commands/fix_0120.py
|
mtcolman/django-DefectDojo
|
76175aca446e077884bdb5e1d8e2a671a0840775
|
[
"BSD-3-Clause"
] | 1,173
|
2018-01-23T07:10:23.000Z
|
2022-03-31T14:40:43.000Z
|
from django.core.management.base import BaseCommand
from dojo.models import Test
from django.db.migrations.executor import MigrationExecutor
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.utils import OperationalError
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Usage: manage.py fix_0120'
def handle(self, *args, **options):
connection = connections[DEFAULT_DB_ALIAS]
connection.prepare_database()
executor = MigrationExecutor(connection)
if not (executor.migration_plan([('dojo', '0119_default_group_is_staff')])):
# this means that '0119_default_group_is_staff' was the last successful migration
logger.warning('This command will remove field "sonarqube_config" in model "Test" to be able to finish migration 0120_sonarqube_test_and_clean')
try:
with connection.schema_editor() as schema_editor:
schema_editor.remove_field(
model=Test,
field=Test._meta.get_field('sonarqube_config'),
)
except OperationalError:
# We expect an exception like:
# django.db.utils.OperationalError: (1091, "Can't DROP 'sonarqube_config_id'; check that column/key exists")
logger.info('There was nothing to fix')
else:
logger.info('Database fixed')
else:
logger.error('Only migrations stacked in front of 0120 can be fixed by this command')
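# --- Added usage note (not part of the original command): per the help text above,
# the command takes no arguments and is run through Django's manage.py, e.g.
#   python manage.py fix_0120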
| 42.540541
| 156
| 0.658831
|
78eaa4120a5f09a04e02d685c5e740150a8353f5
| 36,909
|
py
|
Python
|
Collections-a-installer/community-general-2.4.0/plugins/modules/vdo.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/vdo.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 1
|
2022-03-12T02:25:26.000Z
|
2022-03-12T02:25:26.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/vdo.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
#!/usr/bin/python
# Copyright: (c) 2018, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- Bryan Gurney (@bgurney-rh)
module: vdo
short_description: Module to control VDO
description:
- This module controls the VDO dedupe and compression device.
- VDO, or Virtual Data Optimizer, is a device-mapper target that
provides inline block-level deduplication, compression, and
thin provisioning capabilities to primary storage.
options:
name:
description:
- The name of the VDO volume.
type: str
required: true
state:
description:
- Whether this VDO volume should be "present" or "absent".
If a "present" VDO volume does not exist, it will be
created. If a "present" VDO volume already exists, it
will be modified, by updating the configuration, which
will take effect when the VDO volume is restarted.
Not all parameters of an existing VDO volume can be
modified; the "statusparamkeys" list contains the
parameters that can be modified after creation. If an
"absent" VDO volume does not exist, it will not be
removed.
type: str
choices: [ absent, present ]
default: present
activated:
description:
- The "activate" status for a VDO volume. If this is set
to "no", the VDO volume cannot be started, and it will
not start on system startup. However, on initial
creation, a VDO volume with "activated" set to "off"
will be running, until stopped. This is the default
behavior of the "vdo create" command; it provides the
user an opportunity to write a base amount of metadata
(filesystem, LVM headers, etc.) to the VDO volume prior
to stopping the volume, and leaving it deactivated
until ready to use.
type: bool
running:
description:
- Whether this VDO volume is running.
- A VDO volume must be activated in order to be started.
type: bool
device:
description:
- The full path of the device to use for VDO storage.
- This is required if "state" is "present".
type: str
logicalsize:
description:
- The logical size of the VDO volume (in megabytes, or
LVM suffix format). If not specified for a new volume,
this defaults to the same size as the underlying storage
device, which is specified in the 'device' parameter.
Existing volumes will maintain their size if the
logicalsize parameter is not specified, or is smaller
than or identical to the current size. If the specified
size is larger than the current size, a growlogical
operation will be performed.
type: str
deduplication:
description:
- Configures whether deduplication is enabled. The
default for a created volume is 'enabled'. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
type: str
choices: [ disabled, enabled ]
compression:
description:
- Configures whether compression is enabled. The default
for a created volume is 'enabled'. Existing volumes
will maintain their previously configured setting unless
a different value is specified in the playbook.
type: str
choices: [ disabled, enabled ]
blockmapcachesize:
description:
- The amount of memory allocated for caching block map
pages, in megabytes (or may be issued with an LVM-style
suffix of K, M, G, or T). The default (and minimum)
value is 128M. The value specifies the size of the
cache; there is a 15% memory usage overhead. Each 1.25G
of block map covers 1T of logical blocks, therefore a
small amount of block map cache memory can cache a
significantly large amount of block map data. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
type: str
readcache:
description:
- Enables or disables the read cache. The default is
'disabled'. Choosing 'enabled' enables a read cache
which may improve performance for workloads of high
deduplication, read workloads with a high level of
compression, or on hard disk storage. Existing
volumes will maintain their previously configured
setting unless a different value is specified in the
playbook.
- The read cache feature is available in VDO 6.1 and older.
type: str
choices: [ disabled, enabled ]
readcachesize:
description:
- Specifies the extra VDO device read cache size in
megabytes. This is in addition to a system-defined
minimum. Using a value with a suffix of K, M, G, or T
is optional. The default value is 0. 1.125 MB of
memory per bio thread will be used per 1 MB of read
cache specified (for example, a VDO volume configured
with 4 bio threads will have a read cache memory usage
overhead of 4.5 MB per 1 MB of read cache specified).
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
- The read cache feature is available in VDO 6.1 and older.
type: str
emulate512:
description:
- Enables 512-byte emulation mode, allowing drivers or
filesystems to access the VDO volume at 512-byte
granularity, instead of the default 4096-byte granularity.
Default is 'disabled'; only recommended when a driver
or filesystem requires 512-byte sector level access to
a device. This option is only available when creating
a new volume, and cannot be changed for an existing
volume.
type: bool
default: false
growphysical:
description:
- Specifies whether to attempt to execute a growphysical
operation, if there is enough unused space on the
device. A growphysical operation will be executed if
there is at least 64 GB of free space, relative to the
previous physical size of the affected VDO volume.
type: bool
default: false
slabsize:
description:
- The size of the increment by which the physical size of
a VDO volume is grown, in megabytes (or may be issued
with an LVM-style suffix of K, M, G, or T). Must be a
power of two between 128M and 32G. The default is 2G,
which supports volumes having a physical size up to 16T.
The maximum, 32G, supports a physical size of up to 256T.
This option is only available when creating a new
volume, and cannot be changed for an existing volume.
type: str
writepolicy:
description:
- Specifies the write policy of the VDO volume. The
'sync' mode acknowledges writes only after data is on
stable storage. The 'async' mode acknowledges writes
when data has been cached for writing to stable
storage. The default (and highly recommended) 'auto'
mode checks the storage device to determine whether it
supports flushes. Devices that support flushes will
result in a VDO volume in 'async' mode, while devices
that do not support flushes will run in sync mode.
Existing volumes will maintain their previously
configured setting unless a different value is
specified in the playbook.
type: str
choices: [ async, auto, sync ]
indexmem:
description:
- Specifies the amount of index memory in gigabytes. The
default is 0.25. The special decimal values 0.25, 0.5,
and 0.75 can be used, as can any positive integer.
This option is only available when creating a new
volume, and cannot be changed for an existing volume.
type: str
indexmode:
description:
- Specifies the index mode of the Albireo index. The
default is 'dense', which has a deduplication window of
1 GB of index memory per 1 TB of incoming data,
requiring 10 GB of index data on persistent storage.
The 'sparse' mode has a deduplication window of 1 GB of
index memory per 10 TB of incoming data, but requires
100 GB of index data on persistent storage. This option
is only available when creating a new volume, and cannot
be changed for an existing volume.
type: str
choices: [ dense, sparse ]
ackthreads:
description:
- Specifies the number of threads to use for
acknowledging completion of requested VDO I/O operations.
Valid values are integer values from 1 to 100 (lower
numbers are preferable due to overhead). The default is
1. Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
biothreads:
description:
- Specifies the number of threads to use for submitting I/O
operations to the storage device. Valid values are
integer values from 1 to 100 (lower numbers are
preferable due to overhead). The default is 4.
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
cputhreads:
description:
- Specifies the number of threads to use for CPU-intensive
work such as hashing or compression. Valid values are
integer values from 1 to 100 (lower numbers are
preferable due to overhead). The default is 2.
Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
logicalthreads:
description:
- Specifies the number of threads across which to
subdivide parts of the VDO processing based on logical
block addresses. Valid values are integer values from
1 to 100 (lower numbers are preferable due to overhead).
The default is 1. Existing volumes will maintain their
previously configured setting unless a different value
is specified in the playbook.
type: str
physicalthreads:
description:
- Specifies the number of threads across which to
subdivide parts of the VDO processing based on physical
block addresses. Valid values are integer values from
1 to 16 (lower numbers are preferable due to overhead).
The physical space used by the VDO volume must be
larger than (slabsize * physicalthreads). The default
is 1. Existing volumes will maintain their previously
configured setting unless a different value is specified
in the playbook.
type: str
notes:
- In general, the default thread configuration should be used.
requirements:
- PyYAML
- kmod-kvdo
- vdo
'''
EXAMPLES = r'''
- name: Create 2 TB VDO volume vdo1 on device /dev/md0
community.general.vdo:
name: vdo1
state: present
device: /dev/md0
logicalsize: 2T
- name: Remove VDO volume vdo1
community.general.vdo:
name: vdo1
state: absent
'''
RETURN = r'''# '''
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
import re
import traceback
YAML_IMP_ERR = None
try:
import yaml
HAS_YAML = True
except ImportError:
YAML_IMP_ERR = traceback.format_exc()
HAS_YAML = False
# Generate a list of VDO volumes, whether they are running or stopped.
#
# @param module The AnsibleModule object.
# @param vdocmd The path of the 'vdo' command.
#
# @return vdolist A list of currently created VDO volumes.
def inventory_vdos(module, vdocmd):
rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
# if rc != 0:
# module.fail_json(msg="Inventorying VDOs failed: %s"
# % vdostatusout, rc=rc, err=err)
vdolist = []
if (rc == 2 and
re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
# If there is no /etc/vdoconf.yml file, assume there are no
# VDO volumes. Return an empty list of VDO volumes.
return vdolist
if rc != 0:
module.fail_json(msg="Inventorying VDOs failed: %s"
% vdostatusout, rc=rc, err=err)
vdostatusyaml = yaml.safe_load(vdostatusout)
if vdostatusyaml is None:
return vdolist
vdoyamls = vdostatusyaml['VDOs']
if vdoyamls is not None:
vdolist = vdoyamls.keys()
return vdolist
def list_running_vdos(module, vdocmd):
rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
runningvdolist = list(filter(None, vdolistout.split('\n')))
return runningvdolist
# Generate a string containing options to pass to the 'VDO' command.
# Note that a 'create' operation will pass more options than a
# 'modify' operation.
#
# @param params A dictionary of parameters, and their values
# (values of 'None' and/or nonexistent values are ignored).
#
# @return vdocmdoptions A string to be used in a 'vdo <action>' command.
def start_vdo(module, vdoname, vdocmd):
rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
if rc == 0:
module.log("started VDO volume %s" % vdoname)
return rc
def stop_vdo(module, vdoname, vdocmd):
rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
if rc == 0:
module.log("stopped VDO volume %s" % vdoname)
return rc
def activate_vdo(module, vdoname, vdocmd):
rc, out, err = module.run_command("%s activate --name=%s"
% (vdocmd, vdoname))
if rc == 0:
module.log("activated VDO volume %s" % vdoname)
return rc
def deactivate_vdo(module, vdoname, vdocmd):
rc, out, err = module.run_command("%s deactivate --name=%s"
% (vdocmd, vdoname))
if rc == 0:
module.log("deactivated VDO volume %s" % vdoname)
return rc
def add_vdooptions(params):
vdocmdoptions = ""
options = []
if ('logicalsize' in params) and (params['logicalsize'] is not None):
options.append("--vdoLogicalSize=" + params['logicalsize'])
if (('blockmapcachesize' in params) and
(params['blockmapcachesize'] is not None)):
options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
if ('readcache' in params) and (params['readcache'] == 'enabled'):
options.append("--readCache=enabled")
if ('readcachesize' in params) and (params['readcachesize'] is not None):
options.append("--readCacheSize=" + params['readcachesize'])
if ('slabsize' in params) and (params['slabsize'] is not None):
options.append("--vdoSlabSize=" + params['slabsize'])
if ('emulate512' in params) and (params['emulate512']):
options.append("--emulate512=enabled")
if ('indexmem' in params) and (params['indexmem'] is not None):
options.append("--indexMem=" + params['indexmem'])
if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
options.append("--sparseIndex=enabled")
# Entering an invalid thread config results in a cryptic
# 'Could not set up device mapper for %s' error from the 'vdo'
# command execution. The dmsetup module on the system will
# output a more helpful message, but one would have to log
# onto that system to read the error. For now, heed the thread
# limit warnings in the DOCUMENTATION section above.
if ('ackthreads' in params) and (params['ackthreads'] is not None):
options.append("--vdoAckThreads=" + params['ackthreads'])
if ('biothreads' in params) and (params['biothreads'] is not None):
options.append("--vdoBioThreads=" + params['biothreads'])
if ('cputhreads' in params) and (params['cputhreads'] is not None):
options.append("--vdoCpuThreads=" + params['cputhreads'])
if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
options.append("--vdoLogicalThreads=" + params['logicalthreads'])
if (('physicalthreads' in params) and
(params['physicalthreads'] is not None)):
options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
vdocmdoptions = ' '.join(options)
return vdocmdoptions
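# --- Added sketch (not part of the original module): add_vdooptions() only maps
# module parameters to 'vdo' command-line flags. The parameter values below are
# illustrative; this call yields
# '--vdoLogicalSize=2T --vdoSlabSize=2G --vdoAckThreads=2'.
_example_vdo_options = add_vdooptions({'logicalsize': '2T', 'slabsize': '2G', 'ackthreads': '2'})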
def run_module():
# Define the available arguments/parameters that a user can pass to
# the module.
# Defaults for VDO parameters are None, in order to facilitate
# the detection of parameters passed from the playbook.
# Creation param defaults are determined by the creation section.
module_args = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
activated=dict(type='bool'),
running=dict(type='bool'),
growphysical=dict(type='bool', default=False),
device=dict(type='str'),
logicalsize=dict(type='str'),
deduplication=dict(type='str', choices=['disabled', 'enabled']),
compression=dict(type='str', choices=['disabled', 'enabled']),
blockmapcachesize=dict(type='str'),
readcache=dict(type='str', choices=['disabled', 'enabled']),
readcachesize=dict(type='str'),
emulate512=dict(type='bool', default=False),
slabsize=dict(type='str'),
writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
indexmem=dict(type='str'),
indexmode=dict(type='str', choices=['dense', 'sparse']),
ackthreads=dict(type='str'),
biothreads=dict(type='str'),
cputhreads=dict(type='str'),
logicalthreads=dict(type='str'),
physicalthreads=dict(type='str')
)
# Seed the result dictionary in the object. There will be an
# 'invocation' dictionary added with 'module_args' (arguments
# given).
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False,
)
if not HAS_YAML:
module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
vdocmd = module.get_bin_path("vdo", required=True)
if not vdocmd:
module.fail_json(msg='VDO is not installed.', **result)
# Print a pre-run list of VDO volumes in the result object.
vdolist = inventory_vdos(module, vdocmd)
runningvdolist = list_running_vdos(module, vdocmd)
# Collect the name of the desired VDO volume, and its state. These will
# determine what to do.
desiredvdo = module.params['name']
state = module.params['state']
# Create a desired VDO volume that doesn't exist yet.
if (desiredvdo not in vdolist) and (state == 'present'):
device = module.params['device']
if device is None:
module.fail_json(msg="Creating a VDO volume requires specifying "
"a 'device' in the playbook.")
# Create a dictionary of the options from the AnsibleModule
# parameters, compile the vdo command options, and run "vdo create"
# with those options.
# Since this is a creation of a new VDO volume, it will contain all
# of the parameters given by the playbook; the rest will
# assume default values.
options = module.params
vdocmdoptions = add_vdooptions(options)
rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
% (vdocmd, desiredvdo, device,
vdocmdoptions))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Creating VDO %s failed."
% desiredvdo, rc=rc, err=err)
if (module.params['compression'] == 'disabled'):
rc, out, err = module.run_command("%s disableCompression --name=%s"
% (vdocmd, desiredvdo))
if ((module.params['deduplication'] is not None) and
module.params['deduplication'] == 'disabled'):
rc, out, err = module.run_command("%s disableDeduplication "
"--name=%s"
% (vdocmd, desiredvdo))
if module.params['activated'] is False:
deactivate_vdo(module, desiredvdo, vdocmd)
if module.params['running'] is False:
stop_vdo(module, desiredvdo, vdocmd)
# Print a post-run list of VDO volumes in the result object.
vdolist = inventory_vdos(module, vdocmd)
module.log("created VDO volume %s" % desiredvdo)
module.exit_json(**result)
# Modify the current parameters of a VDO that exists.
if (desiredvdo in vdolist) and (state == 'present'):
rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
vdostatusyaml = yaml.safe_load(vdostatusoutput)
# An empty dictionary to contain dictionaries of VDO statistics
processedvdos = {}
vdoyamls = vdostatusyaml['VDOs']
if vdoyamls is not None:
processedvdos = vdoyamls
# The 'vdo status' keys that are currently modifiable.
statusparamkeys = ['Acknowledgement threads',
'Bio submission threads',
'Block map cache size',
'CPU-work threads',
'Logical threads',
'Physical threads',
'Read cache',
'Read cache size',
'Configured write policy',
'Compression',
'Deduplication']
# A key translation table from 'vdo status' output to Ansible
# module parameters. This covers all of the 'vdo status'
# parameter keys that could be modified with the 'vdo'
# command.
vdokeytrans = {
'Logical size': 'logicalsize',
'Compression': 'compression',
'Deduplication': 'deduplication',
'Block map cache size': 'blockmapcachesize',
'Read cache': 'readcache',
'Read cache size': 'readcachesize',
'Configured write policy': 'writepolicy',
'Acknowledgement threads': 'ackthreads',
'Bio submission threads': 'biothreads',
'CPU-work threads': 'cputhreads',
'Logical threads': 'logicalthreads',
'Physical threads': 'physicalthreads'
}
# Build a dictionary of the current VDO status parameters, with
# the keys used by VDO. (These keys will be converted later.)
currentvdoparams = {}
# Build a "lookup table" dictionary containing a translation table
# of the parameters that can be modified
modtrans = {}
for statfield in statusparamkeys:
if statfield in processedvdos[desiredvdo]:
currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
modtrans[statfield] = vdokeytrans[statfield]
# Build a dictionary of current parameters formatted with the
# same keys as the AnsibleModule parameters.
currentparams = {}
for paramkey in modtrans.keys():
currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
diffparams = {}
# Check for differences between the playbook parameters and the
# current parameters. This will need a comparison function;
# since AnsibleModule params are all strings, compare them as
# strings (but if it's None; skip).
for key in currentparams.keys():
if module.params[key] is not None:
if str(currentparams[key]) != module.params[key]:
diffparams[key] = module.params[key]
if diffparams:
vdocmdoptions = add_vdooptions(diffparams)
if vdocmdoptions:
rc, out, err = module.run_command("%s modify --name=%s %s"
% (vdocmd,
desiredvdo,
vdocmdoptions))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Modifying VDO %s failed."
% desiredvdo, rc=rc, err=err)
if 'deduplication' in diffparams.keys():
dedupemod = diffparams['deduplication']
if dedupemod == 'disabled':
rc, out, err = module.run_command("%s "
"disableDeduplication "
"--name=%s"
% (vdocmd, desiredvdo))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing deduplication on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if dedupemod == 'enabled':
rc, out, err = module.run_command("%s "
"enableDeduplication "
"--name=%s"
% (vdocmd, desiredvdo))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing deduplication on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if 'compression' in diffparams.keys():
compressmod = diffparams['compression']
if compressmod == 'disabled':
rc, out, err = module.run_command("%s disableCompression "
"--name=%s"
% (vdocmd, desiredvdo))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing compression on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if compressmod == 'enabled':
rc, out, err = module.run_command("%s enableCompression "
"--name=%s"
% (vdocmd, desiredvdo))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing compression on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if 'writepolicy' in diffparams.keys():
writepolmod = diffparams['writepolicy']
if writepolmod == 'auto':
rc, out, err = module.run_command("%s "
"changeWritePolicy "
"--name=%s "
"--writePolicy=%s"
% (vdocmd,
desiredvdo,
writepolmod))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing write policy on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if writepolmod == 'sync':
rc, out, err = module.run_command("%s "
"changeWritePolicy "
"--name=%s "
"--writePolicy=%s"
% (vdocmd,
desiredvdo,
writepolmod))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing write policy on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
if writepolmod == 'async':
rc, out, err = module.run_command("%s "
"changeWritePolicy "
"--name=%s "
"--writePolicy=%s"
% (vdocmd,
desiredvdo,
writepolmod))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Changing write policy on "
"VDO volume %s failed."
% desiredvdo, rc=rc, err=err)
# Process the size parameters, to determine of a growPhysical or
# growLogical operation needs to occur.
sizeparamkeys = ['Logical size', ]
currentsizeparams = {}
sizetrans = {}
for statfield in sizeparamkeys:
currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
sizetrans[statfield] = vdokeytrans[statfield]
sizeparams = {}
for paramkey in currentsizeparams.keys():
sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
diffsizeparams = {}
for key in sizeparams.keys():
if module.params[key] is not None:
if str(sizeparams[key]) != module.params[key]:
diffsizeparams[key] = module.params[key]
if module.params['growphysical']:
physdevice = module.params['device']
rc, devsectors, err = module.run_command("blockdev --getsz %s"
% (physdevice))
devblocks = (int(devsectors) / 8)
dmvdoname = ('/dev/mapper/' + desiredvdo)
currentvdostats = (processedvdos[desiredvdo]
['VDO statistics']
[dmvdoname])
currentphysblocks = currentvdostats['physical blocks']
# Set a growPhysical threshold to grow only when there is
# guaranteed to be more than 2 slabs worth of unallocated
# space on the device to use. For now, set to device
# size + 64 GB, since 32 GB is the largest possible
# slab size.
growthresh = devblocks + 16777216
if currentphysblocks > growthresh:
result['changed'] = True
rc, out, err = module.run_command("%s growPhysical --name=%s"
% (vdocmd, desiredvdo))
if 'logicalsize' in diffsizeparams.keys():
result['changed'] = True
vdocmdoptions = ("--vdoLogicalSize=" +
diffsizeparams['logicalsize'])
rc, out, err = module.run_command("%s growLogical --name=%s %s"
% (vdocmd,
desiredvdo,
vdocmdoptions))
vdoactivatestatus = processedvdos[desiredvdo]['Activate']
if ((module.params['activated'] is False) and
(vdoactivatestatus == 'enabled')):
deactivate_vdo(module, desiredvdo, vdocmd)
if not result['changed']:
result['changed'] = True
if ((module.params['activated'] is True) and
(vdoactivatestatus == 'disabled')):
activate_vdo(module, desiredvdo, vdocmd)
if not result['changed']:
result['changed'] = True
if ((module.params['running'] is False) and
(desiredvdo in runningvdolist)):
stop_vdo(module, desiredvdo, vdocmd)
if not result['changed']:
result['changed'] = True
# Note that a disabled VDO volume cannot be started by the
# 'vdo start' command, by design. To accurately track changed
# status, don't try to start a disabled VDO volume.
# If the playbook contains 'activated: yes', assume that
# the activate_vdo() operation succeeded, as 'vdoactivatestatus'
# will have the activated status prior to the activate_vdo()
# call.
if (((vdoactivatestatus == 'enabled') or
(module.params['activated'] is True)) and
(module.params['running'] is True) and
(desiredvdo not in runningvdolist)):
start_vdo(module, desiredvdo, vdocmd)
if not result['changed']:
result['changed'] = True
# Print a post-run list of VDO volumes in the result object.
vdolist = inventory_vdos(module, vdocmd)
if diffparams:
module.log("modified parameters of VDO volume %s" % desiredvdo)
module.exit_json(**result)
# Remove a desired VDO that currently exists.
if (desiredvdo in vdolist) and (state == 'absent'):
rc, out, err = module.run_command("%s remove --name=%s"
% (vdocmd, desiredvdo))
if rc == 0:
result['changed'] = True
else:
module.fail_json(msg="Removing VDO %s failed."
% desiredvdo, rc=rc, err=err)
# Print a post-run list of VDO volumes in the result object.
vdolist = inventory_vdos(module, vdocmd)
module.log("removed VDO volume %s" % desiredvdo)
module.exit_json(**result)
# fall through
# The state for the desired VDO volume was absent, and it does
# not exist. Print a post-run list of VDO volumes in the result
# object.
vdolist = inventory_vdos(module, vdocmd)
module.log("received request to remove non-existent VDO volume %s"
% desiredvdo)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
| 42.570934
| 92
| 0.552575
|