content stringlengths 5 1.05M |
|---|
#import theano
#theano.config.device = 'gpu'
#theano.config.floatX = 'float32'
import argparse
import csv
import numpy
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense
import pandas as pd
from ngram_classifier import NGramClassifier
from sklearn.metrics import precision_recall_fscore_support
from timeit import default_timer as timer
import tensorflow as tf
# Per-feature scaling factors applied to the numeric profile columns before
# they are fed to the neural network (column name, multiplier).
CLASS_WEIGHTS = [
    ("num_days", 0.997821848),
    ("statuses_per_day", 1.065570851),
    ("followers_per_day", 1.021055002),
    ("following_per_day", 1.122703153),
    ("desc_len_terms", 1.171072307),
    ("num_list_items", 1.017727903),
    ("num_hashtags", 0.889418197),
    ("url_count", 1.018365516)
]


def get_input_vector(row, classifier):
    """Build the 10-element feature vector for one account row.

    Layout: [p_good, p_bot] from the ngram classifier applied to the
    profile description, followed by the eight weighted numeric columns
    listed in CLASS_WEIGHTS (in that order).
    """
    probabilities = classifier.classify_text(
        str(row["user_profile_description"]))
    features = [probabilities["good"], probabilities["bot"]]
    features.extend(float(row[column]) * scale
                    for column, scale in CLASS_WEIGHTS)
    return features
def get_training_output(row):
    """Binary training target for a row: 0.0 for "good" accounts, 1.0 otherwise."""
    label = str(row["class_value"])
    if label == "good":
        return 0.0
    return 1.0
def print_metrics(which_round, metrics):
    """Print per-class precision, recall and F1 (4 d.p.) for one training round.

    ``metrics`` is indexed as returned by precision_recall_fscore_support:
    metrics[0]=precision, metrics[1]=recall, metrics[2]=f-score, each per class.
    """
    prec, rec, fsc = metrics[0], metrics[1], metrics[2]
    print(
        f'round: {which_round} : p() {prec[0]:.4f}, {prec[1]:.4f}'
        f' : r() {rec[0]:.4f}, {rec[1]:.4f}'
        f' : f() {fsc[0]:.4f}, {fsc[1]:.4f}'
    )
def recall_m(y_true, y_pred):
    """Batch-wise recall metric using Keras backend ops (TP / actual positives)."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero when there are no positives.
    return tp / (actual_positives + K.epsilon())
def precision_m(y_true, y_pred):
    """Batch-wise precision metric using Keras backend ops (TP / predicted positives)."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero when nothing is predicted positive.
    return tp / (predicted_positives + K.epsilon())
def f1_m(y_true, y_pred):
    """Batch-wise F1 score: harmonic mean of precision_m and recall_m."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    # Same grouping as the classic formula; epsilon avoids 0/0.
    return 2 * ((p * r) / (p + r + K.epsilon()))
if __name__ == "__main__":
    # Command-line driver: train a small dense network on ngram-classifier
    # probabilities plus weighted profile features, then save architecture
    # (JSON) and weights (HDF5).
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="input csv file")
    parser.add_argument("-m", "--model", help="ngram model file")
    parser.add_argument("-o", "--output", help="output file")
    parser.add_argument("-n", "--numrounds", help="number rounds to train")
    parser.add_argument("-t", "--testfile", help="testing_file")
    args = parser.parse_args()
    # BUG FIX: the original used `raise "message"`, which is a TypeError in
    # Python 3 (exceptions must derive from BaseException), so the intended
    # message never reached the user.  Raise real exceptions instead.
    if not args.input:
        raise ValueError("missing input file")
    if not args.model:
        raise ValueError("missing model file")
    if not args.output:
        raise ValueError("missing output file")
    if not args.numrounds:
        raise ValueError("missing number of training rounds")
    if not args.testfile:
        raise ValueError("missing test file")
    classifier = NGramClassifier(model_path=args.model)
    num_training_rounds = int(args.numrounds)
    # keep_default_na=False keeps empty description cells as "" rather than NaN.
    df = pd.read_csv(args.input, keep_default_na=False)
    df_test = pd.read_csv(args.testfile, keep_default_na=False)
    # 10 inputs: 2 classifier probabilities + 8 weighted profile features.
    nnet = Sequential()
    nnet.add(Dense(22, input_dim=10, activation='relu'))
    nnet.add(Dense(1, activation='sigmoid'))
    nnet.compile(loss='binary_crossentropy', optimizer='adam',
                 metrics=['acc', f1_m, precision_m, recall_m])
    print('----------------')
    start = timer()
    # Shuffle the training rows once before fitting.
    df_train = df.sample(frac=1.0).reset_index(drop=True)
    x_values = []
    y_values = []
    for index, row in df_train.iterrows():
        x_values.append(get_input_vector(row, classifier))
        y_values.append(get_training_output(row))
    nnet.fit(numpy.array(x_values), numpy.array(y_values),
             epochs=num_training_rounds, batch_size=25)
    # Evaluate on the held-out test file (side effect: prints Keras metrics
    # internally; returned values are available for future reporting).
    targets_x = []
    targets_y = []
    for index, row in df_test.iterrows():
        targets_x.append(get_input_vector(row, classifier))
        targets_y.append(get_training_output(row))
    loss, accuracy, f1_score, precision, recall = nnet.evaluate(
        numpy.array(targets_x), numpy.array(targets_y), verbose=0)
    end = timer()
    run_time = (end - start)  # wall-clock training+eval time, kept for logging
    print("model trained.")
    # Persist architecture as JSON and weights as HDF5 alongside it.
    model_json = nnet.to_json()
    with open(args.output, "w") as json_file:
        json_file.write(model_json)
    nnet.save_weights(f'{args.output}.h5')
|
"""SyncFlow for HttpApi"""
import logging
from typing import Dict, List, TYPE_CHECKING
from samcli.lib.sync.flows.generic_api_sync_flow import GenericApiSyncFlow
from samcli.lib.providers.provider import ResourceIdentifier, Stack
from samcli.lib.providers.exceptions import MissingLocalDefinition
# BuildContext and DeployContext will only be imported for type checking to improve performance
# since no instances of contexts will be instantiated in this class
if TYPE_CHECKING: # pragma: no cover
from samcli.commands.build.build_context import BuildContext
from samcli.commands.deploy.deploy_context import DeployContext
LOG = logging.getLogger(__name__)
class HttpApiSyncFlow(GenericApiSyncFlow):
    """SyncFlow for HttpApi's"""

    def __init__(
        self,
        api_identifier: str,
        build_context: "BuildContext",
        deploy_context: "DeployContext",
        physical_id_mapping: Dict[str, str],
        stacks: List[Stack],
    ):
        """
        Parameters
        ----------
        api_identifier : str
            HttpApi resource identifier that needs to have associated HttpApi updated.
        build_context : BuildContext
            BuildContext used for build related parameters
        deploy_context : DeployContext
            DeployContext used for this deploy related parameters
        physical_id_mapping : Dict[str, str]
            Mapping between resource logical identifier and physical identifier
        stacks : List[Stack], optional
            List of stacks containing a root stack and optional nested stacks
        """
        super().__init__(
            api_identifier,
            build_context,
            deploy_context,
            physical_id_mapping,
            log_name="HttpApi " + api_identifier,
            stacks=stacks,
        )

    def set_up(self) -> None:
        # Create the API Gateway v2 client that sync() uses to re-import the API.
        super().set_up()
        self._api_client = self._boto_client("apigatewayv2")

    def sync(self) -> None:
        # Push the local OpenApi definition to the deployed HttpApi resource.
        api_physical_id = self.get_physical_id(self._api_identifier)
        # A local DefinitionUri is mandatory; without it there is nothing to upload.
        if self._definition_uri is None:
            raise MissingLocalDefinition(ResourceIdentifier(self._api_identifier), "DefinitionUri")
        if self._swagger_body:
            LOG.debug("%sTrying to import HttpAPI through client", self.log_prefix)
            response = self._api_client.reimport_api(ApiId=api_physical_id, Body=self._swagger_body.decode())
            LOG.debug("%sImport HttpApi Result: %s", self.log_prefix, response)
        else:
            # Nothing to import when the definition body is empty.
            # NOTE(review): the message text "skipping the skip" reads garbled,
            # but it is runtime output and deliberately left unchanged here.
            LOG.debug("%sEmpty OpenApi definition, skipping the skip for %s", self.log_prefix, self._api_identifier)
|
def cantor_pairing(x, y):
    """Map a pair of non-negative integers to a unique integer (Cantor pairing)."""
    diagonal = x + y
    return int(diagonal * (diagonal + 1) / 2 + y)


def cantor_pairing_nd(*args):
    """Fold the Cantor pairing left-to-right over any number of arguments."""
    if len(args) == 2:
        return cantor_pairing(*args)
    # Pair everything but the last argument, then pair with the last one.
    return cantor_pairing(cantor_pairing_nd(*args[:-1]), args[-1])
if __name__ == '__main__':
    # Visual demo: place each (i, j) pair at its grid position, visiting the
    # points in ascending order of their Cantor pairing value and drawing a
    # short arrow from each point toward the next.
    import collections
    import matplotlib.pyplot as plt
    import numpy as np
    d = {}
    for i in range(1, 10):
        for j in range(1, 10):
            val = cantor_pairing(i, j)
            d[val] = np.array((i, j))
    od = collections.OrderedDict(sorted(d.items()))
    plt.figure()
    plt.axis([0, 10, 0, 10])
    plt.axis('off')
    for k, v in od.items():
        # Stop early so the figure does not get overcrowded.
        if v[0] == 9 and v[1] == 2:
            break
        # BUG FIX: matplotlib renamed annotate's first parameter from ``s``
        # to ``text`` in 3.3 and removed ``s`` in later releases; using the
        # old keyword raises a TypeError on current matplotlib.
        plt.annotate(text='{}/{}'.format(*v), xy=v, ha='center', va='center')
        if 'v0' in locals():
            # Offset arrow endpoints 0.2 units in from each point so the
            # arrows do not overlap the labels.
            step = (v - v0) * 0.2 / np.linalg.norm(v - v0)
            plt.annotate(text='', xy=v0 + step, xytext=v - step,
                         arrowprops=dict(arrowstyle='<-'))
        v0 = v
    plt.savefig('progression.png', format='png', bbox_inches='tight', dpi=300)
|
import boto3
from botocore.exceptions import ClientError
# Module-level S3 resource shared by the helpers below.
s3 = boto3.resource('s3')


def get_buckets():
    """Return every S3 bucket for the current account.

    Prints a short notice and re-raises on any botocore ClientError.
    """
    try:
        all_buckets = list(s3.buckets.all())
    except ClientError:
        print("Couldn't get buckets.")
        raise
    return all_buckets
def count_buckets():
    """Print each bucket's name and return the total number of buckets."""
    # list() instead of a pass-through comprehension (perf/idiom fix), and
    # len() instead of a hand-rolled counter; printed output is unchanged.
    buckets = list(get_buckets())
    for bucket in buckets:
        print(f"Got bucket {bucket.name}.")
    return len(buckets)
|
import pytest
@pytest.fixture()
def settings():
    # Provide a fresh SettingsHandler to each test; the settings JSON file it
    # creates is unlinked during teardown so tests stay isolated.
    from pygluu.kubernetes.settings import SettingsHandler, unlink_settings_json
    handler = SettingsHandler()
    yield handler
    unlink_settings_json()
|
#!/usr/bin/python -tt
#=======================================================================
# General Documentation
"""Single-function module.
See function docstring for description.
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------
# Additional Documentation
#
# RCS Revision Code:
# $Id: interp.py,v 1.2 2004/03/23 04:28:16 jlin Exp $
#
# Modification History:
# - 22 Mar 2004: Original by Johnny Lin, Computation Institute,
# University of Chicago. Passed passably reasonable tests.
#
# Notes:
# - Written for Python 2.2.
# - Module docstrings can be tested using the doctest module. To
# test, execute "python interp.py".
# - See import statements throughout for non-"built-in" packages and
# modules required.
#
# Copyright (c) 2004 by Johnny Lin. For licensing, distribution
# conditions, contact information, and additional documentation see
# the URL http://www.johnny-lin.com/py_pkgs/gemath/doc/;
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA.
# You can contact Johnny Lin at his email address or at the University
# of Chicago, Department of the Geophysical Sciences, 5734 S. Ellis
# Ave., Chicago, IL 60637, USA.
#=======================================================================
#----------------------- Overall Module Imports ------------------------
#- Set module version to package version:
#------------------------ Non-Private Function -------------------------
from builtins import range
def interp(y, x, xinterp, missing=1e+20):
    """Simple linear interpolation for ordinate with missing values.

    Vectors x and y are the data describing a piecewise linear function.
    Function returns the interpolated values of the ordinate function
    at abscissa values in xinterp.  Values of xinterp outside the range
    of x are returned as missing.  Any elements in the output that uses
    missing values in y for the interpolation are set to missing.

    Positional Input Arguments:
    * y:  Ordinate values of data.  Rank 1 numeric vector.  Required.
      Can have missing values.  Floating or integer type.
    * x:  Abscissa values of data.  Rank 1 numeric vector.  Required.
      Can have no missing values.  Must be monotonically ascending.
      Floating or integer type.
    * xinterp:  Abscissa values to calculate interpolated ordinate
      values at.  Rank 1 numeric vector or numeric scalar.  Required.
      Can have no missing values.  Can be in any order.  Floating or
      integer type.

    Keyword Input Arguments:
    * missing:  If input has missing values, this is the missing value
      value.  Scalar.  Floating or integer type.  Default is 1e+20.

    Output Result:
    * Interpolated ordinate values at xinterp.  Rank 1 numeric vector
      of same length as xinterp (if xinterp is a numeric scalar,
      output is also a numeric scalar).  Missing values are set to the
      value of argument missing.  Type is Float, even if argument
      missing and inputs are all integer.

    References:
    * Lin, J. W.-B.:  "Simple Interpolation."
      Python/CDAT for Earth Scientists: Tips and Examples.
      http://www.johnny-lin.com/cdat_tips/tips_math/interp.html

    Example with no missing values (gives same output as function
    arrayfns.interp):
    >>> from interp import interp
    >>> import numpy as N
    >>> x = N.array([1., 2., 3., 4., 5.])
    >>> y = N.array([3., 6., 2.,-5.,-3.])
    >>> xint = N.array([3.4, 2.3])
    >>> yint = interp(y, x, xint, missing=1e+20)
    >>> ['%.7g' % yint[i] for i in range(len(yint))]
    ['-0.8', '4.8']

    Example with missing values:
    >>> x = N.array([1., 2., 3., 4., 5.])
    >>> y = N.array([3., 1e+20, 2., -5., -3.])
    >>> xint = N.array([3.4, 2.3])
    >>> yint = interp(y, x, xint, missing=1e+20)
    >>> ['%.7g' % yint[i] for i in range(len(yint))]
    ['-0.8', '1e+20']

    Example with values out of range of the data:
    >>> x = N.array([1., 2.1, 3., 4., 5.1])
    >>> y = N.array([3., 1e+20, 2., -5., -3.])
    >>> xint = N.array([3.4, -2.3, 6.])
    >>> yint = interp(y, x, xint, missing=1e+20)
    >>> ['%.7g' % yint[i] for i in range(len(yint))]
    ['-0.8', '1e+20', '1e+20']
    """
    # NOTE(review): arrayfns comes from the ancient Numeric package, and
    # N.rank/N.putmask were removed from modern NumPy (use N.ndim etc.);
    # this module targets the historical API it was written against.
    import arrayfns
    import numpy.ma as MA
    import numpy as N
    from .where_close import where_close

    #- Check inputs for possible errors:
    if (N.rank(y) != 1) or (N.rank(x) != 1):
        raise ValueError("interp: Input(s) not a vector")
    if N.rank(xinterp) > 1:
        raise ValueError("interp: xinterp not a vector or scalar")
    # Only the endpoints are compared here, so non-monotonic interior
    # points are not detected by this check.
    if x[-1] <= x[0]:
        raise ValueError("interp: x not monotonically increasing")

    #- Establish constants and define xint, a rank 1 version of
    #  xinterp to be used for the rest of the function:
    if N.rank(xinterp) == 0:
        xint = N.reshape(xinterp, (1,))
    else:
        xint = xinterp
    num_xint = N.size(xint)

    #- Mask as missing values of xint that are outside of the range
    #  of x:
    yint_outrange_mask = N.logical_or( N.less(xint, x[0]) \
                                     , N.greater(xint, x[-1]) )

    #- Mask of elements with missing values in y, if there are any
    #  missing values in y.  If xint equals a value in x, missing
    #  values mask for that xint is the same as the corresponding
    #  value in x; and mask elements in xint which fall in an interval
    #  (whose upper bound index is top_idx) where one of the endpoints
    #  is missing:
    y_miss_mask = where_close(y, missing)
    yint_miss_mask = N.zeros(num_xint)
    if MA.maximum(y_miss_mask) == 1:
        for i in range(num_xint):
            if yint_outrange_mask[i] == 0:
                x_eq_xint = where_close(x, xint[i])
                if MA.maximum(x_eq_xint) == 1:
                    # xint[i] coincides with a data abscissa: inherit its mask.
                    yint_miss_mask[i] = y_miss_mask[N.nonzero(x_eq_xint)]
                else:
                    # xint[i] is strictly inside an interval; mask it if
                    # either interval endpoint ordinate is missing.
                    top_idx = N.nonzero(N.greater(x, xint[i]))[0]
                    yint_miss_mask[i] = y_miss_mask[top_idx] or \
                                        y_miss_mask[top_idx-1]

    #- Return interpolated values, set to missing values as
    #  appropriate, and making a scalar if xinterp is a scalar:
    yint = arrayfns.interp(y, x, xint)
    N.putmask( yint, N.logical_or(yint_miss_mask, yint_outrange_mask) \
             , missing)
    if N.rank(xinterp) == 0: yint = yint[0]
    return yint
#-------------------------- Main: Test Module -------------------------
#- Define additional examples for doctest to use:
__test__ = {'Additional Examples':
"""
(1) General error catching:
>>> from interp import interp
>>> import numpy as N
>>> x = N.array([1., 2., 3., 4., 5., 6.])
>>> y = N.array([3., 1e+20, 2., -5., -3., -4.])
>>> x = N.reshape(x, (2,3))
>>> y = N.reshape(y, (2,3))
>>> xint = N.array([3.4, 2.3])
>>> yint = interp(y, x, xint, missing=1e+20)
Traceback (most recent call last):
...
ValueError: interp: Input(s) not a vector
>>> x = N.array([1., 2., 3., 4., 5., 6.])
>>> y = N.array([3., 1e+20, 2., -5., -3., -4.])
>>> xint = N.array([[3.4, 2.3],[3.4, 2.3]])
>>> yint = interp(y, x, xint, missing=1e+20)
Traceback (most recent call last):
...
ValueError: interp: xinterp not a vector or scalar
>>> x = N.array([1., 2., 3., 4., 5., 0.])
>>> y = N.array([3., 1e+20, 2., -5., -3., -4.])
>>> xint = N.array([3.4, 2.3])
>>> yint = interp(y, x, xint, missing=1e+20)
Traceback (most recent call last):
...
ValueError: interp: x not monotonically increasing
>>> x = N.array([1., 2., 3., 4., 5., 6.])
>>> y = N.array([3., None, 2., -5., -3., -4.])
>>> xint = N.array([3.4, 2.3, 2., 5., 3., 1.])
>>> yint = interp(y, x, xint, missing=None)
Traceback (most recent call last):
...
ValueError: where_close: Inputs must be Float or Integer
(2) Values right on the border of intervals:
>>> x = N.array([1., 2., 3., 4., 5., 6.])
>>> y = N.array([3., 1e+20, 2., -5., -3., -4.])
>>> xint = N.array([3.4, 2.3, 2., 5., 3., 1.])
>>> yint = interp(y, x, xint, missing=1e+20)
>>> ['%.7g' % yint[i] for i in range(len(yint))]
['-0.8', '1e+20', '1e+20', '-3', '2', '3']
(3) Single element vector input:
>>> yint = interp(y, x, N.array([6.]), missing=1e+20)
>>> ['%.7g' % yint[i] for i in range(len(yint))]
['-4']
(4) Scalar xint:
>>> x = N.array([1., 2., 3., 4., 5., 6.])
>>> y = N.array([3., 1e+20, 2., -5., -3., -4.])
>>> yint = interp(y, x, N.array(6.), missing=1e+20)
>>> yint
-4.0
>>> N.rank(yint)
0
(5) Integer values:
>>> x = N.arange(6)
>>> y = N.arange(6)
>>> xint = N.array([3.4, 2.3])
>>> yint = interp(y, x, xint, missing=-9999999)
>>> ['%.7g' % yint[i] for i in range(len(yint))]
['3.4', '2.3']
>>> yint.dtype.char
'd'
>>> x = N.arange(6)
>>> y = N.arange(6)
>>> xint = N.array([3, 2])
>>> yint = interp(y, x, xint, missing=-9999999)
>>> ['%.7g' % yint[i] for i in range(len(yint))]
['3', '2']
>>> yint.dtype.char
'd'
"""}
#- Execute doctest if module is run from command line:
if __name__ == "__main__":
    # The string below documents the self-test behaviour; as a bare string
    # statement it has no runtime effect.
    """Test the module.

    Tests the examples in all the module documentation strings, plus
    __test__.

    Note:  To help ensure that module testing of this file works, the
    parent directory to the current directory is added to sys.path.
    """
    import doctest, sys, os
    # Allow `from interp import interp` inside the doctests to resolve when
    # running from within the package directory.
    sys.path.append(os.pardir)
    doctest.testmod(sys.modules[__name__])
# ===== end file =====
|
import sys
import os
import itertools
from qmla.exploration_strategies.nv_centre_spin_characterisation import nv_centre_full_access
import qmla.shared_functionality.probe_set_generation
import qmla.shared_functionality.expectation_value_functions
from qmla import construct_models
class SimulatedNVCentre(
    nv_centre_full_access.FullAccessNVCentre  # inherit from this
):
    """Exploration strategy for a simulated NV-centre spin system.

    Fixes the true model to field-scaled spin terms and configures probes,
    heuristics and timing for the simulation.
    """
    # Uses some of the same functionality as
    # default NV centre spin experiments/simulations
    # but uses an expectation value which traces out
    # and different model generation
    def __init__(
        self,
        exploration_rules,
        **kwargs
    ):
        super().__init__(
            exploration_rules=exploration_rules,
            **kwargs
        )
        # Set up true model
        # Physical constants used to express term magnitudes in SI units.
        B = 11e-3  # Tesla
        g = 2  #
        bohr_magneton = 9.274e-24  # J T^-1
        hbar = 1.05e-34  # m^2 kg s^-1
        nuclear_magneton = 5.05e-27  # J T^-1
        gamma_n = 0.307e6 / 1e-6  # from Seb's thesis
        gamma = 10.705e6  # T^-1 s^-1 # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5226623/
        # True parameters: only the active (uncommented) terms define the
        # true model; the commented alternatives record explored settings.
        self.true_model_terms_params = {
            # 'x' : 0.55,
            # test
            # 'pauliSet_1_x_d1' : 4, # TEST - kHz
            # 'pauliSet_1_y_d1' : 4, # TEST - kHz
            # 'pauliSet_1_z_d1' : 4, # TEST - kHz
            # spin - test MHz terms
            # 'pauliSet_1_x_d3' : 1e-3*B*g*bohr_magneton/hbar, # ~1.943 GHz = 1943123809.5238094
            # 'pauliSet_1_y_d3' : 1e-3*B*g*bohr_magneton/hbar,
            # 'pauliSet_1_z_d3' : 1e-3*B*g*bohr_magneton/hbar,
            # spin
            # 'pauliSet_1_x_d3' : B*g*bohr_magneton/hbar, # ~1.943 GHz = 1943123809.5238094
            'pauliSet_1_y_d3' : B*g*bohr_magneton/hbar,
            'pauliSet_1_z_d3' : B*g*bohr_magneton/hbar,
            # nitrogen nuclei
            # 'pauliSet_2_x_d3' : B*gamma_n , # ~3.37GHz
            # 'pauliSet_2_y_d3' : B*gamma_n ,
            # 'pauliSet_2_z_d3' : B*gamma_n ,
            # # carbon nuclei
            # 'pauliSet_3_x_d3' : B * gamma , # ~117KHz
            # 'pauliSet_3_y_d3' : B * gamma ,
            # 'pauliSet_3_z_d3' : B * gamma ,
            # # interactions: spin with nitrogen nuclei
            # 'pauliSet_1J2_xJx_d3' : 2.7e6, # 2.7MHz
            # 'pauliSet_1J2_yJy_d3' : 2.7e6,
            # 'pauliSet_1J2_zJz_d3' : 2.14e6,
            # # interactions: spin with carbon nuclei
            # 'pauliSet_1J3_xJx_d3' : 2.4e6, # 2.4MHz
            # 'pauliSet_1J3_yJy_d3' : 2.4e6,
            # 'pauliSet_1J3_zJz_d3' : 2.4e6,
        }
        # Prior (mean, width) per candidate term for Bayesian learning.
        self.gaussian_prior_means_and_widths = {
            # 'x' : (0.5, 0.2),
            # test
            'pauliSet_1_x_d1' : (5, 2), # TEST
            'pauliSet_1_y_d1' : (5, 2), # TEST
            'pauliSet_1_z_d1' : (5, 2), # TEST
            # spin w v thin prior
            # values found in a QHL for GHz terms
            # 'pauliSet_1_x_d3' : (1.85197880e+09, 7.28778717e+07), # ~1.943 GHz
            # 'pauliSet_1_y_d3' : (1.97413207e+09, 7.77490072e+07),
            # 'pauliSet_1_z_d3' : (1.99779857e+09, 3.95883908e+07),
            # 'pauliSet_1_x_d3' : (B*g*bohr_magneton/hbar - 0.4e7, 5e7), # ~1.943 GHz
            # 'pauliSet_1_y_d3' : (B*g*bohr_magneton/hbar + 0.5, 5e7),
            # 'pauliSet_1_z_d3' : (B*g*bohr_magneton/hbar - 0.5, 5e7),
            # 'pauliSet_1_z_d3' : (B*g*bohr_magneton/hbar - 50, 100),
            # spin
            'pauliSet_1_x_d3' : (5e9, 2e9), # ~1.943 GHz
            'pauliSet_1_y_d3' : (5e9, 2e9),
            'pauliSet_1_z_d3' : (5e9, 2e9),
            # nitrogen nuclei
            'pauliSet_2_x_d3' : (5e9, 2e9) , # ~3.37GHz
            'pauliSet_2_y_d3' : (5e9, 2e9) ,
            'pauliSet_2_z_d3' : (5e9, 2e9) ,
            # carbon nuclei
            'pauliSet_3_x_d3' : (1e6, 2e5) , # ~117KHz
            'pauliSet_3_y_d3' : (1e6, 2e5) ,
            'pauliSet_3_z_d3' : (1e6, 2e5) ,
            # interactions: spin with nitrogen nuclei
            'pauliSet_1J2_xJx_d3' : (5e6, 2e6), # 2.7MHz
            'pauliSet_1J2_yJy_d3' : (5e6, 2e6),
            'pauliSet_1J2_zJz_d3' : (5e6, 2e6),
            # interactions: spin with carbon nuclei
            'pauliSet_1J3_xJx_d3' : (5e6, 2e6), # 2.4MHz
            'pauliSet_1J3_yJy_d3' : (5e6, 2e6),
            'pauliSet_1J3_zJz_d3' : (5e6, 2e6),
        }
        # True model string is the alphabetised join of the active terms.
        self.true_model = '+'.join(
            (self.true_model_terms_params.keys())
        )
        self.true_model = qmla.construct_models.alph(self.true_model)
        self.tree_completed_initially = True
        self.initial_models = None
        self.expectation_value_subroutine = \
            qmla.shared_functionality.expectation_value_functions.n_qubit_hahn_evolution_double_time_reverse
        self.timing_insurance_factor = 2/3
        self.num_probes = 20
        time_basis = 1e-9  # nanoseconds
        # self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.eigenbasis_of_first_qubit
        self.max_time_to_consider = 50 * time_basis  # 50 microseconds
        self.plot_time_increment = 0.5 * time_basis  # 0.5 microseconds
        self.track_quadratic_loss = True
        # self.expectation_value_subroutine = qmla.shared_functionality.expectation_value_functions.default_expectation_value
        # NOTE(review): experiment_design_heuristics / latex_model_names are
        # not among the imports visible in this file chunk — presumably pulled
        # in by the qmla package elsewhere; confirm before refactoring.
        self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MultiParticleGuessHeuristic
        self.latex_string_map_subroutine = qmla.shared_functionality.latex_model_names.pauli_set_latex_name
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MixedMultiParticleLinspaceHeuristic
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.SampleOrderMagnitude
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.SampledUncertaintyWithConvergenceThreshold

    def generate_models(self, model_list, **kwargs):
        # First call marks the exploration tree complete; only the true
        # model is ever proposed.
        # NOTE(review): `== None` behaves like `is None` for None entries,
        # though `is None` is the idiomatic comparison.
        if self.spawn_stage[-1] == None:
            self.spawn_stage.append('Complete')
        return [self.true_model]

    def check_tree_completed(self, **kwargs):
        # Tree is finished once generate_models has appended 'Complete'.
        if self.spawn_stage[-1] == 'Complete':
            return True
        else:
            return False
def spin_system_model(
    num_sites=2,
    full_parameterisation=True,
    include_transverse_terms=False,
    core_terms=None,
):
    """Construct an alphabetised '+'-joined model string for a spin system.

    Includes on-site spin terms for site 1, same-axis hyperfine couplings
    between site 1 and every other site, and (optionally) cross-axis
    transverse couplings.

    Parameters
    ----------
    num_sites : int
        Number of sites; appears as the ``_dN`` suffix of every term.
    full_parameterisation : bool
        Not used in this function; kept for interface compatibility.
    include_transverse_terms : bool
        Whether to add cross-axis (e.g. xJy) coupling terms.
    core_terms : list of str, optional
        Pauli axes to use; defaults to ['x', 'y', 'z'].

    Returns
    -------
    str
        Alphabetised model string (also printed for logging).
    """
    # BUG FIX: the original default `core_terms=['x','y','z']` was a mutable
    # default argument shared across calls; use a None sentinel instead.
    # Effective default behaviour for callers is unchanged.
    if core_terms is None:
        core_terms = ['x', 'y', 'z']
    spin_terms = [
        'pauliSet_1_{op}_d{N}'.format(op=op, N=num_sites)
        for op in core_terms
    ]
    hyperfine_terms = [
        'pauliSet_1J{k}_{op}J{op}_d{N}'.format(
            k=k,
            op=op,
            N=num_sites
        )
        for k in range(2, num_sites + 1)
        for op in core_terms
    ]
    transverse_terms = [
        'pauliSet_1J{k}_{op1}J{op2}_d{N}'.format(
            k=k,
            op1=op1,
            op2=op2,
            N=num_sites
        )
        for k in range(2, num_sites + 1)
        for op1 in core_terms
        for op2 in core_terms
        if op1 < op2  # only xJy, not yJx
    ]
    all_terms = []
    all_terms.extend(spin_terms)
    all_terms.extend(hyperfine_terms)
    if include_transverse_terms:
        all_terms.extend(transverse_terms)
    model = '+'.join(all_terms)
    model = qmla.construct_models.alph(model)
    print("Spin system used:", model)
    return model
class TestSimulatedNVCentre(
    SimulatedNVCentre  # inherit from this
):
    """Small-scale test variant of SimulatedNVCentre (1-2 qubit true model)."""
    # Uses some of the same functionality as
    # default NV centre spin experiments/simulations
    # but uses an expectation value which traces out
    # and different model generation
    def __init__(
        self,
        exploration_rules,
        **kwargs
    ):
        super().__init__(
            exploration_rules=exploration_rules,
            **kwargs
        )
        # Set up true model
        # Physical constants (unused by the active terms below but kept
        # alongside the commented full-scale alternatives).
        B = 11e-3  # Tesla
        g = 2  #
        bohr_magneton = 9.274e-24  # J T^-1
        hbar = 1.05e-34  # m^2 kg s^-1
        nuclear_magneton = 5.05e-27  # J T^-1
        gamma_n = 0.307e6 / 1e-6  # from Seb's thesis
        gamma = 10.705e6  # T^-1 s^-1 # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5226623/
        # Overall order of magnitude applied to the test parameters.
        order_mag = -1
        self.true_model_terms_params = {
            # spin
            # 'pauliSet_1_x_d3' : B*g*bohr_magneton/hbar, # ~1.943 GHz = 1943123809.5238094
            # 'pauliSet_1_y_d3' : B*g*bohr_magneton/hbar,
            # 'pauliSet_1_z_d3' : B*g*bohr_magneton/hbar,
            'pauliSet_1_x_d1' : 4.2431238095238094 * (10**order_mag),
            'pauliSet_1_y_d1' : 5.9431238095238094 * (10**order_mag),
            # 'pauliSet_1_z_d1' : 5.9431238095238094 * (10**order_mag),
            # 'pauliSet_1_x_d2' : 3.2431238095238094 * (10**order_mag),
            # 'pauliSet_1_y_d2' : 6.9431238095238094 * (10**order_mag),
            # 'pauliSet_1_x_d2' : 1.9431238095238094 * (10**order_mag),
            # 'pauliSet_1_y_d2' : 8.9431238095238094 * (10**order_mag),
            # 'pauliSet_1_z_d2' : 3.9431238095238094 * (10**order_mag),
            # 'pauliSet_2_x_d2' : 4.9431238095238094 * (10**order_mag),
            # 'pauliSet_2_y_d2' : 7.9431238095238094 * (10**order_mag),
            # 'pauliSet_2_z_d2' : 6.9431238095238094 * (10**order_mag),
        }
        # Prior (mean, width) per candidate term.
        self.gaussian_prior_means_and_widths = {
            # start accurate with very small prior
            # 'pauliSet_1_x_d1' : ( 3.243 * (10**order_mag) , 0.001 * (10**order_mag) ),
            # 'pauliSet_1_y_d1' : ( 6.943 * (10**order_mag) , 0.001 * (10**order_mag) ) ,
            # 'pauliSet_1_z_d1' : (5* (10**order_mag), 2* (10**order_mag)),
            # spin
            'pauliSet_1_x_d1' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_1_y_d1' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_1_z_d1' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_1_x_d2' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_1_y_d2' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_1_z_d2' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_2_x_d2' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_2_y_d2' : (5* (10**order_mag), 2* (10**order_mag)),
            'pauliSet_2_z_d2' : (5* (10**order_mag), 2* (10**order_mag)),
        }
        # True model string is the alphabetised join of the active terms.
        self.true_model = '+'.join(
            (self.true_model_terms_params.keys())
        )
        self.true_model = qmla.construct_models.alph(self.true_model)
        # QInfer / resampler configuration for the test runs.
        self.qinfer_resampler_threshold = 0.15
        self.qinfer_resampler_a = 0.98
        self.iqle_mode = False
        self.hard_fix_resample_effective_sample_size = 1000
        self.expectation_value_subroutine = qmla.shared_functionality.expectation_value_functions.default_expectation_value
        # Choose heuristic
        # NOTE(review): experiment_design_heuristics is not among the imports
        # visible in this file chunk — presumably exposed by the qmla package;
        # confirm before refactoring.
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MultiParticleGuessHeuristic
        self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.RandomTimeUpperBounded
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.MixedMultiParticleLinspaceHeuristic
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.VolumeAdaptiveParticleGuessHeuristic
        # self.model_heuristic_subroutine = qmla.shared_functionality.experiment_design_heuristics.FixedNineEighthsToPowerK
        # self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.eigenbasis_of_first_qubit
        self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.manual_set_probes
        time_basis = 1/10**order_mag  # nanoseconds
        # self.system_probes_generation_subroutine = qmla.shared_functionality.probe_set_generation.eigenbasis_of_first_qubit
        self.max_time_to_consider = 50 * time_basis  # 50 microseconds
        self.plot_time_increment = 5 * time_basis  # 0.5 microseconds
        self.timing_insurance_factor = 0.25
# requires python3
import collections
# You are given the following information, but you may prefer to do some research for yourself.
#
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
# NOTE(review): this list is not referenced by the loop below (which iterates
# days_in_month_dict instead) and spells 'Sept' where the dict uses 'Sep'.
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']
# Days per month in calendar order, as callables taking a leap-year flag.
days_in_month_dict = collections.OrderedDict([
    ('Jan', lambda leap_bool: 31),
    ('Feb', lambda leap_bool: 28 if not leap_bool else 29),
    ('Mar', lambda leap_bool: 31),
    ('Apr', lambda leap_bool: 30),
    ('May', lambda leap_bool: 31),
    ('Jun', lambda leap_bool: 30),
    ('Jul', lambda leap_bool: 31),
    ('Aug', lambda leap_bool: 31),
    ('Sep', lambda leap_bool: 30),
    ('Oct', lambda leap_bool: 31),
    ('Nov', lambda leap_bool: 30),
    ('Dec', lambda leap_bool: 31)])
# 0-6
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# 7th day is index 6
day_number = 0       # days elapsed since Monday 1 Jan 1900
leap_year = False
year_start = 1900    # 1900 is processed only to advance the weekday
num_years = 101      # covers 1900 through 2000 inclusive
number_sundays = 0
for year in range(year_start, year_start + num_years):
    # BUG FIX: the original never reset leap_year back to False for non-leap
    # years, and its century test compared the single character
    # str(year)[-2] against '00' (always false) while inverting the
    # divisible-by-400 rule.  Compute the Gregorian rule fresh each year:
    # leap iff divisible by 4 and (not a century or divisible by 400).
    leap_year = (year % 4 == 0) and (year % 100 != 0 or year % 400 == 0)
    for month, days_in_month_func in days_in_month_dict.items():
        days_in_month = days_in_month_func(leap_year)
        day_name = days[day_number % 7]
        extra = ''
        # The puzzle counts Sundays from 1901 on; 1900 only calibrates the weekday.
        if year > year_start and day_name == 'Sun':
            number_sundays += 1
            extra = ' %s' % number_sundays
        print('%s 1st %s is a %s%s' % (month, year, day_name, extra))
        day_number += days_in_month
print('number_sundays=%s' % number_sundays)
|
from django.urls import path
from .views import (
IssueDetailView,
IssueUpdateView,
IssueDeleteView,
IssueCreateView,
)
# Issue CRUD routes; the route names are targets for reverse() and {% url %}.
urlpatterns = [
    path('<int:issue_pk>/', IssueDetailView.as_view(), name="issue-detail"),
    path('<int:issue_pk>/update/', IssueUpdateView.as_view(), name='issue-update'),
    path('<int:issue_pk>/delete/', IssueDeleteView.as_view(), name='issue-delete'),
    path('new/', IssueCreateView.as_view(), name="issue-create"),
]
|
from struct import unpack as struct_unpack
from typing import Any, BinaryIO, Callable, Iterator, Union
def _file_to_stream(file: BinaryIO, chunk_size=512):
while True:
# Read while not EOF
data = file.read(chunk_size)
if not data:
# EOF
break
# Yield each byte
yield from data
class DecodeError(Exception):
    """Raised when a twine byte stream is malformed or unsupported."""
def _handle_bool(subtype: int, twine_stream: Iterator) -> Union[bool, None]:
# bool data is stored in the subtype itself and can be determined
# without reading from the twine stream
if subtype == 0x00:
return False
elif subtype == 0x01:
return True
elif subtype == 0x03:
return None
else:
error_msg = f"Unknown bool subtype: {hex(subtype)}"
raise DecodeError(error_msg)
def _handle_int(subtype: int, twine_stream: Iterator) -> int:
# Specifies whether the integer is signed - stored in highest bit
is_signed = bool(subtype & 0x08)
# Calculate the number of bytes. Last 3 bits of subtype are calculated
# using log(byte_count, 2) - 1. We can reverse this to obtain the byte
# count from the last 2 digits.
byte_count: int = 2 ** ((subtype & 0x07) - 1)
# Get data from twine stream into a bytearray
data_bytes = bytearray()
for _ in range(byte_count):
next_byte = next(twine_stream)
data_bytes.append(next_byte)
# Construct an int from the bytearray
decoded_int = int.from_bytes(data_bytes, "big", signed=is_signed)
return decoded_int
def _handle_float(subtype: int, twine_stream: Iterator) -> float:
# Handle NaN and infinities
if subtype == 0x0:
return float("nan")
elif subtype == 0x4:
return float("inf")
elif subtype == 0xB:
return float("-inf")
# Float precision type is encoded in last 2 bytes of subtype
precision = subtype & 0x03
# Single precision
if precision == 0x01:
unpack_type = "!f"
byte_count = 4
# Double precision
elif precision == 0x02:
unpack_type = "!d"
byte_count = 8
# NOTE: Quadruple precision support has not been added yet
else:
error_msg = f"Unknown float subtype: {hex(subtype)}"
raise DecodeError(error_msg)
# Get data from twine stream into a bytearray
data_bytes = bytearray()
for _ in range(byte_count):
next_byte = next(twine_stream)
data_bytes.append(next_byte)
# Construct float of correct precision from data_bytes
decoded_float = struct_unpack(unpack_type, data_bytes)[0]
return decoded_float
def _get_single_utf8_char(twine_stream: Iterator) -> str:
# Get first byte of character.
first_byte = next(twine_stream)
# Determine how long the character is using the first byte
if first_byte & 0xF0 == 0xF0:
bytes_to_read = 3
elif first_byte & 0xE0 == 0xE0:
bytes_to_read = 2
elif first_byte & 0xC0 == 0xE0:
bytes_to_read = 1
elif not first_byte & 0x80:
bytes_to_read = 0
else:
error_msg = "invalid UTF-8 sequence"
raise DecodeError(error_msg)
# Read bytes from the stream into a bytearray
data_bytes = bytearray([first_byte])
for _ in range(bytes_to_read):
byte_read = next(twine_stream)
data_bytes.append(byte_read)
# Decode bytes to str and return
return bytes(data_bytes).decode("utf8")
def _handle_str(subtype: int, twine_stream: Iterator) -> str:
    """Decode a string: a twine-encoded length followed by UTF-8 chars."""
    # The character count is itself twine-encoded (as an int) up front.
    char_count = _handle_any(twine_stream)
    # Decode exactly that many characters and join them.
    return "".join(
        _get_single_utf8_char(twine_stream) for _ in range(char_count)
    )
def _handle_list(subtype, twine_stream):
    """Decode a list: a twine-encoded length followed by encoded elements."""
    # The element count is itself twine-encoded (as an int) up front.
    element_count = _handle_any(twine_stream)
    # Each element is an arbitrary twine-encoded value.
    return [_handle_any(twine_stream) for _ in range(element_count)]
# Registry mapping the high nibble of a type byte to its decoder.
# Applications can register additional types via set_decoder().
_decoders: dict[int, Callable] = {
    0x10: _handle_bool,
    0x20: _handle_int,
    0x30: _handle_float,
    0x40: _handle_str,
    0x50: _handle_list,
}
def set_decoder(type_code: int, decoder: Callable) -> None:
    """Register a decoder for a twine data type.

    Args:
        type_code: Type code to handle (the high nibble of the type
            byte, e.g. 0x60).
        decoder: Callable invoked as ``decoder(subtype, twine_stream)``.
    """
    _decoders[type_code] = decoder
def _handle_any(twine_stream) -> Any:
    """Decode the next twine-encoded value from the stream.

    The first byte carries the data type in its high nibble and a
    type-specific subtype in its low nibble; decoding is dispatched
    to the handler registered in ``_decoders``.

    Raises:
        DecodeError: If no decoder is registered for the data type.
    """
    # Get the data type and subtype
    type_byte = next(twine_stream)
    data_type, subtype = type_byte & 0xF0, type_byte & 0x0F
    # Single dict lookup instead of a membership test followed by .get()
    decoder = _decoders.get(data_type)
    if decoder is None:
        error_msg = f"type {hex(data_type)} has no decoder"
        raise DecodeError(error_msg)
    return decoder(subtype, twine_stream)
def load(file: BinaryIO, chunk_size=512) -> Any:
    """Loads twine data from a file object

    Args:
        file (BinaryIO): A file opened in 'rb' mode
        chunk_size (int, optional): File will be read in
            chunks of this size. Defaults to 512.

    Returns:
        any: The decoded data
    """
    # Wrap the file in a byte stream, then decode from it.
    twine_stream = _file_to_stream(file, chunk_size=chunk_size)
    return _handle_any(twine_stream)
def loads(data: bytes) -> Any:
    """Loads twine data from a bytes-like object

    Args:
        data (bytes): Twine data as a bytes or bytearray

    Returns:
        any: The decoded data
    """
    # Iterating bytes yields ints, which is the stream shape the
    # decoders expect.
    return _handle_any(iter(data))
|
from model_statistics import ModelStatistics
class StatisticCollector(ModelStatistics):
    """ModelStatistics subclass that logs every recorded statistic."""
    def __init__(self, token, logger, save_path=None):
        # token and save_path are forwarded unchanged to ModelStatistics.
        # NOTE(review): `add` reads self.token — assumes the base
        # __init__ stores it; confirm against ModelStatistics.
        super().__init__(token, save_path)
        self.logger = logger
    def add(self, key, val):
        """Record a statistic via the base class, log it, return self."""
        super().add(key, val)
        self.logger.info('StatisticCollector {} add {} with value {}'.format(self.token, key, val))
        # returning self allows chained .add(...).add(...) calls
        return self
|
from typing import cast
from ymmsl import Reference
from libmuscle.mcp.client import Client
import libmuscle.mcp.pipe_multiplexer as mux
class PipeClient(Client):
    """A client for pipe-based communication.
    """
    @staticmethod
    def can_connect_to(location: str) -> bool:
        """Whether this client class can connect to the given location.
        Args:
            location: The location to potentially connect to.
        Returns:
            True iff this class can connect to this location.
        """
        return mux.can_connect_to(location)
    @staticmethod
    def shutdown(instance_id: Reference) -> None:
        """Free any resources shared by all clients for this instance.
        """
        if mux.can_communicate_for(instance_id):
            # close mux ends to allow clean shutdown
            mux.close_mux_ends(instance_id)
            mux_client_conn = mux.get_instance_client_conn(instance_id)
            mux_client_conn.close()
    def __init__(self, instance_id: Reference, location: str) -> None:
        """Creates a PipeClient.
        Args:
            instance_id: Our instance id.
            location: Location of the peer (server); the text after
                the '/' is used as the peer id.
        """
        self._instance_id = instance_id
        mux_client_conn = mux.get_instance_client_conn(instance_id)
        _, peer_id = location.split('/')
        # request connection
        # This assumes that the clients are made one by one in the same thread
        # so that they can use the same pipe without getting in each other's
        # way.
        mux_client_conn.send(peer_id)
        # the multiplexer replies with a dedicated connection for this peer
        self._conn = mux_client_conn.recv()
    def receive(self, receiver: Reference) -> bytes:
        """Receive a message from a port this client connects to.
        Args:
            receiver: The receiving (local) port.
        Returns:
            The received message.
        """
        # request/response: send the port name, then block for the payload
        self._conn.send(receiver)
        return cast(bytes, self._conn.recv())
    def close(self) -> None:
        """Closes this client.
        This closes any connections this client has and/or performs
        other shutdown activities.
        """
        self._conn.close()
|
import os
from contextlib import contextmanager
from typing import Generator
from alembic import command # type: ignore
from alembic.config import Config as AlembicConfig # type: ignore
from aiohttp_storage import StorageConfig
@contextmanager
def storage(config: StorageConfig, root: str) -> Generator[None, None, None]:
    """Migrate the database up on entry and back down on exit.

    Runs the project's Alembic migrations to "head" before yielding and
    downgrades to "base" afterwards — even if the body raises, so the
    database is always left clean (the original skipped the downgrade
    on error).

    Args:
        config: Storage configuration providing the SQLAlchemy URI.
        root: Directory containing the ``migrations`` folder with
            ``alembic.ini``.
    """
    migrations_root = os.path.join(root, "migrations")
    config_path = os.path.join(migrations_root, "alembic.ini")
    migrations_config = AlembicConfig(config_path)
    migrations_config.set_main_option("script_location", migrations_root)
    migrations_config.set_main_option("sqlalchemy.url", config.uri)
    command.upgrade(migrations_config, "head")
    try:
        yield
    finally:
        command.downgrade(migrations_config, "base")
|
# psdtags/psdtags.py
# Copyright (c) 2022, Christoph Gohlke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write layered TIFF ImageSourceData and ImageResources tags.
Psdtags is a Python library to read and write the Adobe Photoshop(r) specific
ImageResources (#34377) and ImageSourceData (#37724) TIFF tags, which contain
image resource blocks, layer and mask information found in a typical layered
TIFF file created by Photoshop.
The format is specified in the
`Adobe Photoshop TIFF Technical Notes (March 22, 2002)
<https://www.adobe.io/open/standards/TIFF.html>`_
and
`Adobe Photoshop File Formats Specification (November 2019)
<https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/>`_.
Adobe Photoshop is a registered trademark of Adobe Systems Inc.
:Author:
`Christoph Gohlke <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:License: BSD 3-Clause
:Version: 2022.2.11
:Status: Alpha
Requirements
------------
This release has been tested with the following requirements and dependencies
(other versions may work):
* `CPython 3.8.10, 3.9.10, 3.10.2 64-bit <https://www.python.org>`_
* `Numpy 1.21.5 <https://pypi.org/project/numpy/>`_
* `Imagecodecs 2021.11.20 <https://pypi.org/project/imagecodecs/>`_ (optional)
* `Tifffile 2022.2.9 <https://pypi.org/project/tifffile/>`_ (optional)
* `Matplotlib 3.4.3 <https://pypi.org/project/matplotlib/>`_ (optional)
Revisions
---------
2022.2.11
Fix struct padding.
Support TiffImageResources.
2022.2.2
Various API changes (breaking).
Handle additional layer information.
Preserve structures of unknown format as opaque bytes.
Add options to skip tag structures of unknown format.
Add abstract base class for tag structures.
Add classes for many structures.
2022.1.18
Various API changes (breaking).
Various fixes for writing TiffImageSourceData.
Support filter masks.
Add option to change channel compression on write.
Warn when skipping ResourceKey sections.
2022.1.14
Initial release.
Notes
-----
The API is not stable yet and might change between revisions.
This module has been tested with a limited number of files only.
Additional layer information is not yet supported.
Consider `psd-tools <https://github.com/psd-tools/psd-tools>`_ and
`pytoshop <https://github.com/mdboom/pytoshop>`_ for working with
Adobe Photoshop PSD files.
Examples
--------
Read the ImageSourceData tag value from a layered TIFF file and iterate over
all the channels:
>>> isd = TiffImageSourceData.fromtiff('layered.tif')
>>> for layer in isd.layers:
... layer.name
... for channel in layer.channels:
... ch = channel.data # a numpy array
'Background'
'Reflect1'
'Reflect2'
'image'
'Layer 1'
'ORight'
'I'
'IShadow'
'O'
Read the ImageResources tag value from the TIFF file, iterate over the blocks,
and get the thumbnail image:
>>> res = TiffImageResources.fromtiff('layered.tif')
>>> for block in res.blocks:
... blockname = block.name
>>> res.thumbnail().shape
(90, 160, 3)
Write the image, ImageSourceData and ImageResources to a new layered TIFF file:
>>> from tifffile import imread, imwrite
>>> image = imread('layered.tif')
>>> imwrite(
... '_layered.tif',
... image,
... byteorder=isd.byteorder, # must match ImageSourceData
... photometric='rgb', # must match ImageSourceData
... metadata=None, # do not write any tifffile specific metadata
... extratags=[isd.tifftag(), res.tifftag()],
... )
Verify that the new layered TIFF file contains readable ImageSourceData:
>>> assert isd == TiffImageSourceData.fromtiff('_layered.tif')
>>> assert res == TiffImageResources.fromtiff('_layered.tif')
To view the layer and mask information as well as the image resource blocks in
a layered TIFF file from a command line, run::
python -m psdtags layered.tif
"""
from __future__ import annotations
__version__ = '2022.2.11'
# Public API of the psdtags module.
__all__ = [
    'PsdBlendMode',
    'PsdBoolean',
    'PsdBytesBlock',
    'PsdChannel',
    'PsdChannelId',
    'PsdClippingType',
    'PsdColorBlock',
    'PsdColorSpaceType',
    'PsdColorType',
    'PsdCompressionType',
    'PsdEmpty',
    'PsdExposure',
    'PsdFilterMask',
    'PsdFormat',
    'PsdImageMode',
    'PsdInteger',
    'PsdKey',
    'PsdKeyABC',
    'PsdLayer',
    'PsdLayerFlag',
    'PsdLayerMask',
    'PsdLayerMaskFlag',
    'PsdLayerMaskParameterFlag',
    'PsdLayers',
    'PsdMetadataSetting',
    'PsdPascalString',
    'PsdPascalStringBlock',
    'PsdPascalStringsBlock',
    'PsdPatterns',
    'PsdPoint',
    'PsdRectangle',
    'PsdReferencePoint',
    'PsdResourceBlockABC',
    'PsdResourceId',
    'PsdSectionDividerSetting',
    'PsdSectionDividerType',
    'PsdSheetColorSetting',
    'PsdString',
    'PsdStringBlock',
    'PsdStringsBlock',
    'PsdTextEngineData',
    'PsdThumbnailBlock',
    'PsdUnicodeString',
    'PsdUnknown',
    'PsdUserMask',
    'PsdVersionBlock',
    'PsdVirtualMemoryArray',
    'PsdVirtualMemoryArrayList',
    'PsdWord',
    'TiffImageResources',
    'TiffImageSourceData',
    'read_tifftag',
    'read_psdblocks',
    'read_psdtags',
    'write_psdblocks',
    'write_psdtags',
]
import sys
import os
import io
import enum
import struct
import zlib
import dataclasses
import abc
import numpy
from typing import cast, Any, BinaryIO, Iterable, Literal, NamedTuple, Type
class BytesEnumMeta(enum.EnumMeta):
    """Metaclass for bytes enums."""
    def __contains__(cls, value: object) -> bool:
        # Membership via value lookup; goes through __call__ below so
        # reversed (little-endian) byte values are also accepted.
        try:
            cls(value)
        except ValueError:
            return False
        else:
            return True
    def __call__(cls, *args, **kwds) -> Any:
        # Try the value as-is first; if that fails, retry with the
        # bytes reversed. The first ValueError is re-raised when both
        # lookups fail.
        try:
            # big endian
            c = enum.EnumMeta.__call__(cls, *args, **kwds)
        except ValueError as exc:
            try:
                # little endian
                if args:
                    args = (args[0][::-1],) + args[1:]
                c = enum.EnumMeta.__call__(cls, *args, **kwds)
            except Exception:
                raise exc
        return c
class BytesEnum(bytes, enum.Enum, metaclass=BytesEnumMeta):
    """Base class for bytes enums."""
    def tobytes(self, byteorder: str = '>') -> bytes:
        """Return enum value as bytes in the requested byte order."""
        if byteorder == '>':
            return self.value
        return self.value[::-1]
    def write(self, fh: BinaryIO, byteorder: str = '>', /) -> int:
        """Write enum value to open file; return number of bytes written."""
        return fh.write(self.tobytes(byteorder))
class PsdKey(BytesEnum):
    """Keys of tagged structures."""
    # Four-character keys from the Adobe Photoshop File Formats
    # Specification; stored reversed in little-endian files (handled
    # by BytesEnumMeta).
    ALPHA = b'Alph'
    ANIMATION_EFFECTS = b'anFX'
    ANNOTATIONS = b'Anno'
    ARTBOARD_DATA = b'artb'
    ARTBOARD_DATA_2 = b'artd'
    ARTBOARD_DATA_3 = b'abdd'
    BLACK_AND_WHITE = b'blwh'
    BLEND_CLIPPING_ELEMENTS = b'clbl'
    BLEND_INTERIOR_ELEMENTS = b'infx'
    BRIGHTNESS_AND_CONTRAST = b'brit'
    CHANNEL_BLENDING_RESTRICTIONS_SETTING = b'brst'
    CHANNEL_MIXER = b'mixr'
    COLOR_BALANCE = b'blnc'
    COLOR_LOOKUP = b'clrL'
    COMPOSITOR_USED = b'cinf'
    CONTENT_GENERATOR_EXTRA_DATA = b'CgEd'
    CURVES = b'curv'
    EFFECTS_LAYER = b'lrFX'
    EXPOSURE = b'expA'
    FILTER_EFFECTS = b'FXid'
    FILTER_EFFECTS_2 = b'FEid'
    FILTER_MASK = b'FMsk'
    FOREIGN_EFFECT_ID = b'ffxi'
    GRADIENT_FILL_SETTING = b'GdFl'
    GRADIENT_MAP = b'grdm'
    HUE_SATURATION = b'hue2'
    HUE_SATURATION_PS4 = b'hue '
    INVERT = b'nvrt'
    KNOCKOUT_SETTING = b'knko'
    LAYER = b'Layr'
    LAYER_16 = b'Lr16'
    LAYER_32 = b'Lr32'
    LAYER_ID = b'lyid'
    LAYER_MASK_AS_GLOBAL_MASK = b'lmgm'
    LAYER_NAME_SOURCE_SETTING = b'lnsr'
    LAYER_VERSION = b'lyvr'
    LEVELS = b'levl'
    LINKED_LAYER = b'lnkD'
    LINKED_LAYER_2 = b'lnk2'
    LINKED_LAYER_3 = b'lnk3'
    LINKED_LAYER_EXTERNAL = b'lnkE'
    METADATA_SETTING = b'shmd'
    NESTED_SECTION_DIVIDER_SETTING = b'lsdk'
    OBJECT_BASED_EFFECTS_LAYER_INFO = b'lfx2'
    PATT = b'patt'
    PATTERNS = b'Patt'
    PATTERNS_2 = b'Pat2'
    PATTERNS_3 = b'Pat3'
    PATTERN_DATA = b'shpa'
    PATTERN_FILL_SETTING = b'PtFl'
    PHOTO_FILTER = b'phfl'
    PIXEL_SOURCE_DATA = b'PxSc'
    PIXEL_SOURCE_DATA_CC15 = b'PxSD'
    PLACED_LAYER = b'plLd'
    PLACED_LAYER_CS3 = b'PlLd'
    POSTERIZE = b'post'
    PROTECTED_SETTING = b'lspf'
    REFERENCE_POINT = b'fxrp'
    SAVING_MERGED_TRANSPARENCY = b'Mtrn'
    SAVING_MERGED_TRANSPARENCY2 = b'MTrn'
    SAVING_MERGED_TRANSPARENCY_16 = b'Mt16'
    SAVING_MERGED_TRANSPARENCY_32 = b'Mt32'
    SECTION_DIVIDER_SETTING = b'lsct'
    SELECTIVE_COLOR = b'selc'
    SHEET_COLOR_SETTING = b'lclr'
    SMART_OBJECT_LAYER_DATA = b'SoLd'
    SMART_OBJECT_LAYER_DATA_CC15 = b'SoLE'
    SOLID_COLOR_SHEET_SETTING = b'SoCo'
    TEXT_ENGINE_DATA = b'Txt2'
    THRESHOLD = b'thrs'
    TRANSPARENCY_SHAPES_LAYER = b'tsly'
    TYPE_TOOL_INFO = b'tySh'
    TYPE_TOOL_OBJECT_SETTING = b'TySh'
    UNICODE_LAYER_NAME = b'luni'
    UNICODE_PATH_NAME = b'pths'
    USER_MASK = b'LMsk'
    USING_ALIGNED_RENDERING = b'sn2P'
    VECTOR_MASK_AS_GLOBAL_MASK = b'vmgm'
    VECTOR_MASK_SETTING = b'vmsk'
    VECTOR_MASK_SETTING_CS6 = b'vsms'
    VECTOR_ORIGINATION_DATA = b'vogk'
    VECTOR_STROKE_DATA = b'vstk'
    VECTOR_STROKE_CONTENT_DATA = b'vscg'
    VIBRANCE = b'vibA'
class PsdResourceId(enum.IntEnum):
    """Image resource IDs."""
    # NOTE(review): 'UNKONWN' is a typo for UNKNOWN, but renaming the
    # member would break the public API; kept as-is.
    UNKONWN = -1
    OBSOLETE_1 = 1000
    MAC_PRINT_MANAGER_INFO = 1001
    MAC_PAGE_FORMAT_INFO = 1002
    OBSOLETE_2 = 1003
    RESOLUTION_INFO = 1005
    ALPHA_NAMES_PASCAL = 1006
    DISPLAY_INFO_OBSOLETE = 1007
    CAPTION_PASCAL = 1008
    BORDER_INFO = 1009
    BACKGROUND_COLOR = 1010
    PRINT_FLAGS = 1011
    GRAYSCALE_HALFTONING_INFO = 1012
    COLOR_HALFTONING_INFO = 1013
    DUOTONE_HALFTONING_INFO = 1014
    GRAYSCALE_TRANSFER_FUNCTION = 1015
    COLOR_TRANSFER_FUNCTION = 1016
    DUOTONE_TRANSFER_FUNCTION = 1017
    DUOTONE_IMAGE_INFO = 1018
    EFFECTIVE_BW = 1019
    OBSOLETE_3 = 1020
    EPS_OPTIONS = 1021
    QUICK_MASK_INFO = 1022
    OBSOLETE_4 = 1023
    LAYER_STATE_INFO = 1024
    WORKING_PATH = 1025
    LAYER_GROUP_INFO = 1026
    OBSOLETE_5 = 1027
    IPTC_NAA = 1028
    IMAGE_MODE_RAW = 1029
    JPEG_QUALITY = 1030
    GRID_AND_GUIDES_INFO = 1032
    THUMBNAIL_RESOURCE_PS4 = 1033
    COPYRIGHT_FLAG = 1034
    URL = 1035
    THUMBNAIL_RESOURCE = 1036
    GLOBAL_ANGLE = 1037
    COLOR_SAMPLERS_RESOURCE_OBSOLETE = 1038
    ICC_PROFILE = 1039
    WATERMARK = 1040
    ICC_UNTAGGED_PROFILE = 1041
    EFFECTS_VISIBLE = 1042
    SPOT_HALFTONE = 1043
    IDS_SEED_NUMBER = 1044
    ALPHA_NAMES_UNICODE = 1045
    INDEXED_COLOR_TABLE_COUNT = 1046
    TRANSPARENCY_INDEX = 1047
    GLOBAL_ALTITUDE = 1049
    SLICES = 1050
    WORKFLOW_URL = 1051
    JUMP_TO_XPEP = 1052
    ALPHA_IDENTIFIERS = 1053
    URL_LIST = 1054
    VERSION_INFO = 1057
    EXIF_DATA_1 = 1058
    EXIF_DATA_3 = 1059
    XMP_METADATA = 1060
    CAPTION_DIGEST = 1061
    PRINT_SCALE = 1062
    PIXEL_ASPECT_RATIO = 1064
    LAYER_COMPS = 1065
    ALTERNATE_DUOTONE_COLORS = 1066
    ALTERNATE_SPOT_COLORS = 1067
    LAYER_SELECTION_IDS = 1069
    HDR_TONING_INFO = 1070
    PRINT_INFO_CS2 = 1071
    LAYER_GROUPS_ENABLED_ID = 1072
    COLOR_SAMPLERS_RESOURCE = 1073
    MEASUREMENT_SCALE = 1074
    TIMELINE_INFO = 1075
    SHEET_DISCLOSURE = 1076
    DISPLAY_INFO = 1077
    ONION_SKINS = 1078
    COUNT_INFO = 1080
    PRINT_INFO_CS5 = 1082
    PRINT_STYLE = 1083
    MAC_NSPRINTINFO = 1084
    WINDOWS_DEVMODE = 1085
    AUTO_SAVE_FILE_PATH = 1086
    AUTO_SAVE_FORMAT = 1087
    PATH_SELECTION_STATE = 1088
    PATH_INFO = 2000  # ..2997
    CLIPPING_PATH_NAME = 2999
    ORIGIN_PATH_INFO = 3000
    PLUGIN_RESOURCE = 4000  # ..4999
    IMAGE_READY_VARIABLES = 7000
    IMAGE_READY_DATA_SETS = 7001
    IMAGE_READY_DEFAULT_SELECTED_STATE = 7002
    IMAGE_READY_7_ROLLOVER_EXPANDED_STATE = 7003
    IMAGE_READY_ROLLOVER_EXPANDED_STATE = 7004
    IMAGE_READY_SAVE_LAYER_SETTINGS = 7005
    IMAGE_READY_VERSION = 7006
    LIGHTROOM_WORKFLOW = 8000
    PRINT_FLAGS_INFO = 10000
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Map ranged and unknown ids to pseudo-members that preserve
        # the original numeric value.
        assert isinstance(value, int)
        if 2000 <= value <= 2997:
            obj = cls(2000)  # PATH_INFO
        elif 4000 <= value <= 4999:
            obj = cls(4000)  # PLUGIN_RESOURCE (comment previously said PATH_INFO)
        else:
            obj = cls(-1)  # UNKONWN
        obj._value_ = value
        return obj
class PsdBlendMode(BytesEnum):
    """Blend modes."""
    # Four-character blend mode keys; note several values are padded
    # with a trailing space to four bytes.
    PASS_THROUGH = b'pass'
    NORMAL = b'norm'
    DISSOLVE = b'diss'
    DARKEN = b'dark'
    MULTIPLY = b'mul '
    COLOR_BURN = b'idiv'
    LINEAR_BURN = b'lbrn'
    DARKER_COLOR = b'dkCl'
    LIGHTEN = b'lite'
    SCREEN = b'scrn'
    COLOR_DODGE = b'div '
    LINEAR_DODGE = b'lddg'
    LIGHTER_COLOR = b'lgCl'
    OVERLAY = b'over'
    SOFT_LIGHT = b'sLit'
    HARD_LIGHT = b'hLit'
    VIVID_LIGHT = b'vLit'
    LINEAR_LIGHT = b'lLit'
    PIN_LIGHT = b'pLit'
    HARD_MIX = b'hMix'
    DIFFERENCE = b'diff'
    EXCLUSION = b'smud'
    SUBTRACT = b'fsub'
    DIVIDE = b'fdiv'
    HUE = b'hue '
    SATURATION = b'sat '
    COLOR = b'colr'
    LUMINOSITY = b'lum '
class PsdColorSpaceType(enum.IntEnum):
    """Color space types."""
    DUMMY = -1
    RGB = 0
    HSB = 1
    CMYK = 2
    Pantone = 3
    Focoltone = 4
    Trumatch = 5
    Toyo = 6
    Lab = 7
    Gray = 8
    WideCMYK = 9
    HKS = 10
    DIC = 11
    TotalInk = 12
    MonitorRGB = 13
    Duotone = 14
    Opacity = 15
    Web = 16
    GrayFloat = 17
    RGBFloat = 18
    OpacityFloat = 19
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Preserve unknown ids as pseudo-members instead of raising.
        assert isinstance(value, int)
        obj = cls(-1)
        obj._value_ = value
        return obj
class PsdImageMode(enum.IntEnum):
    """Image modes."""
    DUMMY = -1
    Bitmap = 0
    Grayscale = 1
    Indexed = 2
    RGB = 3
    CMYK = 4
    Multichannel = 7
    Duotone = 8
    Lab = 9
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Preserve unknown ids as pseudo-members instead of raising.
        assert isinstance(value, int)
        obj = cls(-1)
        obj._value_ = value
        return obj
class PsdChannelId(enum.IntEnum):
    """Channel types."""
    # non-negative values are color channels; negative values are masks
    CHANNEL0 = 0  # red, cyan, or gray
    CHANNEL1 = 1  # green or magenta
    CHANNEL2 = 2  # blue or yellow
    CHANNEL3 = 3  # black
    CHANNEL4 = 4
    CHANNEL5 = 5
    CHANNEL6 = 6
    CHANNEL7 = 7
    CHANNEL8 = 8
    CHANNEL9 = 9
    TRANSPARENCY_MASK = -1
    USER_LAYER_MASK = -2
    REAL_USER_LAYER_MASK = -3
class PsdClippingType(enum.IntEnum):
    """Clipping types."""
    BASE = 0
    NON_BASE = 1
class PsdCompressionType(enum.IntEnum):
    """Image compression types."""
    UNKNOWN = -1
    RAW = 0
    RLE = 1  # PackBits
    ZIP = 2
    ZIP_PREDICTED = 3
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Preserve unknown ids as pseudo-members instead of raising.
        assert isinstance(value, int)
        obj = cls(-1)
        obj._value_ = value
        return obj
class PsdLayerFlag(enum.IntFlag):
    """Layer record flags."""
    TRANSPARENCY_PROTECTED = 1
    VISIBLE = 2
    OBSOLETE = 4
    PHOTOSHOP5 = 8  # 1 for Photoshop 5.0 and later, tells if bit 4 has info
    IRRELEVANT = 16  # pixel data irrelevant to appearance of document
class PsdLayerMaskFlag(enum.IntFlag):
    """Layer mask flags."""
    RELATIVE = 1  # position relative to layer
    DISABLED = 2  # layer mask disabled
    INVERT = 4  # invert layer mask when blending (obsolete)
    RENDERED = 8  # user mask actually came from rendering other data
    APPLIED = 16  # user and/or vector masks have parameters applied to them
class PsdLayerMaskParameterFlag(enum.IntFlag):
    """Layer mask parameters."""
    # each set flag means the corresponding parameter value follows
    USER_DENSITY = 1  # user mask density, 1 byte
    USER_FEATHER = 2  # user mask feather, 8 byte, double
    VECTOR_DENSITY = 4  # vector mask density, 1 byte
    VECTOR_FEATHER = 8  # vector mask feather, 8 bytes, double
class PsdColorType(enum.IntFlag):
    """Color IDs used by sheet color setting structure."""
    # NOTE(review): members are sequential ids, not bit flags; IntFlag
    # behaves like IntEnum for lookups here, but enum.IntEnum was
    # probably intended — confirm before changing.
    UNKNOWN = -1
    NONE = 0
    RED = 1
    ORANGE = 2
    YELLOW = 3
    GREEN = 4
    BLUE = 5
    VIOLET = 6
    GRAY = 7
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Preserve unknown ids as pseudo-members instead of raising.
        assert isinstance(value, int)
        obj = cls(-1)
        obj._value_ = value
        return obj
class PsdSectionDividerType(enum.IntEnum):
    """Section divider setting types."""
    OTHER = 0
    OPEN_FOLDER = 1
    CLOSED_FOLDER = 2
    BOUNDING_SECTION_DIVIDER = 3
    @classmethod
    def _missing_(cls, value: object) -> object:
        # Unknown types map to OTHER while preserving the numeric value.
        assert isinstance(value, int)
        obj = cls(0)
        obj._value_ = value
        return obj
class PsdPoint(NamedTuple):
    """Point as (vertical, horizontal) coordinates."""
    vertical: int
    horizontal: int
    def __str__(self) -> str:
        return str((self.vertical, self.horizontal))
class PsdRectangle(NamedTuple):
    """Rectangle as (top, left, bottom, right) coordinates."""
    top: int
    left: int
    bottom: int
    right: int
    @property
    def shape(self) -> tuple[int, int]:
        """Height and width of the rectangle."""
        height = self.bottom - self.top
        width = self.right - self.left
        return height, width
    @property
    def offset(self) -> tuple[int, int]:
        """Position of the top-left corner."""
        return self.top, self.left
    def __bool__(self) -> bool:
        # true only for rectangles with positive area
        height, width = self.shape
        return height > 0 and width > 0
    def __str__(self) -> str:
        return str((self.top, self.left, self.bottom, self.right))
@dataclasses.dataclass(repr=False)
class PsdPascalString:
    """Pascal string: one length byte followed by up to 255 MacRoman bytes."""
    value: str
    @classmethod
    def read(cls, fh: BinaryIO, pad: int = 1) -> PsdPascalString:
        """Return instance from open file.

        Args:
            fh: Open binary file positioned at the length byte.
            pad: The whole record (1 + length bytes) is padded to a
                multiple of this value; padding bytes are skipped.

        Raises:
            IOError: If the file ends before the string is complete.
        """
        length_byte = fh.read(1)
        if not length_byte:
            # the original indexed an empty bytes object here, raising
            # an unhelpful IndexError on a truncated file
            raise IOError('could not read pascal string length byte')
        size = length_byte[0]
        # NOTE: a single byte is always <= 255, so no range check needed
        # (the original's `size > 255` branch was unreachable)
        data = fh.read(size)
        if len(data) != size:
            raise IOError(f'could not read enough data, {len(data)} != {size}')
        value = data.decode('macroman')
        # skip padding so the record occupies a multiple of `pad` bytes
        fh.seek((pad - (size + 1) % pad) % pad, 1)
        return cls(value=value)
    def write(self, fh: BinaryIO, pad: int = 1) -> int:
        """Write Pascal string to open file and return bytes written.

        The value is truncated to 255 characters and the record is
        NUL-padded to a multiple of `pad` bytes.
        """
        value = self.value[:255]
        data = value.encode('macroman')
        size = len(data)
        fh.write(struct.pack('B', size))
        fh.write(data)
        # renamed from `pad` to avoid shadowing the parameter
        padded = fh.write(b'\0' * ((pad - (size + 1) % pad) % pad))
        return 1 + size + padded
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.value!r})'
    def __str__(self):
        return self.value
@dataclasses.dataclass(repr=False)
class PsdUnicodeString:
    """Unicode string: 4-byte UTF-16 code-unit count followed by data."""
    value: str
    @classmethod
    def read(cls, fh: BinaryIO, psdformat: PsdFormat, /) -> PsdUnicodeString:
        """Return instance from open file.

        Raises:
            IOError: If the file ends before the string is complete.
        """
        # the stored length counts UTF-16 code units, two bytes each
        size = psdformat.read(fh, 'I') * 2
        assert size >= 0
        data = fh.read(size)
        if len(data) != size:
            raise IOError(f'could not read enough data, {len(data)} != {size}')
        value = data.decode(psdformat.utf16)
        # drop a single trailing NUL terminator if present
        if value and value[-1] == '\0':
            value = value[:-1]
        return cls(value=value)
    def write(
        self, fh: BinaryIO, psdformat: PsdFormat, /, terminate=True
    ) -> int:
        """Write unicode string to open file and return bytes written."""
        value = self.value + '\0' if terminate else self.value
        data = value.encode(psdformat.utf16)
        # BUG FIX: write the UTF-16 code-unit count (len(data) // 2),
        # not len(value): Python string length counts code points, which
        # differs when the string contains non-BMP (surrogate pair)
        # characters, so `read` would then consume too few bytes.
        written = psdformat.write(fh, 'I', len(data) // 2)
        written += fh.write(data)
        return written
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.value!r})'
    def __str__(self):
        return self.value
class PsdFormat(bytes, enum.Enum):
    """PSD format."""
    # signatures; little-endian variants are the byte-reversed values
    BE32BIT = b'8BIM'
    LE32BIT = b'MIB8'
    BE64BIT = b'8B64'
    LE64BIT = b'46B8'
    @property
    def byteorder(self) -> Literal['>'] | Literal['<']:
        """Struct byte-order character for this format."""
        if self.value == PsdFormat.BE32BIT or self.value == PsdFormat.BE64BIT:
            return '>'
        return '<'
    @property
    def sizeformat(self) -> str:
        """Struct format for this format's default size field."""
        if self.value == PsdFormat.BE32BIT:
            return '>I'
        if self.value == PsdFormat.LE32BIT:
            return '<I'
        if self.value == PsdFormat.BE64BIT:
            return '>Q'
        return '<Q'
    @property
    def utf16(self):
        """Codec name for UTF-16 strings in this format."""
        if self.value == PsdFormat.BE32BIT or self.value == PsdFormat.BE64BIT:
            return 'UTF-16-BE'
        return 'UTF-16-LE'
    @property
    def isb64(self):
        """True if this is a 64-bit (PSB-style) format."""
        return (
            self.value == PsdFormat.BE64BIT or self.value == PsdFormat.LE64BIT
        )
    def read(self, fh: BinaryIO, fmt: str) -> Any:
        """Return unpacked values."""
        fmt = self.byteorder + fmt
        value = struct.unpack(fmt, fh.read(struct.calcsize(fmt)))
        # single values are returned unwrapped for convenience
        return value[0] if len(value) == 1 else value
    def write(self, fh: BinaryIO, fmt: str, *values) -> int:
        """Write values to open file."""
        return fh.write(struct.pack(self.byteorder + fmt, *values))
    def pack(self, fmt: str, *values) -> bytes:
        """Return packed values."""
        return struct.pack(self.byteorder + fmt, *values)
    def read_size(self, fh: BinaryIO, key: PsdKey | None = None) -> int:
        """Return integer whose size depends on signature or key from file."""
        # PSD_KEY_64BIT is defined elsewhere in this module; presumably
        # the set of keys whose lengths are 64-bit in 64-bit files.
        if key is None:
            fmt = self.sizeformat
        elif self.isb64 and key in PSD_KEY_64BIT:
            fmt = self.sizeformat  # TODO: test this
        else:
            fmt = self.byteorder + 'I'
        return struct.unpack(fmt, fh.read(struct.calcsize(fmt)))[0]
    def write_size(
        self, fh: BinaryIO, value: int, key: PsdKey | None = None
    ) -> int:
        """Write integer whose size depends on signature or key to file."""
        return fh.write(self.pack_size(value, key))
    def pack_size(self, value: int, key: PsdKey | None = None) -> bytes:
        """Pack integer whose size depends on signature or key."""
        if key is None:
            fmt = self.sizeformat
        elif self.isb64 and key in PSD_KEY_64BIT:
            fmt = self.sizeformat  # TODO: test this
        else:
            fmt = self.byteorder + 'I'
        return struct.pack(fmt, value)
    def write_signature(self, fh: BinaryIO, signature: bytes, /) -> int:
        """Write signature to file."""
        return fh.write(
            signature if self.byteorder == '>' else signature[::-1]
        )
    def write_key(self, fh: BinaryIO, key: PsdKey, /) -> int:
        """Write key to file in this format's byte order."""
        return fh.write(
            key.value if self.byteorder == '>' else key.value[::-1]
        )
class PsdKeyABC(metaclass=abc.ABCMeta):
    """Abstract base class for structures with key."""
    # the four-character key identifying this tagged structure
    key: PsdKey
    @classmethod
    @abc.abstractmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdKeyABC:
        """Return instance from open file."""
        pass
    @classmethod
    def frombytes(
        cls, data: bytes, psdformat: PsdFormat, key: PsdKey, /
    ) -> PsdKeyABC:
        """Return instance from bytes."""
        # delegate to read() via an in-memory file
        with io.BytesIO(data) as fh:
            self = cls.read(fh, psdformat, key, length=len(data))
        return self
    @abc.abstractmethod
    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write instance values to open file."""
        pass
    def tobytes(self, psdformat: PsdFormat, /):
        """Return instance values as bytes."""
        # delegate to write() via an in-memory file
        with io.BytesIO() as fh:
            self.write(fh, psdformat)
            data = fh.getvalue()
        return data
    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.key.name}>'
@dataclasses.dataclass(repr=False)
class PsdLayers(PsdKeyABC):
    """Sequence of PsdLayer."""
    key: PsdKey
    layers: list[PsdLayer] = dataclasses.field(default_factory=list)
    has_transparency: bool = False
    # maps the layer-info key to the channel image dtype character
    TYPES = {
        PsdKey.LAYER: 'B',
        PsdKey.LAYER_16: 'H',
        PsdKey.LAYER_32: 'f',
    }
    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
        unknown: bool = True,
    ) -> PsdLayers:
        """Return instance from open file."""
        # a negative layer count signals that the first alpha channel
        # holds the merged-image transparency
        count = psdformat.read(fh, 'h')
        has_transparency = count < 0
        count = abs(count)
        # layer records
        layers = []
        for _ in range(count):
            layers.append(PsdLayer.read(fh, psdformat, unknown=unknown))
        # channel image data follows all layer records, in the same order
        dtype = PsdLayers.TYPES[key]
        shape: tuple[int, ...] = ()
        for layer in layers:
            for channel in layer.channels:
                # mask channels (ids < -1) use the mask's own rectangle
                if channel.channelid < -1 and layer.mask is not None:
                    shape = layer.mask.shape
                else:
                    shape = layer.shape
                channel.read_image(fh, psdformat, shape, dtype)
        return cls(
            key=key,
            layers=layers,
            has_transparency=has_transparency,
        )
    @classmethod
    def frombytes(
        cls,
        data: bytes,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        unknown: bool = True,
    ) -> PsdLayers:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            self = cls.read(
                fh, psdformat, key, length=len(data), unknown=unknown
            )
        return self
    def write(
        self,
        fh: BinaryIO,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> int:
        """Write layer records and channel info data to open file."""
        pos = fh.tell()
        # layer count; negated to flag merged-image transparency
        psdformat.write(
            fh, 'h', (-1 if self.has_transparency else 1) * len(self.layers)
        )
        # layer records; each returns its channel image data for later
        channel_image_data = []
        for layer in self.layers:
            data = layer.write(
                fh, psdformat, compression=compression, unknown=unknown
            )
            channel_image_data.append(data)
        # channel image data, written after all layer records
        for data in channel_image_data:
            fh.write(data)
        size = fh.tell() - pos
        if size % 2:
            # length of layers info must be multiple of 2
            fh.write(b'\0')
            size += 1
        return size
    def tobytes(
        self,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ):
        """Return layer records and channel info data as bytes."""
        with io.BytesIO() as fh:
            self.write(fh, psdformat, compression=compression, unknown=unknown)
            data = fh.getvalue()
        return data
    @property
    def dtype(self) -> numpy.dtype:
        # channel image dtype implied by the layer-info key
        return numpy.dtype(PsdLayers.TYPES[self.key])
    @property
    def shape(self) -> tuple[int, int]:
        # bounding (height, width) over all layer and mask rectangles
        shape = [0, 0]
        for layer in self.layers:
            if layer.rectangle[2] > shape[0]:
                shape[0] = layer.rectangle[2]
            if layer.rectangle[3] > shape[1]:
                shape[1] = layer.rectangle[3]
            if layer.mask is not None and layer.mask.rectangle is not None:
                if layer.mask.rectangle[2] > shape[0]:
                    shape[0] = layer.mask.rectangle[2]
                if layer.mask.rectangle[3] > shape[1]:
                    shape[1] = layer.mask.rectangle[3]
        return shape[0], shape[1]
    def __bool__(self) -> bool:
        return len(self.layers) > 0
    def __len__(self) -> int:
        return len(self.layers)
    def __getitem__(self, key: int) -> PsdLayer:
        return self.layers[key]
    def __setitem__(self, key: int, value: PsdLayer):
        self.layers[key] = value
    def __iter__(self):
        yield from self.layers
    def __str__(self) -> str:
        return indent(
            repr(self),
            # f'length: {len(self)}',
            f'shape: {self.shape!r}',
            f'dtype: {numpy.dtype(self.dtype)}',
            f'has_transparency: {self.has_transparency!r}',
            *self.layers,
        )
@dataclasses.dataclass(repr=False)
class PsdLayer:
    """PSD layer record.

    One entry of the layer info structure: bounds, channels, blend
    settings, optional mask, and tagged info blocks. Channel image
    data is read and written separately from the layer record.
    """

    name: str  # layer name
    channels: list[PsdChannel]  # channel info and image data records
    rectangle: PsdRectangle  # layer bounds in image coordinates
    mask: PsdLayerMask | None = None  # layer mask / adjustment data
    opacity: int = 255  # 0 (transparent) .. 255 (opaque)
    blendmode: PsdBlendMode = PsdBlendMode.NORMAL
    blending_ranges: tuple[int, ...] = ()  # raw blending-range values
    clipping: PsdClippingType = PsdClippingType(0)
    flags: PsdLayerFlag = PsdLayerFlag(0)
    info: list[Any] = dataclasses.field(default_factory=list)  # tagged blocks

    @classmethod
    def read(
        cls, fh: BinaryIO, psdformat: PsdFormat, /, unknown: bool = True
    ) -> PsdLayer:
        """Return instance from open file.

        Channel image data must be read separately.
        `unknown` is forwarded to read_psdtags to control whether
        unrecognized tagged blocks are kept.
        """
        rectangle = PsdRectangle(*psdformat.read(fh, 'iiii'))
        count = psdformat.read(fh, 'H')  # number of channels
        channels = []
        for _ in range(count):
            channels.append(PsdChannel.read(fh, psdformat))
        signature = fh.read(4)
        # blend mode signature; byte order depends on file format
        assert signature in (b'8BIM', b'MIB8')
        blendmode = PsdBlendMode(fh.read(4))
        opacity = fh.read(1)[0]
        clipping = PsdClippingType(fh.read(1)[0])
        flags = PsdLayerFlag(fh.read(1)[0])
        filler = fh.read(1)[0]  # filler byte, always zero
        assert filler == 0
        # extra data section: mask, blending ranges, name, tagged blocks
        extra_size = psdformat.read(fh, 'I')
        end = fh.tell() + extra_size
        # layer mask data
        mask = PsdLayerMask.read(fh, psdformat)
        # layer blending ranges
        nbytes = psdformat.read(fh, 'I')
        assert nbytes % 4 == 0
        blending_ranges = psdformat.read(fh, 'i' * (nbytes // 4))
        # layer name as Pascal string, padded to a multiple of 4 bytes
        name = str(PsdPascalString.read(fh, pad=4))
        # remaining extra data holds tagged info blocks
        info = read_psdtags(
            fh, psdformat, length=end - fh.tell(), unknown=unknown, align=2
        )
        fh.seek(end)  # skip any remaining padding
        return cls(
            name=name,
            channels=channels,
            blending_ranges=blending_ranges,
            mask=mask,
            rectangle=rectangle,
            opacity=opacity,
            blendmode=blendmode,
            clipping=clipping,
            flags=flags,
            info=info,
        )

    @classmethod
    def frombytes(cls, data: bytes, psdformat: PsdFormat, /) -> PsdLayer:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            self = cls.read(fh, psdformat)
        return self

    def write(
        self,
        fh: BinaryIO,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> bytes:
        """Write layer record to open file and return channel data records.

        The return value is the concatenated channel image data, which
        the caller must write after all layer records.
        """
        psdformat.write(fh, 'iiii', *self.rectangle)
        psdformat.write(fh, 'H', len(self.channels))
        channel_image_data = []
        for channel in self.channels:
            # writes the channel info record; returns the image data record
            data = channel.write(fh, psdformat, compression=compression)
            channel_image_data.append(data)
        psdformat.write_signature(fh, b'8BIM')  # blend mode signature
        psdformat.write(
            fh,
            '4sBBBB',
            self.blendmode.tobytes(psdformat.byteorder),
            self.opacity,
            self.clipping.value,
            self.flags,
            0,  # filler byte
        )
        # extra data size is patched in after the extra data is written
        extra_size_pos = fh.tell()
        psdformat.write(fh, 'I', 0)  # placeholder
        pos = fh.tell()
        # layer mask data
        if self.mask is None:
            psdformat.write(fh, 'I', 0)  # zero-sized mask section
        else:
            size = self.mask.write(fh, psdformat)
            assert size in (4, 24, 40)
        # layer blending ranges
        psdformat.write(fh, 'I', len(self.blending_ranges) * 4)
        psdformat.write(
            fh, 'i' * len(self.blending_ranges), *self.blending_ranges
        )
        PsdPascalString(self.name).write(fh, pad=4)
        write_psdtags(fh, psdformat, compression, unknown, 2, *self.info)
        # patch the extra data size, then restore the file position
        extra_size = fh.tell() - pos
        fh.seek(extra_size_pos)
        psdformat.write(fh, 'I', extra_size)
        fh.seek(extra_size, 1)
        return b''.join(channel_image_data)

    def tobytes(
        self,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> tuple[bytes, bytes]:
        """Return layer and channel data records."""
        with io.BytesIO() as fh:
            channel_image_data = self.write(
                fh, psdformat, compression=compression, unknown=unknown
            )
            layer_record = fh.getvalue()
        return layer_record, channel_image_data

    def asarray(
        self, channelid: bytes | None = None, planar: bool = False
    ) -> numpy.ndarray:
        """Return channel image data as numpy array.

        If `channelid` is None, all channels with non-negative ids are
        stacked, plus the channel with id -1 (presumably the
        transparency mask — TODO confirm against PsdChannelId), if any.
        Raises ValueError if no channel matches the selection.
        """
        if channelid is not None:
            datalist = [
                channel.data
                for channel in self.channels
                if channel.channelid == channelid and channel.data is not None
            ]
        else:
            datalist = [
                channel.data
                for channel in self.channels
                if channel.channelid >= 0 and channel.data is not None
            ]
            for channel in self.channels:
                if channel.channelid == -1 and channel.data is not None:
                    datalist.append(channel.data)
                    break
        if len(datalist) == 0:
            raise ValueError('no channel matching selection found')
        if len(datalist) == 1:
            data = datalist[0]
        else:
            # channels-first stack; move channel axis last unless planar
            data = numpy.stack(datalist)
            if not planar:
                data = numpy.moveaxis(data, 0, -1)
        return data

    @property
    def shape(self) -> tuple[int, int]:
        # (height, width) of the layer bounds
        return self.rectangle.shape if self.rectangle else (0, 0)

    @property
    def offset(self) -> tuple[int, int]:
        # (top, left) position of the layer in the image
        return self.rectangle.offset

    @property
    def has_unknowns(self) -> bool:
        # True if any tagged info block was not recognized
        return any(isinstance(tag, PsdUnknown) for tag in self.info)

    def __eq__(self, other: object) -> bool:
        # name is intentionally excluded from the comparison
        return (
            isinstance(other, self.__class__)
            # and self.name == other.name
            and self.rectangle == other.rectangle
            and self.opacity == other.opacity
            and self.blendmode == other.blendmode
            and self.blending_ranges == other.blending_ranges
            and self.clipping == other.clipping
            and self.flags == other.flags
            and self.mask == other.mask
            and self.info == other.info
            and self.channels == other.channels
        )

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {str(self.name)!r}>'

    def __str__(self) -> str:
        return indent(
            repr(self),
            f'rectangle: {self.rectangle}',
            f'opacity: {self.opacity}',
            f'blendmode: {self.blendmode.name}',
            f'clipping: {self.clipping.name}',
            f'flags: {str(self.flags)}',
            self.mask,
            indent(f'channels[{len(self.channels)}]', *self.channels),
            indent(f'info[{len(self.info)}]', *self.info),
        )
@dataclasses.dataclass(repr=False)
class PsdChannel:
    """ChannelInfo and ChannelImageData."""

    channelid: PsdChannelId  # channel identifier
    compression: PsdCompressionType = PsdCompressionType.RAW
    data: numpy.ndarray | None = None  # decoded channel image
    _data_length: int = 0  # image data record length as read from file

    @classmethod
    def read(cls, fh: BinaryIO, psdformat: PsdFormat, /) -> PsdChannel:
        """Return instance from open file.

        Channel image data must be read separately using read_image.
        """
        channelid = PsdChannelId(psdformat.read(fh, 'h'))
        data_length = psdformat.read_size(fh)
        return cls(channelid=channelid, _data_length=data_length)

    @classmethod
    def frombytes(cls, data: bytes, psdformat: PsdFormat, /) -> PsdChannel:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            self = cls.read(fh, psdformat)
        return self

    def read_image(
        self,
        fh: BinaryIO,
        psdformat: PsdFormat,
        /,
        shape: tuple[int, ...],
        dtype: numpy.dtype | str,
    ) -> None:
        """Read channel image data from open file.

        Raises RuntimeError if image data were already read.
        """
        if self.data is not None:
            raise RuntimeError
        self.compression = PsdCompressionType(psdformat.read(fh, 'H'))
        # the compression field consumed 2 bytes of the data record
        data = fh.read(self._data_length - 2)
        dtype = numpy.dtype(dtype).newbyteorder(psdformat.byteorder)
        # RLE row counts are 32-bit in 64-bit (PSB) formats, else 16-bit
        rlecountfmt = psdformat.byteorder + ('I' if psdformat.isb64 else 'H')
        self.data = decompress(
            data, self.compression, shape, dtype, rlecountfmt
        )

    def tobytes(
        self,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
    ) -> tuple[bytes, bytes]:
        """Return channel info and image data records.

        Raises ValueError if no data or the dtype is unsupported.
        """
        if self.data is None:
            raise ValueError('data is None')
        if compression is None:
            compression = self.compression  # keep compression as read
        else:
            compression = PsdCompressionType(compression)
        channel_image_data = psdformat.pack('H', compression)
        dtype = self.data.dtype.newbyteorder(psdformat.byteorder)
        if dtype.char not in PsdLayers.TYPES.values():
            raise ValueError(f'dtype {dtype!r} not supported')
        data = numpy.asarray(self.data, dtype=dtype)
        # RLE row counts are 32-bit in 64-bit (PSB) formats, else 16-bit
        rlecountfmt = psdformat.byteorder + ('I' if psdformat.isb64 else 'H')
        channel_image_data += compress(data, compression, rlecountfmt)
        channel_info = psdformat.pack('h', self.channelid)
        channel_info += psdformat.pack_size(len(channel_image_data))
        return channel_info, channel_image_data

    def write(
        self,
        fh: BinaryIO,
        psdformat: PsdFormat,
        /,
        compression: PsdCompressionType | int | None = None,
    ) -> bytes:
        """Write channel info record to file and return image data record."""
        channel_info, channel_image_data = self.tobytes(
            psdformat, compression=compression
        )
        fh.write(channel_info)
        return channel_image_data

    def __eq__(self, other: object) -> bool:
        # compression is intentionally excluded from the comparison
        return (
            isinstance(other, self.__class__)
            and self.channelid == other.channelid
            and numpy.array_equal(self.data, other.data)  # type: ignore
            # and self.compression == other.compression
        )

    def __repr__(self) -> str:
        return (
            f'<{self.__class__.__name__}'
            f' {self.channelid.name} {self.compression.name}>'
        )
@dataclasses.dataclass(repr=False)
class PsdLayerMask:
    """Layer mask / adjustment layer data."""

    default_color: int = 0  # 0 or 255
    rectangle: PsdRectangle | None = None  # mask bounds; None means no mask
    flags: PsdLayerMaskFlag = PsdLayerMaskFlag(0)
    user_mask_density: int | None = None
    user_mask_feather: float | None = None
    vector_mask_density: int | None = None
    vector_mask_feather: float | None = None
    real_flags: PsdLayerMaskFlag | None = None
    real_background: int | None = None
    real_rectangle: PsdRectangle | None = None

    @classmethod
    def read(cls, fh: BinaryIO, psdformat: PsdFormat, /) -> PsdLayerMask:
        """Return instance from open file."""
        size = psdformat.read(fh, 'I')  # size of mask data section
        if size == 0:
            return cls()  # no mask
        rectangle = PsdRectangle(*psdformat.read(fh, 'iiii'))
        default_color = fh.read(1)[0]
        flags = PsdLayerMaskFlag(fh.read(1)[0])
        user_mask_density = None
        user_mask_feather = None
        vector_mask_density = None
        vector_mask_feather = None
        if flags & 0b1000:
            # bit 3 set: mask parameter bytes follow; which values are
            # present is given by the parameter flags byte
            param_flags = PsdLayerMaskParameterFlag(fh.read(1)[0])
            if param_flags & PsdLayerMaskParameterFlag.USER_DENSITY:
                user_mask_density = fh.read(1)[0]
            if param_flags & PsdLayerMaskParameterFlag.USER_FEATHER:
                user_mask_feather = psdformat.read(fh, 'd')
            if param_flags & PsdLayerMaskParameterFlag.VECTOR_DENSITY:
                vector_mask_density = fh.read(1)[0]
            if param_flags & PsdLayerMaskParameterFlag.VECTOR_FEATHER:
                vector_mask_feather = psdformat.read(fh, 'd')
        if size == 20:
            fh.seek(2, 1)  # padding
            real_flags = None
            real_background = None
            real_rectangle = None
        else:
            # larger sections carry "real" flags/background/rectangle
            real_flags = PsdLayerMaskFlag(fh.read(1)[0])
            real_background = fh.read(1)[0]
            real_rectangle = PsdRectangle(*psdformat.read(fh, 'iiii'))
        return cls(
            rectangle=rectangle,
            default_color=default_color,
            flags=flags,
            user_mask_density=user_mask_density,
            user_mask_feather=user_mask_feather,
            vector_mask_density=vector_mask_density,
            vector_mask_feather=vector_mask_feather,
            real_flags=real_flags,
            real_background=real_background,
            real_rectangle=real_rectangle,
        )

    @classmethod
    def frombytes(cls, data: bytes, psdformat: PsdFormat, /) -> PsdLayerMask:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            self = cls.read(fh, psdformat)
        return self

    def tobytes(self, psdformat: PsdFormat, /) -> bytes:
        """Return layer mask structure."""
        if self.rectangle is None:
            return psdformat.pack('I', 0)  # empty mask section
        flags = self.flags
        param_flags = self.param_flags
        if param_flags:
            flags = flags | 0b1000  # bit 3 signals parameter bytes
        data = psdformat.pack('iiii', *self.rectangle)
        # default color is normalized to 0 or 255
        data += psdformat.pack('B', 255 if self.default_color else 0)
        data += psdformat.pack('B', flags)
        if param_flags:
            data += psdformat.pack('B', param_flags)
            if self.user_mask_density is not None:
                data += psdformat.pack('B', self.user_mask_density)
            if self.user_mask_feather is not None:
                data += psdformat.pack('d', self.user_mask_feather)
            if self.vector_mask_density is not None:
                data += psdformat.pack('B', self.vector_mask_density)
            if self.vector_mask_feather is not None:
                data += psdformat.pack('d', self.vector_mask_feather)
            # the extended section requires the "real" values
            assert self.real_flags is not None
            assert self.real_background is not None
            assert self.real_rectangle is not None
            data += psdformat.pack(
                'BB4i',
                self.real_flags,
                self.real_background,
                *self.real_rectangle,
            )
        else:
            data += b'\0\0'  # padding
            assert len(data) == 20
        return psdformat.pack('I', len(data)) + data

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write layer mask structure to open file."""
        return fh.write(self.tobytes(psdformat))

    @property
    def param_flags(self) -> PsdLayerMaskParameterFlag:
        # parameter flags derived from which optional values are set
        flags = 0
        if self.user_mask_density is not None:
            flags |= PsdLayerMaskParameterFlag.USER_DENSITY
        if self.user_mask_feather is not None:
            flags |= PsdLayerMaskParameterFlag.USER_FEATHER
        if self.vector_mask_density is not None:
            flags |= PsdLayerMaskParameterFlag.VECTOR_DENSITY
        if self.vector_mask_feather is not None:
            flags |= PsdLayerMaskParameterFlag.VECTOR_FEATHER
        return PsdLayerMaskParameterFlag(flags)

    @property
    def shape(self) -> tuple[int, int]:
        # (height, width) of the mask bounds
        return self.rectangle.shape if self.rectangle else (0, 0)

    @property
    def offset(self) -> tuple[int, int]:
        # (top, left) position of the mask
        return self.rectangle.offset if self.rectangle is not None else (0, 0)

    def __bool__(self) -> bool:
        return self.rectangle is not None

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.rectangle}>'

    def __str__(self) -> str:
        if self.rectangle is None:
            return repr(self)
        info = [
            repr(self),
            # f'rectangle: {self.rectangle}',
            f'default_color: {self.default_color!r}',
        ]
        if self.flags:
            info += [f'flags: {str(self.flags)}']
        if self.user_mask_density is not None:
            info += [f'user_mask_density: {self.user_mask_density}']
        if self.user_mask_feather is not None:
            info += [f'user_mask_feather: {self.user_mask_feather}']
        if self.vector_mask_density is not None:
            info += [f'vector_mask_density: {self.vector_mask_density}']
        if self.vector_mask_feather is not None:
            info += [f'vector_mask_feather: {self.vector_mask_feather}']
        if self.real_flags is not None and self.real_background is not None:
            info += [
                f'real_background: {self.real_background!r}',
                repr(self.real_rectangle),
                repr(self.real_flags),
            ]
        return indent(*info)
@dataclasses.dataclass(repr=False)
class PsdUserMask(PsdKeyABC):
    """User mask. Same as global layer mask info table."""

    colorspace: PsdColorSpaceType = PsdColorSpaceType(-1)
    components: tuple[int, int, int, int] = (0, 0, 0, 0)
    opacity: int = 0
    flag: int = 128

    key = PsdKey.USER_MASK

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdUserMask:
        """Return instance from open file."""
        cspace = PsdColorSpaceType(psdformat.read(fh, 'h'))
        # Lab components are signed; other color spaces are unsigned
        comp_fmt = '4h' if cspace == PsdColorSpaceType.Lab else '4H'
        comp = psdformat.read(fh, comp_fmt)
        alpha = psdformat.read(fh, 'H')
        flagbyte = fh.read(1)[0]
        return cls(
            colorspace=cspace,
            components=comp,
            opacity=alpha,
            flag=flagbyte,
        )

    def tobytes(self, psdformat: PsdFormat, /) -> bytes:
        """Return user mask record."""
        comp_fmt = '4h' if self.colorspace == PsdColorSpaceType.Lab else '4H'
        parts = [
            psdformat.pack('h', self.colorspace.value),
            psdformat.pack(comp_fmt, *self.components),
            psdformat.pack('HB', self.opacity, self.flag),
            b'\0',  # trailing padding byte
        ]
        return b''.join(parts)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write user mask record to open file."""
        return fh.write(self.tobytes(psdformat))

    def __str__(self) -> str:
        details = [
            f'colorspace: {self.colorspace.name}',
            f'components: {self.components}',
            f'opacity: {self.opacity}',
            # flag is always 128; not shown
        ]
        return indent(repr(self), *details)
@dataclasses.dataclass(repr=False)
class PsdFilterMask(PsdKeyABC):
    """Filter Mask (Photoshop CS3)."""

    colorspace: PsdColorSpaceType  # mask color space
    components: tuple[int, int, int, int] = (0, 0, 0, 0)  # color components
    opacity: int = 0

    key = PsdKey.FILTER_MASK

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdFilterMask:
        """Return instance from open file."""
        colorspace = PsdColorSpaceType(psdformat.read(fh, 'h'))
        # Lab components are signed; other color spaces are unsigned
        fmt = '4h' if colorspace == PsdColorSpaceType.Lab else '4H'
        components = psdformat.read(fh, fmt)
        opacity = psdformat.read(fh, 'H')
        return cls(
            colorspace=colorspace,
            components=components,
            opacity=opacity,
        )

    def tobytes(self, psdformat: PsdFormat, /) -> bytes:
        """Return filter mask record."""
        data = psdformat.pack('h', self.colorspace.value)
        fmt = '4h' if self.colorspace == PsdColorSpaceType.Lab else '4H'
        data += psdformat.pack(fmt, *self.components)
        data += psdformat.pack('H', self.opacity)
        return data

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write filter mask record to open file."""
        return fh.write(self.tobytes(psdformat))

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.colorspace.name}>'

    def __str__(self) -> str:
        return indent(
            repr(self),
            # fixed: this line was mislabeled 'components:'
            f'colorspace: {self.colorspace.name}',
            f'components: {self.components}',
            f'opacity: {self.opacity}',
        )
@dataclasses.dataclass(repr=False)
class PsdPatterns(PsdKeyABC):
    """Patterns (Photoshop 6.0 and CS 8.0)."""

    key: PsdKey
    imagemode: PsdImageMode  # image mode of the pattern
    name: str  # pattern name
    guid: str  # unique pattern identifier
    data: PsdVirtualMemoryArrayList  # pattern channel data
    colortable: numpy.ndarray | None = None  # 256x3 table for Indexed mode
    point: PsdPoint = PsdPoint(0, 0)

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdPatterns:
        """Return instance from open file."""
        length, version = psdformat.read(fh, 'II')
        assert version == 1
        imagemode = PsdImageMode(psdformat.read(fh, 'I'))
        point = PsdPoint(*psdformat.read(fh, 'hh'))
        name = str(PsdUnicodeString.read(fh, psdformat))
        guid = str(PsdPascalString.read(fh))
        if imagemode == PsdImageMode.Indexed:
            # indexed mode carries a 256-entry RGB color table
            colortable = numpy.frombuffer(fh.read(768), numpy.uint8, count=768)
            colortable.shape = 256, 3
        else:
            colortable = None
        data = PsdVirtualMemoryArrayList.read(fh, psdformat)
        return cls(
            key=key,
            imagemode=imagemode,
            name=name,
            guid=guid,
            data=data,
            colortable=colortable,
            point=point,
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write patterns to open file and return bytes written."""
        # length field is patched in after writing the record
        length_pos = fh.tell()
        psdformat.write(fh, 'I', 0)  # length placeholder
        pos = fh.tell()
        psdformat.write(fh, 'I', 1)  # version
        psdformat.write(fh, 'I', self.imagemode.value)
        psdformat.write(fh, 'hh', *self.point)
        PsdUnicodeString(self.name).write(fh, psdformat)
        PsdPascalString(self.guid).write(fh)
        if self.colortable is not None:
            # color table is only valid for Indexed mode
            assert self.imagemode == PsdImageMode.Indexed
            fh.write(self.colortable.tobytes())
        self.data.write(fh, psdformat)
        # patch the record length, then restore the file position
        length = fh.tell() - pos
        fh.seek(length_pos)
        psdformat.write(fh, 'I', length)
        fh.seek(length, 1)
        return length + 4  # include the length field itself

    def asarray(self, planar: bool = False) -> numpy.ndarray:
        """Return channel image data as numpy array.

        Raises ValueError if no channel contains data.
        """
        datalist = [channel.data for channel in self.data if channel]
        if len(datalist) == 0:
            raise ValueError('no channel data found')
        if len(datalist) == 1:
            return datalist[0]
        # channels-first stack; move channel axis last unless planar
        data = numpy.stack(datalist)
        if not planar:
            data = numpy.moveaxis(data, 0, -1)
        return data

    def __str__(self) -> str:
        colortable = None if self.colortable is None else self.colortable.shape
        return indent(
            repr(self),
            f'imagemode: {self.imagemode.name}',
            f'name: {str(self.name)!r}',
            f'guid: {str(self.guid)!r}',
            f'colortable: {colortable}',
            f'point: {self.point}',
            self.data,
        )
@dataclasses.dataclass(repr=False)
class PsdMetadataSettings(PsdKeyABC):
    """Metadata setting (Photoshop 6.0)."""

    items: list[PsdMetadataSetting] = dataclasses.field(default_factory=list)

    key = PsdKey.METADATA_SETTING

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdMetadataSettings:
        """Return metadata settings from open file."""
        self = cls()
        count = psdformat.read(fh, 'I')  # number of setting items
        for _ in range(count):
            self.items.append(PsdMetadataSetting.read(fh, psdformat))
        return self

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write metadata settings to open file and return bytes written."""
        written = psdformat.write(fh, 'I', len(self.items))
        for item in self.items:
            # accumulate; previously only the last item's size was returned
            written += item.write(fh, psdformat)
        return written

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} [{len(self.items)}]>'

    def __str__(self) -> str:
        return indent(repr(self), *self.items)
@dataclasses.dataclass(repr=False)
class PsdMetadataSetting:
    """Metadata setting item."""

    signature: PsdFormat  # four-byte signature of the item
    key: bytes  # four-byte item key
    data: bytes = b''  # undecoded payload
    copyonsheet: bool = False

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
    ) -> PsdMetadataSetting:
        """Return metadata setting from open file."""
        sig = PsdFormat(fh.read(4))
        # assert sig in (b'8BIM', b'MIB8')
        keybytes = fh.read(4)
        copy_flag = psdformat.read(fh, '?xxx')
        size = psdformat.read(fh, 'I')
        payload = fh.read(size)  # TODO: parse DescriptorStructure
        return cls(
            signature=sig, key=keybytes, data=payload, copyonsheet=copy_flag
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write metadata setting to open file."""
        # TODO: can the format change?
        # psdformat.write_signature(fh, self.signature)
        # psdformat.write_key(fh, self.key)
        fh.write(self.signature.value)
        fh.write(self.key)
        psdformat.write(fh, '?xxxI', self.copyonsheet, len(self.data))
        fh.write(self.data)
        return 16 + len(self.data)

    def __eq__(self, other: object) -> bool:
        # the signature is intentionally excluded from the comparison
        if not isinstance(other, self.__class__):
            return False
        return (
            self.key == other.key
            and self.copyonsheet == other.copyonsheet
            and self.data == other.data
        )

    def __repr__(self) -> str:
        return (
            f'<{self.__class__.__name__} {self.key!r} {len(self.data)} bytes>'
        )
@dataclasses.dataclass(repr=False)
class PsdVirtualMemoryArrayList:
    """Virtual memory array list."""

    rectangle: PsdRectangle  # bounds covered by the arrays
    channels: list[PsdVirtualMemoryArray] = dataclasses.field(
        default_factory=list
    )

    @classmethod
    def read(
        cls, fh: BinaryIO, psdformat: PsdFormat, /
    ) -> PsdVirtualMemoryArrayList:
        """Return instance from open file."""
        version = psdformat.read(fh, 'I')
        assert version == 3
        length = psdformat.read(fh, 'I')
        rectangle = PsdRectangle(*psdformat.read(fh, '4I'))
        channelcount = psdformat.read(fh, 'I')
        channels = []
        # two more arrays are stored than the reported channel count
        # (the count written back is len(channels) - 2)
        for _ in range(channelcount + 2):
            channels.append(PsdVirtualMemoryArray.read(fh, psdformat))
        return cls(rectangle=rectangle, channels=channels)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write virtual memory array list to open file; return bytes written."""
        psdformat.write(fh, 'I', 3)  # version
        # length field is patched in after writing the arrays
        length_pos = fh.tell()
        psdformat.write(fh, 'I', 0)  # length placeholder
        pos = fh.tell()
        psdformat.write(fh, '4I', *self.rectangle)
        psdformat.write(fh, 'I', len(self.channels) - 2)
        for channel in self.channels:
            channel.write(fh, psdformat)
        # patch the length, then restore the file position
        length = fh.tell() - pos
        fh.seek(length_pos)
        psdformat.write(fh, 'I', length)
        fh.seek(length, 1)
        return length + 8  # include version and length fields

    def __len__(self) -> int:
        return len(self.channels)

    def __getitem__(self, key: int) -> PsdVirtualMemoryArray:
        return self.channels[key]

    def __setitem__(self, key: int, value: PsdVirtualMemoryArray):
        self.channels[key] = value

    def __iter__(self):
        yield from self.channels

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} [{len(self.channels)}]>'

    def __str__(self) -> str:
        # only show arrays that contain written data
        channels = [
            repr(channel) for channel in self.channels if channel.iswritten
        ]
        return indent(
            repr(self),
            f'rectangle: {str(self.rectangle)}',
            *channels,
        )
@dataclasses.dataclass(repr=False)
class PsdVirtualMemoryArray:
    """Virtual memory array."""

    iswritten: bool = False  # whether the array holds written data
    depth: int | None = None
    rectangle: PsdRectangle | None = None  # array bounds
    pixeldepth: int | None = None  # bits per pixel: 8, 16, or 32
    compression: PsdCompressionType = PsdCompressionType.RAW
    data: numpy.ndarray | None = None  # decoded pixel data

    @classmethod
    def read(
        cls, fh: BinaryIO, psdformat: PsdFormat, /
    ) -> PsdVirtualMemoryArray:
        """Return instance from open file."""
        iswritten = bool(psdformat.read(fh, 'I'))
        if not iswritten:
            return cls(iswritten=iswritten)
        length = psdformat.read(fh, 'I')
        if length == 0:
            return cls(iswritten=iswritten)  # written but empty
        depth = psdformat.read(fh, 'I')
        rectangle = PsdRectangle(*psdformat.read(fh, '4I'))
        pixeldepth = psdformat.read(fh, 'H')
        compression = PsdCompressionType(psdformat.read(fh, 'B'))
        # dtype from bits per pixel: 8 -> uint8, 16 -> uint16, 32 -> float32
        dtype = numpy.dtype(
            {8: 'B', 16: 'H', 32: 'f'}[pixeldepth]
        ).newbyteorder(psdformat.byteorder)
        # 23 header bytes already consumed: 4 + 16 + 2 + 1
        data = decompress(
            fh.read(length - 23),
            compression,
            rectangle.shape,
            dtype,
            psdformat.byteorder + 'H',  # RLE count format
        )
        return cls(
            iswritten=iswritten,
            depth=depth,
            rectangle=rectangle,
            pixeldepth=pixeldepth,
            compression=compression,
            data=data,
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write virtual memory array to open file; return bytes written."""
        start = fh.tell()
        psdformat.write(fh, 'I', self.iswritten)
        if not self.iswritten:
            return fh.tell() - start
        if (
            self.depth is None
            or self.rectangle is None
            or self.pixeldepth is None
            or self.data is None
        ):
            # written but empty array: zero length
            psdformat.write(fh, 'I', 0)
            return fh.tell() - start
        # length field is patched in after writing the data
        length_pos = fh.tell()
        psdformat.write(fh, 'I', 0)  # length placeholder
        pos = fh.tell()
        psdformat.write(fh, 'I', self.depth)
        psdformat.write(fh, '4I', *self.rectangle)
        psdformat.write(fh, 'H', self.pixeldepth)
        psdformat.write(fh, 'B', self.compression)
        data = compress(
            self.data,
            self.compression,
            psdformat.byteorder + 'H',  # RLE count format
        )
        fh.write(data)
        # patch the length, then restore the file position
        length = fh.tell() - pos
        fh.seek(length_pos)
        psdformat.write(fh, 'I', length)
        fh.seek(length, 1)
        return fh.tell() - start

    @property
    def dtype(self) -> numpy.dtype:
        # default to uint8 when pixeldepth is unknown
        if self.pixeldepth is None:
            return numpy.dtype('B')
        return numpy.dtype({8: 'B', 16: 'H', 32: 'f'}[self.pixeldepth])

    @property
    def shape(self) -> tuple[int, int]:
        # (height, width) of the array bounds
        return self.rectangle.shape if self.rectangle else (0, 0)

    @property
    def offset(self) -> tuple[int, int]:
        # (top, left) position of the array
        return self.rectangle.offset if self.rectangle else (0, 0)

    def __bool__(self) -> bool:
        # True only for written, non-empty arrays
        return self.iswritten and bool(self.rectangle)

    def __eq__(self, other: object) -> bool:
        # compression is intentionally excluded from the comparison
        return (
            isinstance(other, self.__class__)
            and self.iswritten is other.iswritten
            and self.depth == other.depth
            and self.pixeldepth == other.pixeldepth
            and self.rectangle == other.rectangle
            and numpy.array_equal(self.data, other.data)  # type: ignore
            # and self.compression == other.compression
        )

    def __repr__(self) -> str:
        if not self.iswritten:
            return f'<{self.__class__.__name__} notwritten>'
        if self.rectangle is None:
            return f'<{self.__class__.__name__} empty>'
        return (
            f'<{self.__class__.__name__} {str(self.rectangle.shape)} '
            f'{self.dtype} {self.compression.name}>'
        )

    def __str__(self) -> str:
        if not self.iswritten or self.rectangle is None:
            return repr(self)
        return indent(
            repr(self),
            f'rectangle: {str(self.rectangle)}',
            f'depth: {self.depth}',
            f'pixeldepth: {self.pixeldepth}',
            f'compression: {self.compression.name}',
        )
@dataclasses.dataclass(repr=False)
class PsdSectionDividerSetting(PsdKeyABC):
    """Section divider setting (Photoshop 6.0)."""

    kind: PsdSectionDividerType
    blendmode: PsdBlendMode | None = None
    subtype: int | None = None

    key = PsdKey.SECTION_DIVIDER_SETTING

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdSectionDividerSetting:
        """Return instance from open file."""
        divider_kind = PsdSectionDividerType(psdformat.read(fh, 'I'))
        if length < 12:
            # short record: divider type only
            return cls(kind=divider_kind)
        sig = fh.read(4)
        assert sig in (b'8BIM', b'MIB8')
        mode = PsdBlendMode(fh.read(4))
        if length < 16:
            # medium record: no subtype field
            return cls(kind=divider_kind, blendmode=mode)
        sub = psdformat.read(fh, 'I')
        return cls(kind=divider_kind, blendmode=mode, subtype=sub)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write section divider setting to open file; return bytes written."""
        psdformat.write(fh, 'I', self.kind.value)
        if self.blendmode is None:
            return 4
        psdformat.write_signature(fh, b'8BIM')
        psdformat.write_signature(fh, self.blendmode.value)
        if self.subtype is None:
            return 12
        psdformat.write(fh, 'I', self.subtype)
        return 16

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.kind.name}>'
@dataclasses.dataclass(repr=False)
class PsdSheetColorSetting(PsdKeyABC):
    """Sheet color setting (Photoshop 6.0)."""

    color: PsdColorType

    key = PsdKey.SHEET_COLOR_SETTING

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdSheetColorSetting:
        """Return instance from open file."""
        # one uint16 color value followed by 6 padding bytes
        value = psdformat.read(fh, 'H6x')
        return cls(color=PsdColorType(value))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write color setting to open file; return bytes written."""
        psdformat.write(fh, 'H6x', self.color.value)
        return 8

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.color.name}>'
@dataclasses.dataclass(repr=False)
class PsdReferencePoint(PsdKeyABC):
    """Reference point."""

    point: tuple[float, float]

    key = PsdKey.REFERENCE_POINT

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdReferencePoint:
        """Return instance from open file."""
        coordinates = psdformat.read(fh, 'dd')
        return cls(point=coordinates)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write reference point to open file."""
        x, y = self.point
        return psdformat.write(fh, 'dd', x, y)

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.point!r}>'
@dataclasses.dataclass(repr=False)
class PsdExposure(PsdKeyABC):
    """Exposure."""

    exposure: float
    offset: float
    gamma: float

    key = PsdKey.EXPOSURE

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdExposure:
        """Return exposure from open file."""
        values = psdformat.read(fh, 'Hfff')
        assert values[0] == 1  # version
        return cls(exposure=values[1], offset=values[2], gamma=values[3])

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write exposure to open file."""
        # version is always 1
        return psdformat.write(
            fh, 'Hfff', 1, self.exposure, self.offset, self.gamma
        )

    def __repr__(self) -> str:
        return (
            f'<{self.__class__.__name__} '
            f'{self.exposure}, {self.offset}, {self.gamma}>'
        )
@dataclasses.dataclass(repr=False)
class PsdTextEngineData(PsdKeyABC):
    """Text Engine Data (Photoshop CS3)."""

    data: bytes  # undecoded text engine payload

    key = PsdKey.TEXT_ENGINE_DATA

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdTextEngineData:
        """Return instance from open file."""
        size = psdformat.read(fh, 'I')
        return cls(data=fh.read(size))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write text engine data to open file; return bytes written."""
        total = psdformat.write(fh, 'I', len(self.data))
        total += fh.write(self.data)
        return total

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {len(self.data)} bytes>'
@dataclasses.dataclass(repr=False)
class PsdString(PsdKeyABC):
    """Unicode string."""

    key: PsdKey
    value: str

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdString:
        """Return instance from open file."""
        text = str(PsdUnicodeString.read(fh, psdformat))
        return cls(key=key, value=text)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write unicode string to open file."""
        encoded = PsdUnicodeString(self.value)
        return encoded.write(fh, psdformat, terminate=False)

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.key.name} {self.value!r}>'
@dataclasses.dataclass(repr=False)
class PsdBoolean(PsdKeyABC):
    """Boolean."""

    key: PsdKey
    value: bool

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdBoolean:
        """Return instance from open file."""
        # fixed: bool(fh.read(1)) tested the bytes object, which is
        # truthy for any successfully read byte including b'\x00';
        # the stored byte's value must be tested instead
        value = fh.read(1)[0] != 0
        fh.read(3)  # padding
        return cls(key=key, value=value)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write boolean to open file."""
        return fh.write(b'\1\0\0\0' if self.value else b'\0\0\0\0')

    def __bool__(self) -> bool:
        return self.value

    def __repr__(self) -> str:
        return (
            f'<{self.__class__.__name__} {self.key.name} ' f'{self.value!r}>'
        )
@dataclasses.dataclass(repr=False)
class PsdInteger(PsdKeyABC):
    """4 Byte Integer."""

    key: PsdKey
    value: int

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdInteger:
        """Return instance from open file."""
        # signed 32-bit integer
        return cls(key=key, value=psdformat.read(fh, 'i'))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write integer to open file."""
        return psdformat.write(fh, 'i', self.value)

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.key.name} {self.value!r}>'
@dataclasses.dataclass(repr=False)
class PsdWord(PsdKeyABC):
    """Four bytes."""

    key: PsdKey
    value: bytes

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdWord:
        """Return instance from open file."""
        word = fh.read(4)
        return cls(key=key, value=word)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write four bytes value to open file."""
        return fh.write(self.value)

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.key.name} {self.value!r}>'
@dataclasses.dataclass(repr=False)
class PsdUnknown(PsdKeyABC):
    """Unknown keys stored as opaque bytes."""

    key: PsdKey
    psdformat: PsdFormat  # format the opaque bytes were read with
    value: bytes

    @classmethod
    def frombytes(
        cls, data: bytes, psdformat: PsdFormat, key: PsdKey, /
    ) -> PsdUnknown:
        """Return instance from bytes."""
        return cls(key=key, psdformat=psdformat, value=data)

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdUnknown:
        """Return instance from open file."""
        return cls(key=key, psdformat=psdformat, value=fh.read(length))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write opaque binary value to open file.

        Raises ValueError when writing with a different format than the
        bytes were read with.
        """
        # NOTE(review): the `len(self.value) <= 1` guard also rejects
        # zero/one-byte values regardless of format — confirm intended
        if len(self.value) <= 1 or self.psdformat != psdformat:
            raise ValueError(f'can not write opaque bytes as {psdformat}')
        return fh.write(self.value)

    def tobytes(self, psdformat: PsdFormat, /) -> bytes:
        """Return opaque binary value.

        Raises ValueError when requested with a different format than
        the bytes were read with.
        """
        if len(self.value) <= 1 or self.psdformat != psdformat:
            # fixed: message was missing the space before {psdformat},
            # inconsistent with the identical message in write()
            raise ValueError(f'can not write opaque bytes as {psdformat}')
        return self.value

    def __eq__(self, other: object) -> bool:
        # format-insensitive for values of at most one byte
        return (
            isinstance(other, self.__class__)
            and self.key == other.key
            and (len(self.value) <= 1 or self.psdformat == other.psdformat)
            and self.value == other.value
        )

    def __repr__(self) -> str:
        return (
            f'<{self.__class__.__name__} {self.key.name} '
            f'{len(self.value)!r} bytes>'
        )
@dataclasses.dataclass(repr=False)
class PsdEmpty(PsdKeyABC):
    """Empty structure, no data associated with key."""

    key: PsdKey

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        key: PsdKey,
        /,
        length: int,
    ) -> PsdEmpty:
        """Return instance from open file."""
        assert length == 0  # nothing to read
        return cls(key=key)

    @classmethod
    def frombytes(
        cls, data: bytes, psdformat: PsdFormat, key: PsdKey, /
    ) -> PsdEmpty:
        """Return instance from bytes."""
        assert len(data) == 0  # nothing to parse
        return cls(key=key)

    def tobytes(self, psdformat: PsdFormat, /) -> bytes:
        """Return empty byte string."""
        return b''

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write nothing to open file; return zero bytes written."""
        return 0
class PsdResourceBlockABC(metaclass=abc.ABCMeta):
    """Abstract base class for image resource block data."""

    resourceid: PsdResourceId  # resource block identifier
    name: str  # resource block name

    @classmethod
    @abc.abstractmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdResourceBlockABC:
        """Return instance from open file."""

    @classmethod
    def frombytes(
        cls,
        data: bytes,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
    ) -> PsdResourceBlockABC:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            instance = cls.read(
                fh, psdformat, resourceid, name=name, length=len(data)
            )
        return instance

    @abc.abstractmethod
    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write instance values to open file."""

    def tobytes(self, psdformat: PsdFormat, /):
        """Return instance values as bytes."""
        with io.BytesIO() as buffer:
            self.write(buffer, psdformat)
            return buffer.getvalue()

    def __repr__(self) -> str:
        rid = self.resourceid
        return f'<{self.__class__.__name__} {rid.name} {rid.value}>'
@dataclasses.dataclass(repr=False)
class PsdBytesBlock(PsdResourceBlockABC):
    """Image resource blocks stored as opaque bytes."""

    resourceid: PsdResourceId
    name: str
    value: bytes

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdBytesBlock:
        """Return instance with *length* bytes read from open file."""
        return cls(resourceid=resourceid, name=name, value=fh.read(length))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write the stored bytes to open file."""
        return fh.write(self.value)
@dataclasses.dataclass(repr=False)
class PsdVersionBlock(PsdResourceBlockABC):
    """Version info image resource block."""
    # fixed: docstring was copy-pasted from PsdBytesBlock

    resourceid: PsdResourceId
    name: str
    version: int
    file_version: int
    writer_name: str
    reader_name: str
    has_real_merged_data: bool

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdVersionBlock:
        """Return instance from open file."""
        version = psdformat.read(fh, 'I')
        # fixed: bool(fh.read(1)) was always True because any non-empty
        # bytes object is truthy, including b'\0'; test the byte's value
        has_real_merged_data = fh.read(1) != b'\0'
        writer_name = str(PsdUnicodeString.read(fh, psdformat))
        reader_name = str(PsdUnicodeString.read(fh, psdformat))
        file_version = psdformat.read(fh, 'I')
        return cls(
            resourceid=resourceid,
            name=name,
            version=version,
            file_version=file_version,
            writer_name=writer_name,
            reader_name=reader_name,
            has_real_merged_data=has_real_merged_data,
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write instance values to open file and return bytes written."""
        written = 0
        written += psdformat.write(fh, 'I', self.version)
        # single flag byte, mirrored by the read() above
        written += fh.write(b'\1' if self.has_real_merged_data else b'\0')
        written += PsdUnicodeString(self.writer_name).write(fh, psdformat)
        written += PsdUnicodeString(self.reader_name).write(fh, psdformat)
        written += psdformat.write(fh, 'I', self.file_version)
        return written

    def __str__(self) -> str:
        return indent(
            repr(self),
            f'version: {self.version}',
            f'file_version: {self.file_version}',
            f'writer_name: {self.writer_name}',
            f'reader_name: {self.reader_name}',
            f'has_real_merged_data: {self.has_real_merged_data}',
        )
@dataclasses.dataclass(repr=False)
class PsdStringBlock(PsdResourceBlockABC):
    """Unicode string."""

    resourceid: PsdResourceId
    name: str
    value: str

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdStringBlock:
        """Return instance with Unicode string read from open file."""
        string = PsdUnicodeString.read(fh, psdformat)
        return cls(resourceid=resourceid, name=name, value=str(string))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write Unicode string to open file."""
        return PsdUnicodeString(self.value).write(fh, psdformat)

    def __str__(self) -> str:
        return indent(repr(self), self.value)
@dataclasses.dataclass(repr=False)
class PsdStringsBlock(PsdResourceBlockABC):
    """Series of Unicode strings."""

    resourceid: PsdResourceId
    name: str
    values: list[str]

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdStringsBlock:
        """Return instance with Unicode strings read from open file."""
        stop = fh.tell() + length
        values: list[str] = []
        while fh.tell() < stop:
            values.append(str(PsdUnicodeString.read(fh, psdformat)))
        return cls(resourceid=resourceid, name=name, values=values)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write sequence of Unicode strings to open file."""
        return sum(
            PsdUnicodeString(value).write(fh, psdformat)
            for value in self.values
        )

    def __str__(self) -> str:
        return indent(repr(self), *self.values)
@dataclasses.dataclass(repr=False)
class PsdPascalStringBlock(PsdResourceBlockABC):
    """Pascal string."""

    resourceid: PsdResourceId
    name: str
    value: str

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdPascalStringBlock:
        """Return instance with Pascal string read from open file."""
        string = PsdPascalString.read(fh, pad=2)
        return cls(resourceid=resourceid, name=name, value=str(string))

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write Pascal string to open file."""
        return PsdPascalString(self.value).write(fh, pad=2)

    def __str__(self) -> str:
        return indent(repr(self), self.value)
@dataclasses.dataclass(repr=False)
class PsdPascalStringsBlock(PsdResourceBlockABC):
    """Series of Pascal strings."""

    resourceid: PsdResourceId
    name: str
    values: list[str]

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdPascalStringsBlock:
        """Return instance with Pascal strings read from open file."""
        stop = fh.tell() + length
        values: list[str] = []
        while fh.tell() < stop:
            values.append(str(PsdPascalString.read(fh, pad=1)))
        return cls(resourceid=resourceid, name=name, values=values)

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write sequence of Pascal strings to open file."""
        return sum(
            PsdPascalString(value).write(fh, pad=1) for value in self.values
        )

    def __str__(self) -> str:
        return indent(repr(self), *self.values)
@dataclasses.dataclass(repr=False)
class PsdColorBlock(PsdResourceBlockABC):
    """Color structure."""

    resourceid: PsdResourceId
    name: str
    colorspace: PsdColorSpaceType
    components: tuple[int, int, int, int] = (0, 0, 0, 0)

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdColorBlock:
        """Return instance with color values read from open file."""
        colorspace = PsdColorSpaceType(psdformat.read(fh, 'h'))
        if colorspace == PsdColorSpaceType.Lab:
            components = psdformat.read(fh, '4h')  # Lab components signed
        else:
            components = psdformat.read(fh, '4H')
        return cls(
            resourceid=resourceid,
            name=name,
            colorspace=colorspace,
            components=components,
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write color space and components to open file."""
        if self.colorspace == PsdColorSpaceType.Lab:
            fmt = 'h4h'
        else:
            fmt = 'h4H'
        return psdformat.write(fh, fmt, self.colorspace, *self.components)

    def __str__(self) -> str:
        return indent(
            repr(self),
            f'colorspace: {self.colorspace.name}',
            f'components: {self.components}',
        )
@dataclasses.dataclass(repr=False)
class PsdThumbnailBlock(PsdResourceBlockABC):
    """Thumbnail resource format."""

    resourceid: PsdResourceId
    name: str
    format: int  # 0: kRawRGB, 1: kJpegRGB (see `data`)
    width: int
    height: int
    rawdata: bytes  # raw scanlines or JPEG stream

    @classmethod
    def read(
        cls,
        fh: BinaryIO,
        psdformat: PsdFormat,
        resourceid: PsdResourceId,
        /,
        name: str,
        length: int,
    ) -> PsdThumbnailBlock:
        """Return instance from open file."""
        # 28-byte header: format, width, height, padded row size,
        # uncompressed size, compressed size, bits per pixel, planes
        (
            fmt,
            width,
            height,
            widthbytes,
            size,
            size_compressed,
            bitsperpixel,
            planes,
        ) = psdformat.read(fh, 'IIIIIIHH')
        assert bitsperpixel == 24
        assert planes == 1
        # scanlines are padded to 32-bit boundaries
        assert widthbytes == (width * bitsperpixel + 31) // 32 * 4
        assert size == widthbytes * height * planes
        assert size_compressed == length - 28
        rawdata = fh.read(length - 28)
        return cls(
            resourceid=resourceid,
            name=name,
            format=fmt,
            width=width,
            height=height,
            rawdata=rawdata,
        )

    def write(self, fh: BinaryIO, psdformat: PsdFormat, /) -> int:
        """Write Thumbnail resource format to open file."""
        planes = 1
        bitsperpixel = 24
        widthbytes = (self.width * bitsperpixel + 31) // 32 * 4
        size = widthbytes * self.height * planes
        size_compressed = len(self.rawdata)
        psdformat.write(
            fh,
            'IIIIIIHH',
            self.format,
            self.width,
            self.height,
            widthbytes,
            size,
            size_compressed,
            bitsperpixel,
            planes,
        )
        return 28 + fh.write(self.rawdata)

    @property
    def is_bgr(self) -> bool:
        """Return True if thumbnail resource id is 1033 (BGR order)."""
        return self.resourceid.value == 1033

    @property
    def data(self) -> numpy.ndarray:
        """Return thumbnail image as (height, width, 3) array."""
        if self.format == 0:
            # kRawRGB: strip 32-bit scanline padding
            data = numpy.frombuffer(self.rawdata, dtype=numpy.uint8)
            data.shape = (self.height, (self.width * 24 + 31) // 32 * 4)
            data = data[:, : self.width * 3]
            data = data.reshape(self.height, self.width, 3)
        elif self.format == 1:
            # kJpegRGB
            from imagecodecs import jpeg8_decode

            data = jpeg8_decode(self.rawdata)
            assert data.shape == (self.height, self.width, 3)
        else:
            # fixed: previously interpolated the builtin `format` instead
            # of `self.format`, producing a useless error message
            raise ValueError(
                f'unknown PsdThumbnailBlock format {self.format!r}'
            )
        return data

    def __str__(self) -> str:
        return indent(
            repr(self),
            f'format: {self.format}',
            f'shape: ({self.height}, {self.width}, 3)',
        )
@dataclasses.dataclass(repr=False)
class TiffImageSourceData:
    """TIFF ImageSourceData tag #37724."""

    # byte order / size format the structure was read with
    psdformat: PsdFormat
    # layer info structure parsed from the stream
    layers: PsdLayers
    usermask: PsdUserMask
    # remaining tagged structures, in file order
    info: list[PsdKeyABC] = dataclasses.field(default_factory=list)
    # label used in repr and warnings
    name: str | None = None

    SIGNATURE = b'Adobe Photoshop Document Data Block\0'

    @classmethod
    def read(
        cls, fh: BinaryIO, /, name: str | None = None, unknown: bool = True
    ) -> TiffImageSourceData:
        """Return instance from open file.

        Parameters:
            fh: Open file positioned at the start of the tag value.
            name: Label for repr and warnings; defaults to type name of fh.
            unknown: Keep structures with unhandled keys as PsdUnknown
                instead of discarding them.
        """
        name = type(fh).__name__ if name is None else name
        signature = fh.read(len(TiffImageSourceData.SIGNATURE))
        if signature != TiffImageSourceData.SIGNATURE:
            raise ValueError(f'invalid ImageResourceData {signature!r}')
        signature = fh.read(4)
        if len(signature) == 0:
            # signature-only tag value: return an empty instance
            return cls(
                psdformat=PsdFormat.BE32BIT,
                layers=PsdLayers(PsdKey.LAYER),
                usermask=PsdUserMask(),
                name=name,
            )
        psdformat = PsdFormat(signature)
        fh.seek(-4, 1)  # rewind; the loop below re-reads each signature
        layers = None
        usermask = None
        info: list[PsdKeyABC] = []
        while fh.read(4) == psdformat:
            key = PsdKey(fh.read(4))
            size = psdformat.read_size(fh, key)
            pos = fh.tell()
            if size == 0:
                info.append(PsdEmpty(key))
            elif key in PsdLayers.TYPES and layers is None:
                # only the first layer info structure is kept
                layers = PsdLayers.read(
                    fh, psdformat, key, length=size, unknown=unknown
                )
            elif key == PsdKey.USER_MASK and usermask is None:
                usermask = PsdUserMask.read(fh, psdformat, key, length=size)
            elif key in PSD_KEY_TYPE:
                info.append(
                    PSD_KEY_TYPE[key].read(fh, psdformat, key, length=size)
                )
            elif unknown:
                info.append(PsdUnknown.read(fh, psdformat, key, length=size))
                # log_warning(
                #     f"<TiffImageSourceData '{name}'> skipped {size} bytes "
                #     f"in {key.value.decode()!r} info"
                # )
            # structures are padded to multiples of 4 bytes
            size += (4 - size % 4) % 4
            fh.seek(pos + size)
        if layers is None:
            log_warning(f'<{cls.__name__} {name!r}> contains no layers')
            layers = PsdLayers(PsdKey.LAYER)
        if usermask is None:
            log_warning(f'<{cls.__name__} {name!r}> contains no usermask')
            usermask = PsdUserMask()
        return cls(
            psdformat=psdformat,
            name=name,
            layers=layers,
            usermask=usermask,
            info=info,
        )

    @classmethod
    def frombytes(
        cls, data: bytes, /, name: str | None = None, unknown: bool = True
    ) -> TiffImageSourceData:
        """Return instance from bytes."""
        with io.BytesIO(data) as fh:
            self = cls.read(fh, name=name, unknown=unknown)
        return self

    @classmethod
    def fromtiff(
        cls,
        filename: os.PathLike | str,
        /,
        pageindex: int = 0,
        unknown: bool = True,
    ) -> TiffImageSourceData:
        """Return instance from ImageSourceData tag in TIFF file."""
        data = read_tifftag(filename, 37724, pageindex=pageindex)
        if data is None:
            raise ValueError('TIFF file contains no ImageSourceData tag')
        return cls.frombytes(
            data, name=os.path.split(filename)[-1], unknown=unknown
        )

    def write(
        self,
        fh: BinaryIO,
        /,
        psdformat: PsdFormat | bytes | None = None,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> int:
        """Write ImageResourceData tag value to open file.

        Returns the number of bytes written.
        """
        # default to the format the structure was read with
        psdformat = (
            self.psdformat if psdformat is None else PsdFormat(psdformat)
        )
        written = fh.write(TiffImageSourceData.SIGNATURE)
        written += write_psdtags(
            fh,
            psdformat,
            compression,
            unknown,
            4,
            self.layers,
            self.usermask,
            *self.info,
        )
        return written

    @property
    def byteorder(self):
        """Byte order of the PSD format."""
        return self.psdformat.byteorder

    def tobytes(
        self,
        psdformat: PsdFormat | bytes | None = None,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> bytes:
        """Return ImageResourceData tag value as bytes."""
        with io.BytesIO() as fh:
            self.write(
                fh,
                psdformat,
                compression=compression,
                unknown=unknown,
            )
            value = fh.getvalue()
        return value

    def tifftag(
        self,
        psdformat: PsdFormat | bytes | None = None,
        compression: PsdCompressionType | int | None = None,
        unknown: bool = True,
    ) -> tuple[int, int, int, bytes, bool]:
        """Return tifffile.TiffWriter.write extratags item."""
        value = self.tobytes(
            psdformat, compression=compression, unknown=unknown
        )
        return 37724, 7, len(value), value, True

    def has_unknowns(self) -> bool:
        """Return True if any tag or layer tag is a PsdUnknown."""
        return any(isinstance(tag, PsdUnknown) for tag in self.info) or any(
            layer.has_unknowns() for layer in self.layers
        )

    def __eq__(self, other: object) -> bool:
        # name and psdformat are intentionally excluded from equality
        return (
            isinstance(other, self.__class__)
            and self.layers == other.layers
            and self.info == other.info
            # and self.name == other.name
            # and self.psdformat == other.psdformat
        )

    def __bool__(self) -> bool:
        # NOTE(review): requires more than one info item to be truthy;
        # confirm whether a single info entry should count
        return bool(self.layers) or len(self.info) > 1

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.name!r}>'

    def __str__(self) -> str:
        if not self.psdformat:
            return repr(self)
        return indent(
            repr(self),
            repr(self.psdformat),
            self.layers,
            self.usermask,
            *self.info,
        )
@dataclasses.dataclass(repr=False)
class TiffImageResources:
    """TIFF ImageResources tag #34377."""

    psdformat: PsdFormat
    # all resource blocks, in file order (ids may repeat)
    blocks: list[PsdResourceBlockABC]
    blocks_dict: dict[int, PsdResourceBlockABC]  # TODO: use a multidict
    name: str | None = None

    @classmethod
    def read(
        cls, fh: BinaryIO, length: int, name: str | None = None
    ) -> TiffImageResources:
        """Return instance from open file.

        Parameters:
            fh: Open file positioned at the start of the tag value.
            length: Number of bytes to read.
            name: Label for repr; defaults to type name of fh.
        """
        fname = type(fh).__name__ if name is None else name
        blocks = read_psdblocks(fh, length=length)
        blocks_dict: dict[int, PsdResourceBlockABC] = {}
        # keep only the first block of each resource id for lookup
        for block in blocks:
            if block.resourceid.value not in blocks_dict:
                blocks_dict[block.resourceid.value] = block
        return cls(
            psdformat=PsdFormat.BE32BIT,
            name=fname,
            blocks=blocks,
            blocks_dict=blocks_dict,
        )

    @classmethod
    def frombytes(
        cls, data: bytes, name: str | None = None
    ) -> TiffImageResources:
        """Return instance from ImageResources tag value."""
        with io.BytesIO(data) as fh:
            self = cls.read(fh, length=len(data), name=name)
        return self

    @classmethod
    def fromtiff(
        cls, filename: os.PathLike | str, /, pageindex: int = 0
    ) -> TiffImageResources:
        """Return instance from ImageResources tag in TIFF file."""
        data = read_tifftag(filename, 34377, pageindex=pageindex)
        if data is None:
            raise ValueError('TIFF file contains no ImageResources tag')
        return cls.frombytes(data, name=os.path.split(filename)[-1])

    def write(self, fh: BinaryIO) -> int:
        """Write ImageResources tag value to open file."""
        return write_psdblocks(fh, *self.blocks)

    def tobytes(self) -> bytes:
        """Return ImageResources tag value as bytes."""
        with io.BytesIO() as fh:
            self.write(fh)
            value = fh.getvalue()
        return value

    def tifftag(self) -> tuple[int, int, int, bytes, bool]:
        """Return tifffile.TiffWriter.write extratags item."""
        value = self.tobytes()
        return 34377, 7, len(value), value, True

    def thumbnail(self) -> numpy.ndarray | None:
        """Return thumbnail image if any, else None."""
        # prefer resource 1036 over the older 1033
        if 1036 in self.blocks_dict:
            return cast(PsdThumbnailBlock, self.blocks_dict[1036]).data
        if 1033 in self.blocks_dict:
            return cast(PsdThumbnailBlock, self.blocks_dict[1033]).data
        return None

    def __eq__(self, other: object) -> bool:
        # equality is defined by identical serialization
        return (
            isinstance(other, self.__class__)
            and self.tobytes() == other.tobytes()
        )

    def __contains__(self, key: int) -> bool:
        return key in self.blocks_dict

    def __bool__(self) -> bool:
        return len(self.blocks) > 0

    def __len__(self) -> int:
        return len(self.blocks)

    def __getitem__(self, key: int) -> PsdResourceBlockABC:
        return self.blocks_dict[key]

    def __iter__(self):
        yield from self.blocks

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} {self.name!r}>'

    def __str__(self) -> str:
        return indent(repr(self), *self.blocks)
PSD_KEY_64BIT = {
    # if 64 bit format, these keys use a length count of 8 bytes
    PsdKey.ALPHA,
    PsdKey.FILTER_MASK,
    PsdKey.USER_MASK,
    PsdKey.LAYER,
    PsdKey.LAYER_16,
    PsdKey.LAYER_32,
    PsdKey.SAVING_MERGED_TRANSPARENCY,
    PsdKey.SAVING_MERGED_TRANSPARENCY2,
    PsdKey.SAVING_MERGED_TRANSPARENCY_16,
    PsdKey.SAVING_MERGED_TRANSPARENCY_32,
    PsdKey.LINKED_LAYER_2,
    PsdKey.FILTER_EFFECTS,
    PsdKey.FILTER_EFFECTS_2,
    PsdKey.PIXEL_SOURCE_DATA_CC15,
}

# map tagged-structure key to the class used to parse it; keys not listed
# here are read as PsdUnknown (see read_psdtags, TiffImageSourceData.read)
PSD_KEY_TYPE: dict[PsdKey, Type[PsdKeyABC]] = {
    PsdKey.BLEND_CLIPPING_ELEMENTS: PsdBoolean,
    PsdKey.BLEND_INTERIOR_ELEMENTS: PsdBoolean,
    PsdKey.KNOCKOUT_SETTING: PsdBoolean,
    PsdKey.PATT: PsdEmpty,
    PsdKey.SAVING_MERGED_TRANSPARENCY: PsdEmpty,
    PsdKey.SAVING_MERGED_TRANSPARENCY2: PsdEmpty,
    PsdKey.SAVING_MERGED_TRANSPARENCY_16: PsdEmpty,
    PsdKey.SAVING_MERGED_TRANSPARENCY_32: PsdEmpty,
    PsdKey.EXPOSURE: PsdExposure,
    PsdKey.FILTER_MASK: PsdFilterMask,
    PsdKey.LAYER_ID: PsdInteger,
    PsdKey.LAYER_VERSION: PsdInteger,
    PsdKey.PROTECTED_SETTING: PsdInteger,
    PsdKey.USING_ALIGNED_RENDERING: PsdInteger,
    PsdKey.LAYER: PsdLayers,
    PsdKey.LAYER_16: PsdLayers,
    PsdKey.LAYER_32: PsdLayers,
    PsdKey.LINKED_LAYER_EXTERNAL: PsdLayers,
    PsdKey.METADATA_SETTING: PsdMetadataSettings,
    PsdKey.PATTERNS: PsdPatterns,
    PsdKey.PATTERNS_2: PsdPatterns,
    PsdKey.PATTERNS_3: PsdPatterns,
    PsdKey.REFERENCE_POINT: PsdReferencePoint,
    PsdKey.NESTED_SECTION_DIVIDER_SETTING: PsdSectionDividerSetting,
    PsdKey.SECTION_DIVIDER_SETTING: PsdSectionDividerSetting,
    PsdKey.SHEET_COLOR_SETTING: PsdSheetColorSetting,
    PsdKey.UNICODE_LAYER_NAME: PsdString,
    PsdKey.TEXT_ENGINE_DATA: PsdTextEngineData,
    PsdKey.USER_MASK: PsdUserMask,
    PsdKey.FOREIGN_EFFECT_ID: PsdWord,
    PsdKey.LAYER_MASK_AS_GLOBAL_MASK: PsdWord,
    PsdKey.LAYER_NAME_SOURCE_SETTING: PsdWord,
    PsdKey.TRANSPARENCY_SHAPES_LAYER: PsdWord,
    PsdKey.VECTOR_MASK_AS_GLOBAL_MASK: PsdWord,
    # TODO:
    # PsdKey.ALPHA: PsdUnknown,
    # PsdKey.ANIMATION_EFFECTS: PsdUnknown,
    # PsdKey.ANNOTATIONS: PsdUnknown,
    # PsdKey.ARTBOARD_DATA: PsdUnknown,
    # PsdKey.ARTBOARD_DATA_2: PsdUnknown,
    # PsdKey.ARTBOARD_DATA_3: PsdUnknown,
    # PsdKey.BLACK_AND_WHITE: PsdUnknown,
    # PsdKey.BRIGHTNESS_AND_CONTRAST: PsdUnknown,
    # PsdKey.CHANNEL_BLENDING_RESTRICTIONS_SETTING: PsdUnknown,
    # PsdKey.CHANNEL_MIXER: PsdUnknown,
    # PsdKey.COLOR_BALANCE: PsdUnknown,
    # PsdKey.COLOR_LOOKUP: PsdUnknown,
    # PsdKey.COMPOSITOR_USED: PsdUnknown,
    # PsdKey.CONTENT_GENERATOR_EXTRA_DATA: PsdUnknown,
    # PsdKey.CURVES: PsdUnknown,
    # PsdKey.EFFECTS_LAYER: PsdUnknown,
    # PsdKey.FILTER_EFFECTS: PsdUnknown,
    # PsdKey.FILTER_EFFECTS_2: PsdUnknown,
    # PsdKey.GRADIENT_FILL_SETTING: PsdUnknown,
    # PsdKey.GRADIENT_MAP: PsdUnknown,
    # PsdKey.HUE_SATURATION: PsdUnknown,
    # PsdKey.HUE_SATURATION_PS4: PsdUnknown,
    # PsdKey.INVERT: PsdUnknown,
    # PsdKey.LEVELS: PsdUnknown,
    # PsdKey.LINKED_LAYER: PsdUnknown,
    # PsdKey.LINKED_LAYER_2: PsdUnknown,
    # PsdKey.LINKED_LAYER_3: PsdUnknown,
    # PsdKey.OBJECT_BASED_EFFECTS_LAYER_INFO: PsdUnknown,
    # PsdKey.PATTERN_DATA: PsdUnknown,
    # PsdKey.PATTERN_FILL_SETTING: PsdUnknown,
    # PsdKey.PHOTO_FILTER: PsdUnknown,
    # PsdKey.PIXEL_SOURCE_DATA: PsdUnknown,
    # PsdKey.PIXEL_SOURCE_DATA_CC15: PsdUnknown,
    # PsdKey.PLACED_LAYER: PsdUnknown,
    # PsdKey.PLACED_LAYER_CS3: PsdUnknown,
    # PsdKey.POSTERIZE: PsdUnknown,
    # PsdKey.SELECTIVE_COLOR: PsdUnknown,
    # PsdKey.SMART_OBJECT_LAYER_DATA: PsdUnknown,
    # PsdKey.SMART_OBJECT_LAYER_DATA_CC15: PsdUnknown,
    # PsdKey.SOLID_COLOR_SHEET_SETTING: PsdUnknown,
    # PsdKey.THRESHOLD: PsdUnknown,
    # PsdKey.TYPE_TOOL_INFO: PsdUnknown,
    # PsdKey.TYPE_TOOL_OBJECT_SETTING: PsdUnknown,
    # PsdKey.UNICODE_PATH_NAME: PsdUnknown,
    # PsdKey.VECTOR_MASK_SETTING: PsdUnknown,
    # PsdKey.VECTOR_MASK_SETTING_CS6: PsdUnknown,
    # PsdKey.VECTOR_ORIGINATION_DATA: PsdUnknown,
    # PsdKey.VECTOR_STROKE_CONTENT_DATA: PsdUnknown,
    # PsdKey.VECTOR_STROKE_DATA: PsdUnknown,
    # PsdKey.VIBRANCE: PsdUnknown,
}

# map image resource id to the class used to parse the block; ids not
# listed here are read as opaque PsdBytesBlock (see read_psdblocks)
PSD_RESOURCE_TYPE: dict[PsdResourceId, Type[PsdResourceBlockABC]] = {
    PsdResourceId.ALPHA_NAMES_PASCAL: PsdPascalStringsBlock,
    PsdResourceId.CAPTION_PASCAL: PsdPascalStringBlock,
    PsdResourceId.ALPHA_NAMES_UNICODE: PsdStringsBlock,
    PsdResourceId.WORKFLOW_URL: PsdStringBlock,
    # PsdResourceId.AUTO_SAVE_FILE_PATH: PsdStringBlock,
    # PsdResourceId.AUTO_SAVE_FORMAT: PsdStringBlock,
    PsdResourceId.THUMBNAIL_RESOURCE_PS4: PsdThumbnailBlock,
    PsdResourceId.THUMBNAIL_RESOURCE: PsdThumbnailBlock,
    PsdResourceId.VERSION_INFO: PsdVersionBlock,
    PsdResourceId.BACKGROUND_COLOR: PsdColorBlock,
}
def read_psdblocks(fh: BinaryIO, /, length: int) -> list[PsdResourceBlockABC]:
    """Return list of image resource block values from open file."""
    align = 2
    psdformat = PsdFormat.BE32BIT
    blocks: list[PsdResourceBlockABC] = []
    stop = fh.tell() + length
    while fh.tell() < stop and fh.read(4) == psdformat:
        resourceid = PsdResourceId(psdformat.read(fh, 'H'))
        name = str(PsdPascalString.read(fh, 2))
        size = psdformat.read(fh, 'I')
        start = fh.tell()
        # fall back to opaque bytes for resource ids without a parser
        blocktype = PSD_RESOURCE_TYPE.get(resourceid, PsdBytesBlock)
        blocks.append(
            blocktype.read(fh, psdformat, resourceid, name=name, length=size)
        )
        # skip to the next block; data is padded to even length
        fh.seek(start + size + (-size % align))
    return blocks
def write_psdblocks(fh: BinaryIO, /, *blocks: PsdResourceBlockABC) -> int:
    """Write sequence of image resource blocks to open file."""
    align = 2
    psdformat = PsdFormat.BE32BIT
    start = fh.tell()
    for block in blocks:
        fh.write(psdformat.value)
        psdformat.write(fh, 'H', block.resourceid.value)
        PsdPascalString(block.name).write(fh, 2)
        sizepos = fh.tell()
        psdformat.write(fh, 'I', 0)  # placeholder, back-patched below
        datapos = fh.tell()
        block.write(fh, psdformat)
        size = fh.tell() - datapos
        # patch the actual data size, then return to the end of the data
        fh.seek(sizepos)
        psdformat.write(fh, 'I', size)
        fh.seek(size, 1)
        fh.write(b'\0' * (-size % align))  # pad to even length
    return fh.tell() - start
def read_psdtags(
    fh: BinaryIO,
    psdformat: PsdFormat,
    /,
    length: int,
    unknown: bool = True,
    align: int = 2,
) -> list[PsdKeyABC]:
    """Return list of tags from open file."""
    tags: list[PsdKeyABC] = []
    stop = fh.tell() + length
    while fh.tell() < stop and fh.read(4) == psdformat:
        key = PsdKey(fh.read(4))
        size = psdformat.read_size(fh, key)
        start = fh.tell()
        if size == 0:
            tags.append(PsdEmpty(key))
        elif key in PSD_KEY_TYPE:
            tagtype = PSD_KEY_TYPE[key]
            tags.append(tagtype.read(fh, psdformat, key, length=size))
        elif unknown:
            tags.append(PsdUnknown.read(fh, psdformat, key, length=size))
        # skip to the next tag, accounting for alignment padding
        fh.seek(start + size + (-size % align))
    return tags
def write_psdtags(
    fh: BinaryIO,
    psdformat: PsdFormat,
    /,
    compression: PsdCompressionType | int | None,
    unknown: bool,
    align: int,
    *tags: PsdKeyABC,
) -> int:
    """Write sequence of tags to open file and return bytes written.

    Parameters:
        compression: Compression passed through to PsdLayers tags.
        unknown: Also write PsdUnknown tags (only when their stored
            format matches *psdformat*).
        align: Pad each tag's data to a multiple of this many bytes.
    """
    start = fh.tell()
    for tag in tags:
        if tag is None:
            continue
        if isinstance(tag, PsdUnknown):
            if not unknown:
                continue
            if tag.psdformat != psdformat:  # type: ignore
                # opaque bytes cannot be transcoded to another format
                log_warning(
                    f'<PsdUnknown {tag.key.value.decode()!r}> not written'
                )
                continue
        fh.write(psdformat.value)
        psdformat.write_key(fh, tag.key)
        size_pos = fh.tell()
        # write a zero-size placeholder; patched after the data is written
        psdformat.write_size(fh, 0, tag.key)
        pos = fh.tell()
        if isinstance(tag, PsdLayers):
            tag.write(
                fh, psdformat, compression=compression, unknown=unknown
            )  # type: ignore
        else:
            tag.write(fh, psdformat)
        size = fh.tell() - pos
        # back-patch the actual size, then return to the end of the data
        fh.seek(size_pos)
        psdformat.write_size(fh, size, tag.key)
        fh.seek(size, 1)
        fh.write(b'\0' * ((align - size % align) % align))
    return fh.tell() - start
def read_tifftag(
    filename: os.PathLike | str, tag: int | str, /, pageindex: int = 0
) -> bytes | None:
    """Return tag value from TIFF file, or None if the tag is absent."""
    from tifffile import TiffFile  # type: ignore

    with TiffFile(filename) as tif:
        page = tif.pages[pageindex]
        return page.tags.valueof(tag)
def compress(
    data: numpy.ndarray, compression: PsdCompressionType, rlecountfmt: str
) -> bytes:
    """Return compressed bytes of numpy array.

    Parameters:
        data: Array of dtype char 'B', 'H', or 'f'.
        compression: One of RAW, ZIP, ZIP_PREDICTED, or RLE.
        rlecountfmt: struct format for per-scanline RLE byte counts;
            first character is the byte order, second the count type.

    Raises:
        ValueError: If the dtype or compression type is not supported.
    """
    if data.dtype.char not in 'BHf':
        raise ValueError(f'data type {data.dtype!r} not supported')
    if data.size == 0:
        return b''
    if compression == PsdCompressionType.RAW:
        return data.tobytes()
    if compression == PsdCompressionType.ZIP:
        return zlib.compress(data.tobytes())
    if compression == PsdCompressionType.ZIP_PREDICTED:
        import imagecodecs

        # apply differencing (float predictor for float32) before zip
        if data.dtype.char == 'f':
            data = imagecodecs.floatpred_encode(data)
        else:
            data = imagecodecs.delta_encode(data)
        return zlib.compress(data.tobytes())
    if compression == PsdCompressionType.RLE:
        import imagecodecs

        # PackBits-compress each scanline; prefix with per-line byte counts
        lines = [imagecodecs.packbits_encode(line) for line in data]
        sizes = [len(line) for line in lines]
        fmt = f'{rlecountfmt[0]}{len(sizes)}{rlecountfmt[1]}'
        return struct.pack(fmt, *sizes) + b''.join(lines)
    # fixed: f-string had no placeholder (F541); include the value
    raise ValueError(f'unknown compression type {compression!r}')
def decompress(
    data: bytes,
    compression: PsdCompressionType,
    shape: tuple[int, ...],
    dtype: numpy.dtype,
    rlecountfmt: str,
) -> numpy.ndarray:
    """Return numpy array decompressed from bytes."""
    if dtype.char not in 'BHf':
        raise ValueError(f'data type {dtype!r} not supported')
    nbytes = product(shape) * dtype.itemsize
    if nbytes == 0:
        # nothing to decode for empty arrays
        return numpy.zeros(shape, dtype=dtype)
    if compression == PsdCompressionType.RAW:
        return numpy.frombuffer(data, dtype=dtype).reshape(shape).copy()
    if compression == PsdCompressionType.ZIP:
        raw = zlib.decompress(data)
        return numpy.frombuffer(raw, dtype=dtype).reshape(shape).copy()
    if compression == PsdCompressionType.ZIP_PREDICTED:
        import imagecodecs

        raw = imagecodecs.zlib_decode(data, out=nbytes)
        image = numpy.frombuffer(raw, dtype=dtype).reshape(shape)
        # undo differencing (float predictor for floating point data)
        if dtype.kind == 'f':
            return imagecodecs.floatpred_decode(image)
        return imagecodecs.delta_decode(image)
    if compression == PsdCompressionType.RLE:
        import imagecodecs

        # skip the per-scanline byte counts preceding the PackBits stream
        offset = shape[0] * struct.calcsize(rlecountfmt)
        raw = imagecodecs.packbits_decode(data[offset:])
        return numpy.frombuffer(raw, dtype=dtype).reshape(shape).copy()
    raise ValueError('unknown compression type')
def log_warning(msg, *args, **kwargs):
    """Log message with level WARNING."""
    import logging

    logger = logging.getLogger(__name__)
    logger.warning(msg, *args, **kwargs)
def product(iterable: Iterable[int]) -> int:
    """Return product of sequence of numbers.

    Returns 1 (the multiplicative identity) for an empty iterable.
    """
    import math

    # math.prod is C-implemented; replaces the hand-rolled loop
    return math.prod(iterable)
def indent(*args) -> str:
    """Return joined string representations of objects with indented lines."""
    # collect non-empty lines from all arguments
    lines: list[str] = []
    for arg in args:
        lines.extend(line for line in str(arg).splitlines() if line)
    # indent every line by two spaces, then strip the first line's prefix
    return '\n'.join('  ' + line for line in lines)[2:]
def test(verbose: bool = False) -> None:
    """Test TiffImageSourceData and TiffImageResources classes.

    Round-trips all TIFF files in the tests directory through every
    combination of PSD format and compression type.
    """
    from glob import glob

    import tifffile
    import imagecodecs

    print(f'Python {sys.version}')
    print(
        f'psdtags-{__version__},',
        f'numpy-{numpy.__version__},',
        f'tifffile-{tifffile.__version__},',
        f'imagecodecs-{imagecodecs.__version__}',
    )
    for filename in glob('tests/*.tif'):
        if read_tifftag(filename, 34377) is not None:
            # round-trip ImageResources through bytes
            res1 = TiffImageResources.fromtiff(filename)
            assert str(res1)
            if verbose:
                print(res1)
                print()
            res2 = TiffImageResources.frombytes(res1.tobytes())
            assert res1 == res2
        isd1 = TiffImageSourceData.fromtiff(filename)
        assert str(isd1)
        if verbose:
            print(isd1)
            print()
        # unknown structures can only be preserved in their native format
        has_unknown = any(
            isinstance(tag, PsdUnknown) for tag in isd1.info
        ) or any(
            isinstance(tag, PsdUnknown)
            for layer in isd1.layers
            for tag in layer.info
        )
        # test roundtrips of psdformat and compression
        for psdformat in PsdFormat:
            unknown = has_unknown and psdformat == isd1.psdformat
            if not unknown:
                isd1 = TiffImageSourceData.fromtiff(filename, unknown=False)
            for compression in PsdCompressionType:
                if compression == PsdCompressionType.UNKNOWN:
                    continue
                print('.', end='', flush=True)
                buffer = isd1.tobytes(
                    psdformat=psdformat,
                    compression=compression,
                    unknown=unknown,
                )
                isd2 = TiffImageSourceData.frombytes(buffer)
                str(isd2)
                if isd2:
                    assert isd2.psdformat == psdformat
                assert isd1 == isd2
                # test not equal after changing data
                if isd2.layers:
                    ch0 = isd2.layers[0].channels[0].data
                    if ch0 is not None and ch0.size > 0:
                        ch0[..., 0] = 123
                        assert isd1 != isd2
        # test tifftag value
        tagid, dtype, size, tagvalue, writeonce = isd1.tifftag()
        assert tagid == 37724
        assert dtype == 7
        assert size == len(tagvalue)
        assert writeonce
        assert isd1 == TiffImageSourceData.frombytes(tagvalue)
        print('.', end=' ', flush=True)
    print()
    # TODO: test TiffImageResources
def main(argv: list[str] | None = None) -> int:
    """Psdtags command line usage main function.

    Print ImageResourceData tag in TIFF file or all TIFF files in directory:

    ``python -m psdtags file_or_directory``

    Returns 0 on success.
    """
    from glob import glob
    from matplotlib import pyplot
    from tifffile import TiffFile, imshow

    if argv is None:
        argv = sys.argv

    if len(argv) > 1 and '--test' in argv:
        # run doctests and the test() suite against the tests directory
        if os.path.exists('../tests'):
            os.chdir('../')
        import doctest

        m: Any
        try:
            import psdtags.psdtags

            m = psdtags.psdtags
        except ImportError:
            m = None
        doctest.testmod(m)
        print()
        if os.path.exists('tests'):
            # print('running tests')
            test()
        print()
        return 0

    # resolve the file list from the command line argument
    if len(argv) == 1:
        files = glob('*.tif')
    elif '*' in argv[1]:
        files = glob(argv[1])
    elif os.path.isdir(argv[1]):
        files = glob(f'{argv[1]}/*.tif')
    else:
        files = argv[1:]

    doplot = False
    for fname in files:
        name = os.path.split(fname)[-1]
        try:
            with TiffFile(fname) as tif:
                imagesourcedata = tif.pages[0].tags.valueof(37724)
                imageresources = tif.pages[0].tags.valueof(34377)
            if imagesourcedata is not None:
                isd = TiffImageSourceData.frombytes(imagesourcedata, name=name)
                print(isd)
                print()
                # only show layer images when a single file was given
                if isd.layers and len(files) == 1:
                    for layer in isd.layers:
                        image = layer.asarray()
                        if image.size > 0:
                            imshow(image, title=repr(layer))
                            doplot = True
            if imageresources is not None:
                irs = TiffImageResources.frombytes(imageresources, name=name)
                print(irs)
                print()
                # prefer thumbnail resource 1036 over the older 1033
                if 1036 in irs:
                    thumbnailblock = cast(PsdThumbnailBlock, irs[1036])
                elif 1033 in irs:
                    thumbnailblock = cast(PsdThumbnailBlock, irs[1033])
                else:
                    thumbnailblock = None
                if thumbnailblock is not None:
                    thumbnail = thumbnailblock.data
                    if thumbnail.size > 0:
                        imshow(thumbnail, title=repr(thumbnailblock))
                        doplot = True
            if doplot:
                pyplot.show()
        except ValueError as exc:
            # raise  # enable for debugging
            print(fname, exc)
            continue
    return 0
if __name__ == '__main__':
    # propagate main()'s return value as the process exit status
    sys.exit(main())
|
import httplib
import time
import json
import sys
import cfscrape
import os.path
import datetime
from model.coeff import Coeff
__author__ = 'mivanov'
class CoeffProvider:
    """Fetch and parse betting coefficients from egb.com.

    NOTE(review): this module is Python 2 only (print statements,
    backtick repr, httplib); it will not run under Python 3.
    """

    _conn = httplib.HTTPConnection('egb.com')

    def process_history(self, dao):
        """Re-parse every cached response in ./old_history and save one
        CSV of coefficients per file via *dao*."""
        counter = 0
        now = datetime.datetime.now()
        if os.path.exists('./old_history'):
            for file in os.listdir('./old_history'):
                print(file)
                response = None
                with open('./old_history/' + file) as data_file:
                    response = json.load(data_file)
                # NOTE(review): '> 10' zero-pads the value 10 as well as
                # single digits (producing e.g. '010'); presumably '>= 10'
                # was intended -- confirm before changing
                filename = 'coeffs-{0}-{1}-{2}-{3}-{4}-{5}.csv'.format(now.year, now.month, \
                    now.day if now.day > 10 else "0" + `now.day`, \
                    now.hour if now.hour > 10 else "0" + `now.hour`, \
                    now.minute if now.minute > 10 else "0" + `now.minute`,
                    counter)
                coeffs = self.process_response(response)
                dao.save_coeff_list(coeffs, filename)
                counter = counter + 1
        print "done with history"

    def process_response(self, response):
        """Convert a parsed JSON response into a list of Coeff objects,
        including nested bets (map results, first blood, first 10 kills)."""
        coeffs = []
        bets = response['bets']
        for bet in bets:
            game = bet['game']
            team1 = bet['gamer_1']['nick'].encode('utf-8')
            team2 = bet['gamer_2']['nick'].encode('utf-8')
            # derive the match outcome from the per-gamer win flags
            result = ''
            if bet['gamer_1']['win'] == 1 and bet['gamer_2']['win'] == 0:
                result = 'win1'
            elif bet['gamer_1']['win'] == 0 and bet['gamer_2']['win'] == 1:
                result = 'win2'
            elif bet['gamer_1']['win'] == 0 and bet['gamer_2']['win'] == 0:
                result = 'draw'
            else:
                result = 'NA'
            coeffs.append(Coeff(bet['id'], game, bet['date'], bet['coef_1'], bet['coef_2'], team1, team2, 'Total', bet['tourn'], bet['id'], 'NA', result))
            if 'nb_arr' in bet:
                # nested bets carry their own outcome flags and labels
                nested_bets = bet['nb_arr']
                for nested in nested_bets:
                    bet_type = 'NA'
                    map = 'NA'
                    result = ''
                    if nested['gamer_1']['win'] == 1 and nested['gamer_2']['win'] == 0:
                        result = 'win1'
                    elif nested['gamer_1']['win'] == 0 and nested['gamer_2']['win'] == 1:
                        result = 'win2'
                    elif nested['gamer_1']['win'] == 0 and nested['gamer_2']['win'] == 0:
                        result = 'draw'
                    else:
                        result = 'NA'
                    # classify the nested bet by its 'gamer_1' label
                    if nested['gamer_1']['nick'] == 'Map 1':
                        bet_type = 'GameResult'
                        map = 'Map1'
                    elif nested['gamer_1']['nick'].lower() == 'first blood on map 1':
                        bet_type = 'FB'
                        map = 'Map1'
                    elif nested['gamer_1']['nick'].lower() == 'will take first 10 kills on map 1':
                        bet_type = 'Kills10'
                        map = 'Map1'
                    elif nested['gamer_1']['nick'] == 'Map 2':
                        bet_type = 'GameResult'
                        map = 'Map2'
                    elif nested['gamer_1']['nick'].lower() == 'first blood on map 2':
                        bet_type = 'FB'
                        map = 'Map2'
                    elif nested['gamer_1']['nick'].lower() == 'will take first 10 kills on map 2':
                        bet_type = 'Kills10'
                        map = 'Map2'
                    elif nested['gamer_1']['nick'] == 'Map 3':
                        bet_type = 'GameResult'
                        map = 'Map3'
                    elif nested['gamer_1']['nick'].lower() == 'first blood on map 3':
                        bet_type = 'FB'
                        map = 'Map3'
                    elif nested['gamer_1']['nick'].lower() == 'will take first 10 kills on map 3':
                        bet_type = 'Kills10'
                        map = 'Map3'
                    else:
                        # unrecognized label: store both labels verbatim
                        bet_type = nested['gamer_1']['nick']
                        map = nested['gamer_2']['nick']
                    coeffs.append(Coeff(nested['id'], game, nested['date'], nested['coef_1'], nested['coef_2'], team1, team2, bet_type, bet['tourn'], bet['id'], map, result))
        return coeffs

    def get_coeff_list(self):
        """Return current coefficients, from data.json if present,
        otherwise fetched live through the Cloudflare scraper."""
        scraper = cfscrape.create_scraper(js_engine='Node')
        response = None
        if os.path.isfile('data.json'):
            with open('data.json') as data_file:
                response = json.load(data_file)
        else:
            data = scraper.get('http://egb.com/ajax.php?act=UpdateTableBets&ajax=update&fg=1&ind=tables&limit=0&st=0&type=modules&ut=0').content
            # data = self.invoke_url('/ajax.php?act=UpdateTableBets&ajax=update&fg=1&ind=tables&limit=0&st=0&type=modules&ut=0')
            response = json.loads(data)
        return self.process_response(response)

    def invoke_url(self, url):
        """GET *url* over the persistent connection, retrying forever
        with a 3-minute backoff on any error."""
        while True:
            try:
                self._conn.request('GET', url, headers={ 'User-Agent' : 'super happy flair bot by /u/spladug' })
                data = self._conn.getresponse().read()
                return data
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                print "Unexpected error:", sys.exc_info()[0], " when requesting ", url, ", sleep for 3 minutes"
                time.sleep(180)
                print "retry..."
                # NOTE(review): reconnects to dotabuff.com although the
                # initial connection targets egb.com -- confirm intent
                self._conn = httplib.HTTPConnection('www.dotabuff.com')
|
# -*- coding: utf-8 -*-
"""Test the manager's citation utilities."""
import os
import time
import unittest
from pybel import BELGraph
from pybel.constants import (
CITATION, CITATION_AUTHORS, CITATION_DATE, CITATION_JOURNAL, CITATION_TYPE_PUBMED,
)
from pybel.dsl import Protein
from pybel.manager.citation_utils import enrich_pubmed_citations, get_citations_by_pmids, sanitize_date
from pybel.manager.models import Citation
from pybel.testing.cases import TemporaryCacheMixin
from pybel.testing.utils import n
class TestSanitizeDate(unittest.TestCase):
    """Test sanitization of PubMed dates in various formats.

    ``sanitize_date`` normalizes them to ISO ``YYYY-MM-DD`` strings,
    defaulting missing months/days to the earliest value.
    """

    def test_sanitize_1(self):
        """Test YYYY Mon DD."""
        self.assertEqual('2012-12-19', sanitize_date('2012 Dec 19'))

    def test_sanitize_2(self):
        """Test YYYY Mon (day defaults to 01)."""
        self.assertEqual('2012-12-01', sanitize_date('2012 Dec'))

    def test_sanitize_3(self):
        """Test YYYY (month and day default to 01)."""
        self.assertEqual('2012-01-01', sanitize_date('2012'))

    def test_sanitize_4(self):
        """Test YYYY Mon-Mon (first month of the range wins)."""
        self.assertEqual('2012-10-01', sanitize_date('2012 Oct-Dec'))

    def test_sanitize_5(self):
        """Test YYYY Season."""
        self.assertEqual('2012-03-01', sanitize_date('2012 Spring'))

    def test_sanitize_6(self):
        """Test YYYY Mon DD-DD (first day of the range wins)."""
        self.assertEqual('2012-12-12', sanitize_date('2012 Dec 12-15'))

    def test_sanitize_7(self):
        """Test YYYY Mon DD-Mon DD."""
        self.assertEqual('2005-01-29', sanitize_date('2005 Jan 29-Feb 4'))

    def test_sanitize_nope(self):
        """Test that an unparsable date yields None."""
        # Idiomatic unittest: assertIsNone instead of assertEqual(None, ...).
        self.assertIsNone(sanitize_date('2012 Early Spring'))
class TestCitations(TemporaryCacheMixin):
    """Tests for citations.

    NOTE(review): these tests appear to depend on live PubMed lookups via
    ``get_citations_by_pmids`` / ``enrich_pubmed_citations`` -- confirm
    network access is expected when running this suite.
    """

    def setUp(self):
        super().setUp()
        # Minimal fixture: two random proteins joined by a single
        # "increases" edge citing one PubMed identifier.
        self.u, self.v = (Protein(n(), n()) for _ in range(2))
        self.pmid = "9611787"
        self.graph = BELGraph()
        self.graph.add_increases(self.u, self.v, citation=self.pmid, evidence=n())

    def test_enrich(self):
        """Looking up one PMID stores exactly one Citation in the manager."""
        self.assertEqual(0, self.manager.count_citations())
        get_citations_by_pmids(manager=self.manager, pmids=[self.pmid])
        self.assertEqual(1, self.manager.count_citations())
        c = self.manager.get_citation_by_pmid(self.pmid)
        self.assertIsNotNone(c)
        self.assertIsInstance(c, Citation)
        self.assertEqual(CITATION_TYPE_PUBMED, c.db)
        self.assertEqual(self.pmid, c.db_id)

    def test_enrich_list(self):
        """Several PMIDs can be retrieved in a single call."""
        pmids = [
            '25818332',
            '27003210',
            '26438529',
            '26649137',
        ]
        get_citations_by_pmids(manager=self.manager, pmids=pmids)
        citation = self.manager.get_or_create_citation(db=CITATION_TYPE_PUBMED, db_id='25818332')
        self.assertIsNotNone(citation)

    def test_enrich_list_grouped(self):
        """Retrieval also works when PMIDs are fetched in smaller batches."""
        pmids = [
            '25818332',
            '27003210',
            '26438529',
            '26649137',
        ]
        get_citations_by_pmids(manager=self.manager, pmids=pmids, group_size=2)
        citation = self.manager.get_citation_by_pmid('25818332')
        self.assertIsNotNone(citation)

    def test_enrich_overwrite(self):
        """Enrichment fills in fields of an already-stored, empty citation."""
        citation = self.manager.get_or_create_citation(db=CITATION_TYPE_PUBMED, db_id=self.pmid)
        self.manager.session.commit()
        # Before enrichment the stored citation has no metadata.
        self.assertIsNone(citation.date)
        self.assertIsNone(citation.title)
        enrich_pubmed_citations(manager=self.manager, graph=self.graph)
        _, _, d = list(self.graph.edges(data=True))[0]
        citation_dict = d[CITATION]
        self.assertIn(CITATION_JOURNAL, citation_dict)
        self.assertIn(CITATION_DATE, citation_dict)
        self.assertEqual('1998-05-01', citation_dict[CITATION_DATE])
        self.assertIn(CITATION_AUTHORS, citation_dict)
        self.assertEqual(
            {'Lewell XQ', 'Judd DB', 'Watson SP', 'Hann MM'},
            set(citation_dict[CITATION_AUTHORS])
        )

    def test_enrich_graph(self):
        """Enrichment attaches journal, date, and authors to graph edges."""
        enrich_pubmed_citations(manager=self.manager, graph=self.graph)
        _, _, d = list(self.graph.edges(data=True))[0]
        citation_dict = d[CITATION]
        self.assertIn(CITATION_JOURNAL, citation_dict)
        self.assertIn(CITATION_DATE, citation_dict)
        self.assertEqual('1998-05-01', citation_dict[CITATION_DATE])
        self.assertIn(CITATION_AUTHORS, citation_dict)
        self.assertEqual(
            {'Lewell XQ', 'Judd DB', 'Watson SP', 'Hann MM'},
            set(citation_dict[CITATION_AUTHORS])
        )

    @unittest.skipIf(os.environ.get('DB') == 'mysql', reason='MySQL collation is wonky')
    def test_accent_duplicate(self):
        """Test when two authors, Gomez C and Goméz C are both checked that they are not counted as duplicates."""
        g1 = 'Gomez C'
        g2 = 'Gómez C'
        pmid_1, pmid_2 = pmids = [
            '29324713',
            '29359844',
        ]
        get_citations_by_pmids(manager=self.manager, pmids=pmids)
        # Brief pause before querying what was just stored.
        time.sleep(1)
        x = self.manager.get_citation_by_pmid(pmid_1)
        self.assertIsNotNone(x)
        self.assertEqual('Martínez-Guillén JR', x.first.name)
        self.assertIn(g1, self.manager.object_cache_author)
        self.assertIn(g2, self.manager.object_cache_author)
        a1 = self.manager.get_author_by_name(g1)
        self.assertEqual(g1, a1.name)
        a2 = self.manager.get_author_by_name(g2)
        self.assertEqual(g2, a2.name)
|
#%%
#################
# This script takes as input the FAO country definitions
# and corresponding HuID regions, and returns different
# Earth projections containing the defined regions.
#
# Last updated: Dec 2020
# Author: Ignacio Lopez-Gomez
#
#################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
import pandas as pd
import anthro.viz
def plot_huid_countries(df,projection,colors,edgecolor):
    """
    Returns a world map using a projection. Continents
    are colored based on the Human Impacts regional
    definitions and the anthro.viz colors.

    Parameters
    ----------
    df : DataFrame with "area" (country name) and "region" columns.
        NOTE(review): the name-conflict fixes below write back into df via
        chained indexing (df["area"][i] = ...), mutating the caller's frame
        and relying on pandas returning a view -- confirm this is intended.
    projection : cartopy CRS for the output axes.
    colors : mapping from region name to fill color.
    edgecolor : color for the map outline.
    """
    colors_huid = anthro.viz.plotting_style()
    ocean = colors_huid['light_blue']
    ax = plt.axes(projection=projection)
    # NOTE(review): outline_patch is deprecated in newer cartopy releases
    # (replaced by ax.spines['geo']) -- confirm the pinned cartopy version.
    ax.outline_patch.set_edgecolor(edgecolor)
    ax.add_feature(cfeature.OCEAN,
                   facecolor=ocean,
                   edgecolor='black',
                   linewidth=1.5)
    # Get countries
    shpfilename = shpreader.natural_earth(resolution='110m',
                                          category='cultural',
                                          name='admin_0_countries')
    reader = shpreader.Reader(shpfilename)
    countries = reader.records()
    # Get provinces and subnational territories
    shpfilename_2 = shpreader.natural_earth(resolution='10m',
                                            category='cultural',
                                            name='admin_1_states_provinces')
    reader2 = shpreader.Reader(shpfilename_2)
    subcountries = reader2.records()
    linewidth_cont_ = .4
    # Iterate through countries in HuID definition and natural earth dataset
    # (O(countries x rows); the renames below are re-applied per country).
    for country in countries:
        for huid_country_index in range(len(df["area"])):
            # Resolve country name conflicts between FAO and natural earth
            if "Viet Nam" in df["area"][huid_country_index]:
                df["area"][huid_country_index] = 'Vietnam'
            elif "Eswatini" in df["area"][huid_country_index]:
                df["area"][huid_country_index] = 'Swaziland'
            # If HuID and natural earth definitions coincide, change color
            if (df["area"][huid_country_index] in country.attributes.values() or
                    country.attributes['NAME'] in df["area"][huid_country_index] or
                    country.attributes['NAME_EN'] in df["area"][huid_country_index]):
                # color country
                ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                                  facecolor=(colors[df["region"][huid_country_index]]),
                                  label=country.attributes["NAME"],
                                  edgecolor=(colors[df["region"][huid_country_index]]),
                                  linewidth=linewidth_cont_)
            # Color "disputed countries" not present in the HuID definitions
            elif "Somaliland" in country.attributes['NAME']:
                ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                                  facecolor=(colors["Africa"]),
                                  label="Somaliland",
                                  edgecolor=(colors["Africa"]),
                                  linewidth=linewidth_cont_)
            elif "Kosovo" in country.attributes['NAME']:
                ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                                  facecolor=(colors["Europe"]),
                                  label="Kosovo",
                                  edgecolor=(colors["Europe"]),
                                  linewidth=linewidth_cont_)
            elif "N. Cyprus" in country.attributes['NAME']:
                ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                                  facecolor=(colors["Asia"]),
                                  label="N. Cyprus",
                                  edgecolor=(colors["Asia"]),
                                  linewidth=linewidth_cont_)
        # Resolve name conficts and recolor (overpaints any earlier fill).
        # NOTE(review): NAME in "Guinea" is a reversed substring test of the
        # literal, i.e. effectively an exact match for "Guinea"; it does NOT
        # match "Guinea-Bissau" -- confirm that is the intent.
        if country.attributes["NAME"] in "Guinea":
            ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["Africa"]),
                              label="Guinea",
                              edgecolor=(colors["Africa"]),
                              linewidth=linewidth_cont_)
        if country.attributes["NAME"] in "Antarctica":
            ax.add_geometries(country.geometry, ccrs.PlateCarree(),
                              facecolor='white',
                              label="Antarctica",
                              edgecolor='white',
                              linewidth=linewidth_cont_)
    # Correct provinces and subnational territories
    for subcountry in subcountries:
        # Fix French Guyana
        if "French" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["South America"]),
                              label="French Guyana",
                              edgecolor=(colors["South America"]),
                              linewidth=linewidth_cont_)
        # Fix Greenland (all of its admin-1 municipalities)
        elif "Greenland" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        elif "Pituffik" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        elif "Qeqqata" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        elif "Kujalleq" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        elif "Qaasuitsup" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        elif "Sermersooq" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["North America"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["North America"]),
                              linewidth=linewidth_cont_)
        # Fix missing islands
        elif "Santa Cruz de Tenerife" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["Europe"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["Europe"]),
                              linewidth=linewidth_cont_)
        elif "Las Palmas" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["Europe"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["Europe"]),
                              linewidth=linewidth_cont_)
        elif "Balearic Islands" in subcountry.attributes["name_en"]:
            ax.add_geometries(subcountry.geometry, ccrs.PlateCarree(),
                              facecolor=(colors["Europe"]),
                              label=subcountry.attributes["name_en"],
                              edgecolor=(colors["Europe"]),
                              linewidth=linewidth_cont_)
    return
# Other projections:
# ccrs.InterruptedGoodeHomolosine() #ccrs.PlateCarree() #ccrs.Mollweide() #ccrs.Robinson()
for projection in (ccrs.PlateCarree(), ccrs.Robinson()):
    # One independent plotting pass per projection: fresh figure, fresh
    # palette, and a fresh copy of the region definitions each time.
    plt.figure()
    huid_colors = anthro.viz.region_colors()[0]
    df2 = pd.read_csv('../../../miscellaneous/region_definitions.csv')
    plot_huid_countries(df2, projection, huid_colors, edgecolor='white')
# %%
|
"""Prepares MSCOCO-like dataset for the im2txt model"""
from datetime import datetime
import json
import logging
from pathlib import Path
from random import choices
import re
import shutil
import sys
from typing import Iterable, List, Tuple
import click
from os import path, walk
COCO_SPLIT_PROPORTIONS = {'val': 0.3, 'train': 0.7}
def split_choices():
population = list(COCO_SPLIT_PROPORTIONS.keys())
weights = list(COCO_SPLIT_PROPORTIONS.values())
while True:
yield choices(population, weights=weights)[0]
split_choice = split_choices()
class NoCaptionError(Exception):
    """The media data contains no caption (missing or empty caption edges)."""
def get_caption(raw_data: dict) -> str:
    """Return the caption text from a media metadata dictionary.

    Raises:
        NoCaptionError: if the caption edge structure is missing or empty.
    """
    try:
        # Walk the Instagram-style metadata down to the first caption node.
        node = raw_data["edge_media_to_caption"]["edges"][0]["node"]
    except (KeyError, IndexError):
        raise NoCaptionError()
    return node["text"]
def tokenize(caption: str) -> Iterable[str]:
    """Yield cleaned tokens from *caption*.

    Reconstructs the caption with no punctuation except for "#", "`" and "'".
    Tokens containing "@" (user handles) are split into their alphanumeric
    parts, so handles are broken on "." and "_" and the "@" is dropped.
    Single-character tokens other than "a" are discarded, and one trailing
    punctuation mark is stripped from each remaining token.
    """
    # Fixed: the original class contained ``.-_`` where "-" formed an
    # accidental character *range* from '.' (0x2E) to '_' (0x5F), silently
    # admitting ``/ : ; < = > [ \ ] ^`` into tokens. Placing "-" last makes
    # it a literal hyphen.
    pattern = r"[A-Za-z0-9'\`#@%,._?-]+(?:\`[A-Za-z]+)?"
    handle_pattern = r"[A-Za-z0-9]+(?:\`[A-Za-z]+)?"
    trailing_punctuation = (",", ".", "?", ":", ";", "!")
    for token in re.findall(pattern, caption):
        # Drop stray single characters, keeping the article "a".
        if len(token) == 1 and token != "a":
            continue
        if "@" in token:
            # Also addresses case where '@' somehow ends up in middle of token
            yield from re.findall(handle_pattern, token)
        # str.endswith accepts a tuple of suffixes -- no generator needed
        # (the original also shadowed the tuple with its loop variable).
        elif token.endswith(trailing_punctuation):
            yield token[:-1]
        else:
            yield token
def clean_caption_and_tokens(raw_caption: str) -> Tuple[str, List[str]]:
    """Tokenize *raw_caption* and return (cleaned caption, token list).

    The cleaned caption is simply the tokens re-joined with single spaces.
    """
    tokens = list(tokenize(raw_caption))
    # Fixed: removed a leftover debug print that spammed stdout with the
    # token list for every processed image.
    caption = " ".join(tokens)
    return caption, tokens
def image_data(image_id: int,
               image_filename: str,
               raw_data: dict,
               images_dirname: str,
               coco_id: int):
    """Build one Karpathy/COCO-style image record from scraped metadata.

    Args:
        image_id: numeric id of the image; must match raw_data["id"].
        image_filename: the image's file name on disk.
        raw_data: scraped media metadata containing the caption.
        images_dirname: base name of the images directory; the split name
            is appended to form the record's "filepath".
        coco_id: sequential id used for the COCO annotation.

    Raises:
        NoCaptionError: (propagated from get_caption) when no caption exists.
    """
    assert int(raw_data["id"]) == image_id
    # Randomly assign this image to the train or val split.
    split = next(split_choice)
    output_dirname = f"{images_dirname}_{split}"
    raw_caption = get_caption(raw_data)
    caption, tokens = clean_caption_and_tokens(raw_caption)
    sentence = {
        "raw": caption,
        "tokens": tokens,
        # Fixed: the sentence-level key is "imgid" in the COCO/Karpathy
        # schema (matching the outer record); this previously said "imid".
        "imgid": image_id,
        "sentid": image_id,
    }
    return {
        "filepath": output_dirname,
        "sentids": [image_id],
        "filename": image_filename,
        "imgid": image_id,
        "split": split,
        "sentences": [sentence],
        "cocoid": coco_id,
    }
def all_image_data(image_dir: str, media_dir: str, logger: logging.Logger):
    """Yield COCO-style records for every scraped image that has a caption.

    Walks *media_dir* for per-image ``data.json`` metadata, matches each
    entry against the files in *image_dir* (by basename without extension),
    and yields the result of image_data() for every captioned image.
    """
    images_dirname = path.basename(path.normpath(image_dir))
    # Map "12345" -> "12345.jpg" for every file directly inside image_dir.
    _, _, filenames = next(walk(image_dir))
    filenames_by_id = {
        path.splitext(filename)[0]: filename for filename in filenames
    }
    coco_id = 0
    for dirpath, _, filenames in walk(media_dir):
        if "data.json" not in filenames:
            continue
        # Each metadata directory is named after the image id it describes.
        image_id = path.basename(path.normpath(dirpath))
        if image_id not in filenames_by_id:
            continue
        with open(path.join(dirpath, "data.json"), 'r') as fd:
            raw_image_data = json.load(fd)
        logger.info(f"Getting scraped data for image {image_id}...")
        if not raw_image_data:
            # Fixed typo in the log message ("retrive" -> "retrieve").
            logger.warning(f"Failed to retrieve scraped data for image {image_id}")
            continue
        image_filename = filenames_by_id[image_id]
        try:
            coco_data = image_data(int(image_id),
                                   image_filename,
                                   raw_image_data,
                                   images_dirname,
                                   coco_id=coco_id)
        except NoCaptionError:
            logger.info(f"Image {image_id} has no caption. Skipping...")
            continue
        # Only captioned images consume a coco id, keeping ids contiguous.
        coco_id += 1
        logger.info(f"Generated COCO data from scraped data for image {image_id}")
        yield coco_data
def copy_image_to_dataset(data: dict,
                          from_dir: str,
                          to_dir: str,
                          logger: logging.Logger):
    """Copy one image file into its split subdirectory of the dataset."""
    target_dir = path.join(to_dir, data["filepath"])
    # Make sure the split directory exists before copying into it.
    Path(target_dir).mkdir(parents=True, exist_ok=True)
    image_name = data["filename"]
    from_path = path.join(from_dir, image_name)
    to_path = path.join(target_dir, image_name)
    logger.info(f"Copying {from_path} to {to_path}")
    shutil.copyfile(from_path, to_path)
def belongs_to_split(split_name: str):
    """Return a predicate testing whether a datum belongs to *split_name*."""
    return lambda datum: datum["split"] == split_name
def im2txt_coco_data(image_data: Iterable[dict], info: dict) -> dict:
    """Assemble the im2txt/COCO captions structure from image records.

    A single pass is used because *image_data* may be a one-shot generator
    (e.g. a filter over the full record list).
    """
    images, annotations = [], []
    for entry in image_data:
        images.append({"file_name": entry["filename"], "id": entry["imgid"]})
        annotations.append({
            "id": entry["cocoid"],
            "image_id": entry["imgid"],
            "caption": entry["sentences"][0]["raw"],
        })
    return {
        "info": info,
        "images": images,
        "annotations": annotations,
    }
@click.command()
@click.option(
    "--images-directory",
    "-i",
    "images_dir",
    type=click.Path(exists=True),
    required=True
)
@click.option(
    "--media-directory",
    "-m",
    "media_dir",
    type=click.Path(exists=True),
    required=True,
)
@click.option(
    "--output-directory",
    "-o",
    "output_dir",
    type=click.Path(file_okay=False),
    required=True
)
@click.option('--dataset-name', '-n', type=str, required=True)
@click.option('--log-level', '-l', type=str, default='DEBUG')
def make_coco_dataset(images_dir,
                      media_dir,
                      output_dir,
                      dataset_name,
                      log_level):
    """Build an MSCOCO-like dataset (captions JSON plus per-split image
    directories) from scraped media metadata and the matching image files."""
    logging.basicConfig(level=log_level)
    logger = logging.getLogger(__name__)
    all_images_data = list(all_image_data(images_dir, media_dir, logger))
    info = {"dataset": dataset_name, "date_created": datetime.now().isoformat()}
    # Fixed: click.Path(file_okay=False) only validates the path shape, it
    # does not create the directory -- without this, the open() below fails
    # when the output directory does not yet exist.
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    for split_name in ('train', 'val'):
        split_data = filter(belongs_to_split(split_name), all_images_data)
        coco_data_path = path.join(output_dir, f"captions-{split_name}.json")
        # json.dumps defaults to ensure_ascii=True, so the ascii encode
        # below cannot raise for non-ASCII captions.
        with open(coco_data_path, "wb") as fileobj:
            data = im2txt_coco_data(split_data, {**info, "split": split_name})
            fileobj.write(json.dumps(data).encode('ascii'))
    for image_datum in all_images_data:
        copy_image_to_dataset(image_datum, images_dir, output_dir, logger)


if __name__ == "__main__":
    make_coco_dataset()
|
import uuid
import pytest
import stix2
from stix2.exceptions import (
AtLeastOnePropertyError, CustomContentError, DictionaryKeyError,
ExtraPropertiesError, ParseError,
)
from stix2.properties import (
DictionaryProperty, EmbeddedObjectProperty, ExtensionsProperty,
HashesProperty, IDProperty, ListProperty, ObservableProperty,
ReferenceProperty, STIXObjectProperty,
)
from stix2.v20.common import MarkingProperty
from . import constants
# Shared fixture: an IDProperty bound to the fictional 'my-type' STIX type,
# and a known-good identifier for it.
ID_PROP = IDProperty('my-type', spec_version="2.0")
MY_ID = 'my-type--232c9d3f-49fc-4440-bb01-607f638778e7'


@pytest.mark.parametrize(
    "value", [
        MY_ID,
        'my-type--00000000-0000-4000-8000-000000000000',
    ],
)
def test_id_property_valid(value):
    """A well-formed id of the right type cleans to itself, not custom."""
    # clean() returns a (cleaned_value, has_custom_content) pair.
    assert ID_PROP.clean(value) == (value, False)


# Every well-known constant id from the test constants module, plus the
# marking and relationship id lists.
CONSTANT_IDS = [
    constants.ATTACK_PATTERN_ID,
    constants.CAMPAIGN_ID,
    constants.COURSE_OF_ACTION_ID,
    constants.IDENTITY_ID,
    constants.INDICATOR_ID,
    constants.INTRUSION_SET_ID,
    constants.MALWARE_ID,
    constants.MARKING_DEFINITION_ID,
    constants.OBSERVED_DATA_ID,
    constants.RELATIONSHIP_ID,
    constants.REPORT_ID,
    constants.SIGHTING_ID,
    constants.THREAT_ACTOR_ID,
    constants.TOOL_ID,
    constants.VULNERABILITY_ID,
]
CONSTANT_IDS.extend(constants.MARKING_IDS)
CONSTANT_IDS.extend(constants.RELATIONSHIP_IDS)


@pytest.mark.parametrize("value", CONSTANT_IDS)
def test_id_property_valid_for_type(value):
    """Each constant id is valid for an IDProperty of its own type."""
    # NOTE(review): 'type' shadows the builtin; left untouched in this
    # documentation-only pass.
    type = value.split('--', 1)[0]
    assert IDProperty(type=type, spec_version="2.0").clean(value) == (value, False)


def test_id_property_wrong_type():
    """An id whose type prefix differs from the property's type is rejected."""
    with pytest.raises(ValueError) as excinfo:
        ID_PROP.clean('not-my-type--232c9d3f-49fc-4440-bb01-607f638778e7')
    assert str(excinfo.value) == "must start with 'my-type--'."


@pytest.mark.parametrize(
    "value", [
        'my-type--foo',
        # Not a v4 UUID
        'my-type--00000000-0000-0000-0000-000000000000',
        'my-type--' + str(uuid.uuid1()),
        'my-type--' + str(uuid.uuid3(uuid.NAMESPACE_DNS, "example.org")),
        'my-type--' + str(uuid.uuid5(uuid.NAMESPACE_DNS, "example.org")),
    ],
)
def test_id_property_not_a_valid_hex_uuid(value):
    """Ids whose UUID part is not a valid version-4 UUID are rejected."""
    with pytest.raises(ValueError):
        ID_PROP.clean(value)


def test_id_property_default():
    """The property can generate a default id that passes its own clean()."""
    default = ID_PROP.default()
    assert ID_PROP.clean(default) == (default, False)
def test_reference_property_whitelist_standard_type():
    """A whitelist of one standard type accepts it and rejects others."""
    ref_prop = ReferenceProperty(valid_types="identity", spec_version="2.0")
    result = ref_prop.clean(
        "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # An unknown type fails regardless of whether custom content is allowed.
    with pytest.raises(ValueError):
        ref_prop.clean("foo--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(ValueError):
        ref_prop.clean("foo--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)


def test_reference_property_whitelist_custom_type():
    """A whitelisted custom type is usable only when customization is on."""
    ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
    with pytest.raises(ValueError):
        ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(ValueError):
        ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(CustomContentError):
        # This is the whitelisted type, but it's still custom, and
        # customization is disallowed here.
        ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_whitelist_generic_type():
    """Generic categories (SCO/SRO) admit their concrete standard types."""
    ref_prop = ReferenceProperty(
        valid_types=["SCO", "SRO"], spec_version="2.0",
    )
    result = ref_prop.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean(
        "sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean(
        "sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("sighting--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # The prop assumes some-type is a custom type of one of the generic
    # type categories.
    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(ValueError):
        ref_prop.clean("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # identity is an SDO, which is outside both whitelisted categories.
    with pytest.raises(ValueError):
        ref_prop.clean("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(ValueError):
        ref_prop.clean("identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_blacklist_standard_type():
    """A blacklisted standard type is rejected; everything else passes."""
    ref_prop = ReferenceProperty(invalid_types="identity", spec_version="2.0")
    result = ref_prop.clean(
        "malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean(
        "malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("malware--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # Custom types still require customization to be enabled.
    with pytest.raises(CustomContentError):
        ref_prop.clean(
            "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )
    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )
    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )


def test_reference_property_blacklist_generic_type():
    """Blacklisting SDO/SRO rejects their members but keeps SCOs usable."""
    ref_prop = ReferenceProperty(
        invalid_types=["SDO", "SRO"], spec_version="2.0",
    )
    result = ref_prop.clean(
        "file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
    )
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = ref_prop.clean(
        "file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(CustomContentError):
        ref_prop.clean(
            "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )
    result = ref_prop.clean(
        "some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
    )
    assert result == ("some-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    # identity (SDO) and relationship (SRO) are both blacklisted.
    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )
    with pytest.raises(ValueError):
        ref_prop.clean(
            "identity--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )
    with pytest.raises(ValueError):
        ref_prop.clean(
            "relationship--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False,
        )
    with pytest.raises(ValueError):
        ref_prop.clean(
            "relationship--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True,
        )


def test_reference_property_whitelist_hybrid_type():
    """A whitelist mixing a concrete custom type with a generic category."""
    p = ReferenceProperty(valid_types=["a", "SCO"], spec_version="2.0")
    result = p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(CustomContentError):
        # although whitelisted, "a" is a custom type
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    result = p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(ValueError):
        p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # should just assume "b" is a custom SCO type.
    result = p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_blacklist_hybrid_type():
    """A blacklist mixing a concrete custom type with a generic category."""
    p = ReferenceProperty(invalid_types=["a", "SCO"], spec_version="2.0")
    with pytest.raises(ValueError):
        p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(ValueError):
        p.clean("file--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(ValueError):
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    with pytest.raises(ValueError):
        p.clean("a--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    with pytest.raises(CustomContentError):
        p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", False)
    # should just assume "b" is a custom type which is not an SCO
    result = p.clean("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)
    assert result == ("b--8a8e8758-f92c-4058-ba38-f061cd42a0cf", True)


def test_reference_property_impossible_constraint():
    """An empty whitelist can never be satisfied and is rejected eagerly."""
    with pytest.raises(ValueError):
        ReferenceProperty(valid_types=[], spec_version="2.0")
@pytest.mark.parametrize(
    "d", [
        {'description': 'something'},
        [('abc', 1), ('bcd', 2), ('cde', 3)],
    ],
)
def test_dictionary_property_valid(d):
    """Dicts (and key/value pair sequences) clean to a truthy result."""
    dict_prop = DictionaryProperty(spec_version="2.0")
    assert dict_prop.clean(d)


# Each case is a [bad_input, expected_error_message] pair; keys must be
# 3-256 characters drawn from [a-zA-Z0-9_-].
@pytest.mark.parametrize(
    "d", [
        [{'a': 'something'}, "Invalid dictionary key a: (shorter than 3 characters)."],
        [
            {'a'*300: 'something'}, "Invalid dictionary key aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            "aaaaaaaaaaaaaaaaaaaaaaa: (longer than 256 characters).",
        ],
        [
            {'Hey!': 'something'}, "Invalid dictionary key Hey!: (contains characters other than lowercase a-z, "
            "uppercase A-Z, numerals 0-9, hyphen (-), or underscore (_)).",
        ],
    ],
)
def test_dictionary_property_invalid_key(d):
    """Malformed keys raise DictionaryKeyError with a precise message."""
    dict_prop = DictionaryProperty(spec_version="2.0")
    with pytest.raises(DictionaryKeyError) as excinfo:
        dict_prop.clean(d[0])
    assert str(excinfo.value) == d[1]


@pytest.mark.parametrize(
    "d", [
        # TODO: This error message could be made more helpful. The error is caused
        # because `json.loads()` doesn't like the *single* quotes around the key
        # name, even though they are valid in a Python dictionary. While technically
        # accurate (a string is not a dictionary), if we want to be able to load
        # string-encoded "dictionaries" that are, we need a better error message
        # or an alternative to `json.loads()` ... and preferably *not* `eval()`. :-)
        # Changing the following to `'{"description": "something"}'` does not cause
        # any ValueError to be raised.
        ("{'description': 'something'}", "The dictionary property must contain a dictionary"),
    ],
)
def test_dictionary_property_invalid(d):
    """Non-dictionary input raises ValueError with the expected message."""
    dict_prop = DictionaryProperty(spec_version="2.0")
    with pytest.raises(ValueError) as excinfo:
        dict_prop.clean(d[0])
    assert str(excinfo.value) == d[1]
def test_property_list_of_dictionary():
    """A ListProperty of DictionaryProperty round-trips its values."""
    @stix2.v20.CustomObject(
        'x-new-obj-4', [
            ('property1', ListProperty(DictionaryProperty(spec_version="2.0"), required=True)),
        ],
    )
    class NewObj():
        pass
    test_obj = NewObj(property1=[{'foo': 'bar'}])
    assert test_obj.property1[0]['foo'] == 'bar'


# Hash algorithm names follow the same key rules as dictionary keys:
# 3-256 characters from [a-zA-Z0-9_-].
@pytest.mark.parametrize(
    "key", [
        "aaa",
        "a"*256,
        "a-1_b",
    ],
)
def test_hash_property_valid_key(key):
    """Well-formed hash keys are accepted (flagged custom: unknown algo)."""
    p = HashesProperty(["foo"], spec_version="2.0")
    result = p.clean({key: "bar"}, True)
    assert result == ({key: "bar"}, True)


@pytest.mark.parametrize(
    "key", [
        "aa",
        "a"*257,
        "funny%chars?",
    ],
)
def test_hash_property_invalid_key(key):
    """Too-short, too-long, or oddly charactered keys are rejected."""
    p = HashesProperty(["foo"], spec_version="2.0")
    with pytest.raises(DictionaryKeyError):
        p.clean({key: "foo"}, True)
def test_embedded_property():
    """An EmbeddedObjectProperty accepts an instance of its declared type."""
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = stix2.v20.EmailMIMEComponent(
        content_type="text/plain; charset=utf-8",
        content_disposition="inline",
        body="Cats are funny!",
    )
    result = emb_prop.clean(mime, False)
    assert result == (mime, False)
    result = emb_prop.clean(mime, True)
    assert result == (mime, False)
    # Values that are neither the embedded type nor a dict are rejected.
    with pytest.raises(ValueError):
        emb_prop.clean("string", False)


def test_embedded_property_dict():
    """A plain dict is parsed into an instance of the embedded type."""
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = {
        "content_type": "text/plain; charset=utf-8",
        "content_disposition": "inline",
        "body": "Cats are funny!",
    }
    result = emb_prop.clean(mime, False)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert not result[1]
    result = emb_prop.clean(mime, True)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert not result[1]


def test_embedded_property_custom():
    """Embedded objects with custom properties require allow_custom=True."""
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = stix2.v20.EmailMIMEComponent(
        content_type="text/plain; charset=utf-8",
        content_disposition="inline",
        body="Cats are funny!",
        foo=123,
        allow_custom=True,
    )
    with pytest.raises(CustomContentError):
        emb_prop.clean(mime, False)
    result = emb_prop.clean(mime, True)
    assert result == (mime, True)


def test_embedded_property_dict_custom():
    """Dicts carrying extra keys raise unless customization is allowed."""
    emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
    mime = {
        "content_type": "text/plain; charset=utf-8",
        "content_disposition": "inline",
        "body": "Cats are funny!",
        "foo": 123,
    }
    with pytest.raises(ExtraPropertiesError):
        emb_prop.clean(mime, False)
    result = emb_prop.clean(mime, True)
    assert isinstance(result[0], stix2.v20.EmailMIMEComponent)
    assert result[0]["body"] == "Cats are funny!"
    assert result[1]
def test_extension_property_valid():
ext_prop = ExtensionsProperty(spec_version="2.0")
result = ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
},
}, False,
)
assert isinstance(
result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
)
assert not result[1]
result = ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
},
}, True,
)
assert isinstance(
result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
)
assert not result[1]
def test_extension_property_invalid1():
ext_prop = ExtensionsProperty(spec_version="2.0")
with pytest.raises(ValueError):
ext_prop.clean(1, False)
def test_extension_property_invalid2():
ext_prop = ExtensionsProperty(spec_version="2.0")
with pytest.raises(CustomContentError):
ext_prop.clean(
{
'foobar-ext': {
'pe_type': 'exe',
},
},
False,
)
result = ext_prop.clean(
{
'foobar-ext': {
'pe_type': 'exe',
},
}, True,
)
assert result == ({"foobar-ext": {"pe_type": "exe"}}, True)
def test_extension_property_invalid3():
ext_prop = ExtensionsProperty(spec_version="2.0")
with pytest.raises(ExtraPropertiesError):
ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
'abc': 123,
},
},
False,
)
result = ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
'abc': 123,
},
}, True,
)
assert isinstance(
result[0]["windows-pebinary-ext"], stix2.v20.WindowsPEBinaryExt,
)
assert result[0]["windows-pebinary-ext"]["abc"] == 123
assert result[1]
def test_extension_at_least_one_property_constraint():
with pytest.raises(AtLeastOnePropertyError):
stix2.v20.TCPExt()
def test_marking_property_error():
    """MarkingProperty rejects values that are not registered markings,
    with a specific error message."""
    prop = MarkingProperty()
    with pytest.raises(ValueError) as excinfo:
        prop.clean('my-marking')
    assert str(excinfo.value) == "must be a Statement, TLP Marking or a registered marking."
def test_stix_property_not_compliant_spec():
    # This is a 2.0 test only...
    # A 2.0 bundle refuses objects that carry an explicit spec_version.
    indicator = stix2.v20.Indicator(
        spec_version="2.0", allow_custom=True, **constants.INDICATOR_KWARGS
    )
    prop = STIXObjectProperty(spec_version="2.0")
    with pytest.raises(ValueError) as excinfo:
        prop.clean(indicator, False)
    assert "Spec version 2.0 bundles don't yet support containing objects of a different spec version." in str(excinfo.value)
def test_observable_property_obj():
    """An already-parsed observable object passes clean unchanged in both
    strict and lenient mode."""
    prop = ObservableProperty(spec_version="2.0")
    obs = stix2.v20.File(name="data.dat")
    obs_dict = {"0": obs}
    for allow_custom in (False, True):
        cleaned, has_custom = prop.clean(obs_dict, allow_custom)
        assert cleaned["0"] == obs
        assert not has_custom
def test_observable_property_dict():
    """A raw observable dict is parsed into the proper STIX type in both
    strict and lenient mode, with no custom content flagged."""
    prop = ObservableProperty(spec_version="2.0")
    obs_dict = {
        "0": {
            "type": "file",
            "name": "data.dat",
        },
    }
    for allow_custom in (False, True):
        cleaned, has_custom = prop.clean(obs_dict, allow_custom)
        assert isinstance(cleaned["0"], stix2.v20.File)
        assert cleaned["0"]["name"] == "data.dat"
        assert not has_custom
def test_observable_property_obj_custom():
    """An observable built with a custom property fails strict clean but
    survives lenient clean with the custom flag set."""
    prop = ObservableProperty(spec_version="2.0")
    obs = stix2.v20.File(name="data.dat", foo=True, allow_custom=True)
    obs_dict = {"0": obs}
    with pytest.raises(ExtraPropertiesError):
        prop.clean(obs_dict, False)
    cleaned, has_custom = prop.clean(obs_dict, True)
    assert cleaned["0"] == obs
    assert has_custom
def test_observable_property_dict_custom():
    """A raw observable dict with an unknown property fails strict clean but
    parses (custom flag set) in lenient mode."""
    prop = ObservableProperty(spec_version="2.0")
    obs_dict = {
        "0": {
            "type": "file",
            "name": "data.dat",
            "foo": True,
        },
    }
    with pytest.raises(ExtraPropertiesError):
        prop.clean(obs_dict, False)
    cleaned, has_custom = prop.clean(obs_dict, True)
    assert isinstance(cleaned["0"], stix2.v20.File)
    assert cleaned["0"]["foo"]
    assert has_custom
def test_stix_object_property_custom_prop():
    """An SDO dict with an unknown property: strict clean raises, lenient
    clean parses it and keeps the extra property."""
    prop = STIXObjectProperty(spec_version="2.0")
    obj_dict = {
        "type": "identity",
        "name": "alice",
        "identity_class": "supergirl",
        "foo": "bar",
    }
    with pytest.raises(ExtraPropertiesError):
        prop.clean(obj_dict, False)
    cleaned, has_custom = prop.clean(obj_dict, True)
    assert isinstance(cleaned, stix2.v20.Identity)
    assert cleaned["foo"] == "bar"
    assert has_custom
def test_stix_object_property_custom_obj():
    """A dict of a completely unregistered type: strict clean cannot parse
    it, lenient clean passes the raw dict through flagged as custom."""
    prop = STIXObjectProperty(spec_version="2.0")
    obj_dict = {
        "type": "something",
        "abc": 123,
        "xyz": ["a", 1],
    }
    with pytest.raises(ParseError):
        prop.clean(obj_dict, False)
    assert prop.clean(obj_dict, True) == (
        {"type": "something", "abc": 123, "xyz": ["a", 1]}, True,
    )
|
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astromodels import Model, PointSource
from threeML.classicMLE.goodness_of_fit import GoodnessOfFit
from threeML.classicMLE.joint_likelihood import JointLikelihood
from threeML.data_list import DataList
from threeML.plugin_prototype import PluginPrototype
from threeML.plugins.XYLike import XYLike
from threeML.utils.statistics.likelihood_functions import half_chi2
from threeML.utils.statistics.likelihood_functions import poisson_log_likelihood_ideal_bkg
from threeML.exceptions.custom_exceptions import custom_warnings
__instrument_name = "n.a."
class UnresolvedExtendedXYLike(XYLike):
    """XYLike plugin that, in addition to point sources, supports unresolved
    extended sources by summing their spatially-integrated fluxes."""

    def __init__(self, name, x, y, yerr=None, poisson_data=False, quiet=False, source_name=None):

        super(UnresolvedExtendedXYLike, self).__init__(name, x, y, yerr, poisson_data, quiet, source_name)

    def assign_to_source(self, source_name):
        """
        Assign these data to the given source (instead of to the sum of all sources, which is the default)

        :param source_name: name of the source (must be contained in the likelihood model)
        :return: none
        """

        if self._likelihood_model is not None and source_name is not None:
            assert source_name in self._likelihood_model.sources, "Source %s is not contained in " \
                                                                  "the likelihood model" % source_name

        self._source_name = source_name

    def set_model(self, likelihood_model_instance):
        """
        Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.

        :param likelihood_model_instance: instance of Model
        :type likelihood_model_instance: astromodels.Model
        """

        if likelihood_model_instance is None:
            return

        if self._source_name is not None:
            # Make sure that the source is in the model
            assert self._source_name in likelihood_model_instance.sources, \
                "This XYLike plugin refers to the source %s, " \
                "but that source is not in the likelihood model" % (self._source_name)

        self._likelihood_model = likelihood_model_instance

    def _get_total_expectation(self):
        """Evaluate the model at self._x: the sum over all sources, or the
        single source this plugin was assigned to via assign_to_source."""

        if self._source_name is None:

            n_point_sources = self._likelihood_model.get_number_of_point_sources()
            n_ext_sources = self._likelihood_model.get_number_of_extended_sources()

            assert n_point_sources + n_ext_sources > 0, "You need to have at least one source defined"

            # Stack all point sources (XYLike does not support a spatial dimension).
            # BUGFIX: on Python 3, map() returns a lazy iterator which
            # numpy.sum cannot reduce element-wise over axis=0 -- it must be
            # materialized with list() first.
            expectation_point = np.sum(list(map(lambda source: source(self._x, tag=self._tag),
                                                self._likelihood_model.point_sources.values())),
                                       axis=0)

            expectation_ext = np.sum(list(map(lambda source: source.get_spatially_integrated_flux(self._x),
                                              self._likelihood_model.extended_sources.values())),
                                     axis=0)

            expectation = expectation_point + expectation_ext

        else:

            # This XYLike dataset refers to a specific source
            # Note that we checked that self._source_name is in the model when the model was set

            if self._source_name in self._likelihood_model.point_sources:

                expectation = self._likelihood_model.point_sources[self._source_name](self._x)

            elif self._source_name in self._likelihood_model.extended_sources:

                expectation = self._likelihood_model.extended_sources[self._source_name].get_spatially_integrated_flux(self._x)

            else:

                # BUGFIX: original message contained typos ("soure not").
                raise KeyError("This XYLike plugin has been assigned to source %s, "
                               "which is neither a point source nor an extended source in the current model" % self._source_name)

        return expectation

    def plot(self, x_label='x', y_label='y', x_scale='linear', y_scale='linear'):
        """Plot the data with error bars and, when a model is set, overlay
        the model expectation. Returns the matplotlib figure."""

        fig, sub = plt.subplots(1, 1)

        sub.errorbar(self.x, self.y, yerr=self.yerr, fmt='.', label='data')

        sub.set_xscale(x_scale)
        sub.set_yscale(y_scale)

        sub.set_xlabel(x_label)
        sub.set_ylabel(y_label)

        if self._likelihood_model is not None:

            flux = self._get_total_expectation()

            label = 'model' if self._source_name is None else 'model (%s)' % self._source_name

            sub.plot(self.x, flux, '--', label=label)

        sub.legend(loc=0)

        return fig
|
from rest_framework import serializers
from .models import *
class AcquisitionSerializer(serializers.ModelSerializer):
    """Serializer exposing the survey answers of an Acquisition record."""

    class Meta:
        model = Acquisition
        fields = [
            'id',
            'questions1',
            'questions3',
            'questions4',
            'questions5',
            'questions6',
            'Contact',
            'CommentaireQ5',
            'CommentaireQ6',
            'campaignname',
        ]
|
""" An abstract class to inherit to tell it's a entry for topology discovery.
Right now, it only acts as a tag and does nothing.
"""
class TopologyEntry:
    """Marker (tag) base class identifying topology-discovery entries."""
    pass
"""Confluence reports commands"""
import click
from damster.reports.confluence import ConfluenceChanges
from damster.utils import previous_month_range
import logging
log = logging.getLogger(__name__)
@click.command('changes', short_help='generate changes report')
@click.argument('from-date', required=False)
@click.argument('to-date', required=False)
@click.option('--use-ssh-tunnel/--no-use-ssh-tunnel', '-S', default=False)
@click.pass_context
def confluence_changes(ctx, from_date, to_date, use_ssh_tunnel):
    """Confluence content changes"""
    # With no start date, default both bounds to the previous calendar month.
    if from_date is None:
        from_date, to_date = previous_month_range()
    log.info('Getting Confluence changes between {} and {}'.format(from_date, to_date))

    report = ConfluenceChanges(ctx.obj, from_date, to_date, use_ssh_tunnel=use_ssh_tunnel)
    report.save_to_csv()
    report.save_to_json()

    # The class destructor doesn't always run, so close the tunnel explicitly
    # to make sure the script actually ends.
    if report.ssh_tunnel:
        report.ssh_tunnel.close()
|
from cbuild.core import template
import os
def invoke(pkg):
    """Run the extraction stage for a package, skipping work already done."""
    template.call_pkg_hooks(pkg, "init_extract")
    template.run_pkg_func(pkg, "init_extract")

    # A stamp file marks a previously completed extraction.
    stamp = pkg.statedir / f"{pkg.pkgname}__extract_done"
    if stamp.is_file():
        return

    template.call_pkg_hooks(pkg, "pre_extract")
    template.run_pkg_func(pkg, "pre_extract")

    # A template-defined do_extract overrides the default hook behavior.
    if hasattr(pkg, "do_extract"):
        os.makedirs(pkg.abs_wrksrc, exist_ok=True)
        template.run_pkg_func(pkg, "do_extract")
    else:
        template.call_pkg_hooks(pkg, "do_extract")

    template.run_pkg_func(pkg, "post_extract")
    template.call_pkg_hooks(pkg, "post_extract")

    stamp.touch()
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible  # needed to support Python 2
class Battle(models.Model):
    """A battle between two Twitter hashtags, owned by a user."""

    battle_name = models.CharField(max_length=100)
    # The two competing hashtags, each with optional typo variants.
    hashtag1 = models.CharField(max_length=500)
    hashtag1_typos = models.CharField(null=True, max_length=100, blank=True)
    hashtag2 = models.CharField(max_length=500)
    hashtag2_typos = models.CharField(null=True, max_length=100, blank=True)
    # NOTE(review): timestamps are stored as plain CharFields rather than
    # DateTimeFields -- confirm this is intentional.
    started_at = models.CharField(max_length=100)
    ended_at = models.CharField(max_length=100)
    winner = models.CharField(null=True, max_length=500, blank=True)
    status = models.CharField(max_length=100)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="+")

    def __unicode__(self):
        # NOTE(review): @python_2_unicode_compatible generates __unicode__
        # from __str__ on Python 2, so this explicit definition looks
        # redundant -- confirm before removing.
        return self.battle_name

    def __str__(self):
        return str(self.battle_name)

    def get_absolute_url(self):
        # Canonical edit URL for this battle.
        return reverse('battle:battle_edit', kwargs={'pk': self.pk})
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, filters, status
from rest_framework.decorators import list_route
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from crisiscleanup.calls.api.serializers.gateway import GatewaySerializer
from crisiscleanup.calls.models import Gateway
from crisiscleanup.taskapp.celery import debug_task
class GatewayViewSet(viewsets.ModelViewSet):
    """CRUD API for Gateway records, plus a celery smoke-test endpoint."""

    queryset = Gateway.objects.all()
    serializer_class = GatewaySerializer
    filter_backends = (filters.SearchFilter, DjangoFilterBackend,)
    search_fields = ()
    filter_fields = ()

    @list_route()
    def test_celery(self, request):
        """Fire the debug task and return its id so celery wiring can be checked."""
        return Response({'task_id': debug_task.delay().id}, status=status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def get_detail(self, request, pk=None):
        """Return the serialized representation of a single gateway."""
        gateway = self.get_object()
        serialized_data = self.get_serializer(gateway).data
        #Calculate whether or not the user's training and read articles are up-to-date
        return Response(serialized_data)

    def update(self, request, pk=None):
        """Validate and persist a full update of a gateway."""
        instance = self.get_object()
        serializer = GatewaySerializer(instance, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def destroy(self, request, pk=None):
        """Delete a gateway and answer with 204 No Content."""
        self.get_object().delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class Solution(object):
    """LeetCode 35 -- Search Insert Position (binary search)."""

    def searchInsert(self, nums, target):
        """
        Return the index of target in the sorted list nums, or the index at
        which it would be inserted to keep nums sorted.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        # BUGFIX: an empty list previously fell through the loop and
        # returned None; the insertion point is 0.
        if not nums:
            return 0
        l = 0
        r = len(nums) - 1
        while l <= r:
            if l == r:
                # Single candidate left: insert before it when it is >= target,
                # otherwise immediately after it.
                return l if nums[l] >= target else l + 1
            # BUGFIX: use floor division -- "/" yields a float on Python 3,
            # which cannot be used as a list index.
            mid = (l + r) // 2
            if nums[mid] >= target:
                r = mid
            else:
                l = mid + 1
|
from sympy import Integer as sp_Integer
from sympy import Float as sp_Float
from sympy.core.expr import Expr
from sympy.core.numbers import NegativeOne as sp_NegativeOne
from sympy.logic.boolalg import BooleanTrue as sp_BooleanTrue
from sympy.logic.boolalg import BooleanFalse as sp_BooleanFalse
from .basic import PyccelAstNode
from .datatypes import (datatype, DataType, NativeSymbol,
NativeInteger, NativeBool, NativeReal,
NativeComplex, NativeRange, NativeTensor, NativeString,
NativeGeneric, NativeTuple, default_precision)
# Public API of this module.
# NOTE(review): ImaginaryUnit is defined below but not exported here --
# confirm whether that is intentional.
__all__ = (
    'BooleanTrue',
    'BooleanFalse',
    'Integer',
    'Float',
    'Complex',
)
#------------------------------------------------------------------------------
class BooleanTrue(sp_BooleanTrue, PyccelAstNode):
    """Pyccel AST node for the boolean literal True (scalar native bool)."""
    _dtype = NativeBool()
    _rank = 0
    _shape = ()
    _precision = default_precision['bool']
#------------------------------------------------------------------------------
class BooleanFalse(sp_BooleanFalse, PyccelAstNode):
    """Pyccel AST node for the boolean literal False (scalar native bool)."""
    _dtype = NativeBool()
    _rank = 0
    _shape = ()
    _precision = default_precision['bool']
#------------------------------------------------------------------------------
class Integer(sp_Integer, PyccelAstNode):
    """Pyccel AST node for an integer literal (scalar native int)."""
    _dtype = NativeInteger()
    _rank = 0
    _shape = ()
    _precision = default_precision['int']

    def __new__(cls, val):
        """Create the node via Expr.__new__ directly, bypassing
        sympy.Integer's own constructor."""
        ival = int(val)
        obj = Expr.__new__(cls, ival)
        # p is the attribute sympy's Integer uses for the raw int value.
        obj.p = ival
        return obj
#------------------------------------------------------------------------------
class Float(sp_Float, PyccelAstNode):
    """Pyccel AST node for a floating-point literal (scalar native real)."""
    _dtype = NativeReal()
    _rank = 0
    _shape = ()
    _precision = default_precision['real']
#------------------------------------------------------------------------------
class Complex(Expr, PyccelAstNode):
    """Pyccel AST node for a complex literal built from (real, imag) args."""
    _dtype = NativeComplex()
    _rank = 0
    _shape = ()
    _precision = default_precision['complex']

    @property
    def real(self):
        """Real part (first constructor argument)."""
        return self.args[0]

    @property
    def imag(self):
        """Imaginary part (second constructor argument)."""
        return self.args[1]
#------------------------------------------------------------------------------
class ImaginaryUnit(Expr, PyccelAstNode):
    """Pyccel AST node for the imaginary unit literal (scalar complex)."""
    _dtype = NativeComplex()
    _rank = 0
    _shape = ()
    _precision = default_precision['complex']
|
import gc
from tenders.models import *
from project.models import Log
from django.utils import timezone
class Runner:
    """Scans all PlanGraphPositionProduct records page by page and logs any
    duplicates (records sharing position + number) as critical errors."""

    # Human-readable name and machine alias of this check.
    name = 'Проверка дублей'
    alias = 'test-doubles'

    def __init__(self):
        # Loader / updater record for this check.
        self.updater = Updater.objects.take(
            alias = self.alias,
            name = self.name)

    def run(self):
        """Scan products in pages of 1000, reporting duplicate (position,
        number) pairs. Returns True when the scan finishes."""
        start = timezone.now()

        products_count = PlanGraphPositionProduct.objects.all().count()
        print(products_count)

        # Page count, rounded up to include a partial last page.
        items_on_page = 1000
        page_max = products_count // items_on_page
        if products_count % items_on_page:
            page_max += 1
        print(page_max)

        for pn in range(0, page_max):
            products = PlanGraphPositionProduct.objects.all()[pn * items_on_page : (pn + 1) * items_on_page]
            print(len(products))
            for n, product in enumerate(products):
                # More than one product with this (position, number) pair
                # means a duplicate exists.
                test = PlanGraphPositionProduct.objects.filter(
                    position = product.position,
                    number = product.number)
                if len(test) > 1:
                    # NOTE(review): the message says "position.id" but the
                    # value passed is product.id -- confirm which is intended.
                    log = Log.objects.add(
                        subject = "Tenders SQL",
                        channel = "Critical Error",
                        title = "Double Product: position.id = {}, number = {}.".format(product.id, product.number))
                    print(log)
                else:
                    print("Продукт {}{} из {}. Чисто.".format(pn, n, products_count))

        print("Обработка завершена за {}.".format(timezone.now() - start))
        return True
|
import sys
import cv2 as cv

# defining face detector
# Haar cascade classifiers shipped with OpenCV, for faces and eyes.
face_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_eye.xml')
# Downscale factor applied to frames before detection.
ds_factor = 0.6
class VideoCamera:
    """Thin wrapper around cv2.VideoCapture(0) with optional face/eye
    detection drawn onto each frame."""

    def __init__(self):
        # Device 0: the default system camera.
        self.video = cv.VideoCapture(0)

    def __del__(self):
        # Release the capture device when the wrapper is garbage collected.
        self.video.release()

    def get_frame(self, detect_faces=True):
        """Grab one frame as BGRA; exits the process if the camera fails."""
        ret, frame = self.video.read()
        if not ret:
            print('Can not retrieve a frame. Exit.')
            sys.exit()
        if detect_faces:
            frame = self.detect_faces(frame)
        colour = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)
        return colour

    def get_jpg_frame(self):
        """Return the current frame JPEG-encoded as raw bytes."""
        ret, jpeg = cv.imencode('.jpg', img=self.get_frame())
        return jpeg.tobytes()

    def show_frames(self):
        """Display the camera stream in a window until 'q' is pressed."""
        print('Click "q" to exit')
        while True:
            gray_frame = self.get_frame()
            cv.imshow('frame', gray_frame)
            if cv.waitKey(1) == ord('q'):
                break
        cv.destroyAllWindows()

    @staticmethod
    def detect_faces(frame):
        """Downscale the frame and draw rectangles around detected faces
        (blue) and eyes (green); returns the annotated frame."""
        frame = cv.resize(frame, None, fx=ds_factor, fy=ds_factor, interpolation=cv.INTER_AREA)
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # Search for eyes only inside the detected face region.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        return frame
if __name__ == '__main__':
    # just to test a face detection directly without web stuff
    vc = VideoCamera()
    vc.show_frames()
    # Explicit del triggers __del__ so the camera is released promptly.
    del vc
|
import six
import ubelt as ub
def compact_idstr(dict_):
    """
    A short unique id string for a dict param config that is semi-interpretable
    """
    from netharn import util
    import ubelt as ub
    keys = util.shortest_unique_prefixes(dict_.keys())
    shortened = ub.odict(sorted(zip(keys, dict_.values())))
    return ub.repr2(shortened, nobr=1, itemsep='', si=1, nl=0, explicit=1)
def make_idstr(d):
    """
    Make full-length-key id-string
    """
    if d is None:
        return ''
    if isinstance(d, six.string_types):
        return d
    if len(d) == 0:
        return ''
    # Sort for a deterministic ordering unless an ordered dict was given.
    if not isinstance(d, ub.odict):
        d = ub.odict(sorted(d.items()))
    return ub.repr2(d, itemsep='', nobr=True, explicit=True, nl=0, si=True)
def make_short_idstr(params, precision=None):
    """
    Make id-string where they keys are shortened

    Args:
        params (dict):

    Returns:
        str:

    CommandLine:
        python -m netharn.util.misc make_short_idstr

    Example:
        >>> params = {'input_shape': (None, 3, 212, 212),
        >>>           'a': 'b',
        >>>           'center': {'im_mean': .5, 'std': 1},
        >>>           'alphabet': 'abc'}
        >>> print(make_short_idstr(params))
    """
    if params is None or len(params) == 0:
        return ''
    from netharn import util
    short_keys = util.shortest_unique_prefixes(list(params.keys()),
                                               allow_simple=False,
                                               allow_end=True,
                                               min_length=1)

    def _shortval(v):
        # Booleans render as 0/1 to keep the id compact.
        return int(v) if isinstance(v, bool) else v

    d = dict(zip(short_keys, (_shortval(v) for v in params.values())))

    # Note: we are not using sort=True, because repr2 sorts sets and dicts
    # by default.
    text = ub.repr2(d, itemsep='', nobr=True, explicit=True, nl=0, si=True,
                    precision=precision)
    # Strip whitespace and every kind of bracket from the repr.
    for ch in ' [](){}':
        text = text.replace(ch, '')
    return text
|
# Simple tip/tax calculator: read the bill total, then print tip, tax and
# the grand total.
# GENERALIZED: accept decimal amounts too -- int() rejected inputs like
# "12.50"; float() handles both whole and fractional bills.
bill = float(input("Enter Total Bill: "))
tip_percentage = 0.20
tax_percentage = 0.067

tip = bill * tip_percentage
print(f"Tip: {tip}")

tax = bill * tax_percentage
print(f"Tax: {tax}")

total = bill + tip + tax
print(f"Total: {total}")
|
import pickle

# Write the list to a binary file (kept for reference, currently disabled):
"""
lista_nombres=["pedro", "ana" , "maria" , "isabel"]
fichero_binario=open("lista_binario", "wb")# write binaria
pickle.dump(lista_nombres, fichero_binario)# info volcar, name fichero a volcar in memory
fichero_binario.close()
del (fichero_binario)# delete of memory """

# Read the data back from the binary file.
# NOTE(review): pickle.load on an untrusted file can execute arbitrary code;
# only load files this program wrote itself.
binario = open("lista_binario", "rb")  # read binary
lista = pickle.load(binario)
binario.close()
print(lista)
from mcpi.minecraft import Minecraft
from mcpi.minecraft import ChatEvent
import time
import pycraft
import importlib
import sys
from pycraft import Player
import threading
mc = pycraft.new_minecraft()
def run_in_background(func, mc, player, kwargs):
    """Run func(mc=..., player=..., **kwargs) on a worker thread, taking the
    per-player lock first; refuses to start when the lock is unavailable."""
    player.unlock()
    # TODO check if the function accepts the correct args
    call_kwargs = {"mc": mc, "player": player}
    call_kwargs.update(kwargs)
    worker = threading.Thread(target=func, kwargs=call_kwargs)
    if player.lock():
        worker.start()
    else:
        print("Failed to run func: %s" % func)
def print_help(mc):
    """Tell the player that a script name is required."""
    mc.postToChat("Please provide the name of a script in addition.")
def parse_kwargs(mc, command):
    """Parse trailing "key=value" chat arguments into a dict.

    Arguments that are not exactly one "key=value" pair are reported to
    chat and skipped.
    """
    kwargs = {}
    for arg in command[2:]:
        key, sep, value = arg.partition("=")
        # Valid only when there is exactly one "=" in the argument.
        if sep and "=" not in value:
            kwargs[key] = value
        else:
            mc.postToChat("Invalid arg: %s" % arg)
    return kwargs
def handle_chat_event(event):
    """Dispatch one chat post: commands look like "python <script> [k=v ...]".

    Special scripts: "stop" releases the player lock and "whoami" posts the
    player's info; any other name is imported as a module and its main()
    runs in a background thread.
    """
    # BUGFIX: the body previously read the global loop variable `e` instead
    # of the `event` parameter -- it only worked by accident when called
    # from the module-level polling loop.
    if event.message.startswith("python "):
        player = Player(mc.conn, event.entityId)
        command = event.message.split()
        if len(command) < 2:
            print_help(mc)
        else:
            script = command[1]
            if script == "stop":
                player.unlock()
            elif script == "whoami":
                player.log()
                mc.postToChat(player.info())
            else:
                try:
                    print("Importing %s..." % script)
                    importlib.import_module(script)
                    func = sys.modules[script].main
                    kwargs = parse_kwargs(mc, command)
                    print("Running function main(%s)..." % kwargs)
                    run_in_background(func, mc, player, kwargs)
                except AttributeError:
                    mc.postToChat("Script has no main function: %s" % script)
                except ModuleNotFoundError:
                    mc.postToChat("Unknown script: %s" % script)
if __name__ == "__main__":
while True:
try:
time.sleep(1)
events = mc.events.pollChatPosts()
if len(events) > 0:
for e in events:
if e.type is ChatEvent.POST:
handle_chat_event(e)
else:
print("Unknown event type: %s" % (e.type))
except Exception as e:
time.sleep(1)
print(e)
print("Recovering from error")
|
import fandango as fn
from .api import HDBpp, MIN_FILE_SIZE
from .query import partition_prefixes
from .multi import *
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.urls import reverse
from .models import WasteCategory
from raddi_user.models import User
from .serializers import WasteCategorySerializer
from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase, URLPatternsTestCase
from rest_framework.test import force_authenticate
from rest_framework.authtoken.models import Token
class WasteCategoryTest(TestCase):
    """Model-level checks for WasteCategory."""

    def setUp(self):
        self.waste_category = WasteCategory.objects.create(name='Metal', description='Metal Wastes')

    def test_waste_cateogry_name(self):
        # (method-name typo kept: renaming would change the test id)
        category = WasteCategory.objects.first()
        self.assertEqual(category.name, "Metal")

    def test_waste_cateogry_description(self):
        category = WasteCategory.objects.first()
        self.assertEqual(category.description, "Metal Wastes")
class WaasteCategoryAPITest(APITestCase):
    """REST API tests for the waste-category endpoints.

    NOTE(review): the class name has a typo ("Waaste"); kept because
    renaming would change the reported test id.
    """

    def setUp(self):
        # One category plus a token-authenticated client for all requests.
        self.waste_category = WasteCategory.objects.create(name='Metal', description='Metal Wastes')
        self.user = User.objects.create(username='test', password='abc')
        self.token = Token.objects.create(user=self.user)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)

    def test_get_should_not_all_unauthorized_access(self):
        # Clearing credentials must yield 401.
        self.client.credentials()
        response = self.client.get(reverse('waste-category-list'))
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_get_all_waste_categories(self):
        response = self.client.get(reverse('waste-category-list'))
        waste_categories = WasteCategory.objects.all()
        serializer = WasteCategorySerializer(waste_categories, many=True)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_waste_category(self):
        response = self.client.get(reverse('waste-category-detail', kwargs={'pk': self.waste_category.id}))
        waste_category = WasteCategory.objects.get(id=self.waste_category.id)
        serializer = WasteCategorySerializer(waste_category)
        self.assertEqual(response.data, serializer.data)

    def test_create_waste_category(self):
        response = self.client.post(
            reverse('waste-category-list'),
            data={'name': 'New Waste', 'description': 'Newly added'},
            format='json'
        )
        # The created record must round-trip through the serializer.
        waste_category = WasteCategory.objects.last()
        serializer = WasteCategorySerializer(waste_category)
        self.assertEqual(response.data, serializer.data)

    def test_not_create_waste_category_for_invalid_data(self):
        # Both name and description are required for creation.
        response = self.client.post(reverse('waste-category-list'), data={}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.post(reverse('waste-category-list'), data={'name': "abc"}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.post(reverse('waste-category-list'), data={'description': "sbc"}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_waste_category(self):
        waste_category = WasteCategory.objects.first()
        response = self.client.put(
            reverse('waste-category-detail', kwargs={'pk': waste_category.id}),
            data={'name': 'Updated Waste', 'description': 'Updated Description'},
            format='json'
        )
        # Re-fetch to compare against the persisted state.
        waste_category = WasteCategory.objects.get(id=waste_category.id)
        serializer = WasteCategorySerializer(waste_category)
        self.assertEqual(response.data, serializer.data)

    def test_not_update_waste_category_invalid_data(self):
        # PUT requires the full representation; partial payloads must 400.
        response = self.client.put(
            reverse('waste-category-detail', kwargs={'pk': self.waste_category.id}),
            data={},
            format='json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.put(
            reverse('waste-category-detail', kwargs={'pk': self.waste_category.id}),
            data={'name': "name"},
            format='json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.put(
            reverse('waste-category-detail', kwargs={'pk': self.waste_category.id}),
            data={'description': "desc"},
            format='json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_delete_waste_category(self):
        waste_category = WasteCategory.objects.first()
        response = self.client.delete(
            reverse('waste-category-detail', kwargs={'pk': waste_category.id}),
        )
        # The record must be gone from the database.
        waste_category_count = WasteCategory.objects.filter(id=waste_category.id).count()
        self.assertEqual(waste_category_count, 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.util import sleep
from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection #ZmqPubConnection, ZmqSubConnection
import datetime
import cPickle as pickle
#
# ZMQ to Autobahn gate
#
# motherfuckers
#class nnZmqSubConnection(ZmqSubConnection):
# def messageReceived(self, message):
# self.gotMessage(message[0])
ZSUBGATE = "tcp://127.0.0.1:15558"
ZF = ZmqFactory()
ZFE = ZmqEndpoint("bind", ZSUBGATE)
class Component(ApplicationSession):
    # NOTE: Python 2 code (print statement; cPickle import above).
    def onJoin(self, details):
        # Bridge: pull "<channel> <pickled payload>" messages from ZMQ.
        s = ZmqPullConnection(ZF, ZFE)
        def go_pull(sr):
            chan, sr = sr[0].split(' ', 1)
            # NOTE(review): pickle.loads on socket data is unsafe unless the
            # peer is fully trusted.
            sr = pickle.loads(sr)
            print chan
            #self.publish(chan, sr)
        s.onPull = go_pull
if __name__ == '__main__':
    # Connect to the local WAMP router and run the gate session.
    runner = ApplicationRunner("ws://127.0.0.1:9002", "realm1")
    runner.run(Component)
|
import logging

import shrub
from tflite2onnx import getSupportedOperator

# Enable verbose logging for the whole test run.
shrub.util.formatLogging(logging.DEBUG)
def test_supported_ops():
    """The supported-operator table is non-empty and starts with ADD."""
    ops = getSupportedOperator()
    assert len(ops) > 0
    assert getSupportedOperator(0) == 'ADD'
if __name__ == '__main__':
    # Allow running this test module directly without pytest.
    test_supported_ops()
|
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from xai import constants
def adjust_xtick_labels(ticks_length):
    """Pick x-tick label rotation and font size from the tick count:
    more ticks -> smaller, more rotated labels."""
    if ticks_length < 5:
        rotation, fontsize = 0, 20
    elif ticks_length < 10:
        rotation, fontsize = 15, 18
    elif ticks_length < 20:
        rotation, fontsize = 30, 16
    else:
        rotation, fontsize = 90, 16
    return {'rotation': rotation, 'fontsize': fontsize}
def adjust_ytick_labels(ticks_length):
    """Pick a y-tick label font size: more ticks -> smaller font."""
    for bound, size in ((5, 20), (10, 18), (15, 16)):
        if ticks_length < bound:
            return {'fontsize': size}
    return {'fontsize': 14}
def make_ticklabels_invisible(fig):
    """Hide tick labels and the grid on every axes of the figure."""
    for ax in fig.axes:
        ax.tick_params(labelbottom=False, labelleft=False)
        ax.grid(False)
def dimreduce_visualization(data, mode='pca'):
    """Project data to 2-D with t-SNE when mode == 'tsne', else with PCA."""
    reducer = TSNE(n_components=2) if mode == 'tsne' else PCA(n_components=2)
    return reducer.fit_transform(data)
def map_code_to_text_metric(metric_code):
    """Return the human-readable metric name whose value-code list contains
    metric_code, or None when no mapping matches."""
    return next(
        (text for text, value_codes in constants.METRIC_MAPPING.items()
         if metric_code in value_codes),
        None,
    )
|
# Input arguments flag
import sys
sys.path.append('..')
_, *flag = sys.argv

# Parse arguments
# (with no command-line arguments at all, argparse is invoked with --help
# and exits)
import argparse
parser = argparse.ArgumentParser(prog='hs_trajectory', description='Save/plot trajectory TbT data for selected BPMs and plane.')
parser.add_argument('-p', '--plane', choices=('x', 'y', 'i'), help='data plane', default='x')
parser.add_argument('-l', '--length', type=int, help='number of turns to use', default=4)
parser.add_argument('--load', type=int, help='number of turns to load (integer)', default=128)
select = parser.add_mutually_exclusive_group()
select.add_argument('--skip', metavar='BPM', nargs='+', help='space separated list of valid BPM names to skip')
select.add_argument('--only', metavar='BPM', nargs='+', help='space separated list of valid BPM names to use')
parser.add_argument('-o', '--offset', type=int, help='rise offset for all BPMs', default=0)
parser.add_argument('-r', '--rise', action='store_true', help='flag to use rise data (drop first turns)')
parser.add_argument('-s', '--save', action='store_true', help='flag to save data as numpy array')
transform = parser.add_mutually_exclusive_group()
transform.add_argument('--mean', action='store_true', help='flag to remove mean')
transform.add_argument('--median', action='store_true', help='flag to remove median')
transform.add_argument('--normalize', action='store_true', help='flag to normalize data')
parser.add_argument('-f', '--filter', choices=('none', 'svd', 'hankel'), help='filter type', default='none')
parser.add_argument('--rank', type=int, help='rank to use for svd & hankel filter', default=8)
parser.add_argument('--type', choices=('full', 'randomized'), help='SVD computation type for hankel filter', default='randomized')
parser.add_argument('--buffer', type=int, help='buffer size to use for randomized hankel filter', default=16)
parser.add_argument('--count', type=int, help='number of iterations to use for randomized hankel filter', default=16)
parser.add_argument('--plot', action='store_true', help='flag to plot data')
parser.add_argument('-H', '--harmonica', action='store_true', help='flag to use harmonica PV names for input')
parser.add_argument('--device', choices=('cpu', 'cuda'), help='data device', default='cpu')
parser.add_argument('--dtype', choices=('float32', 'float64'), help='data type', default='float64')
args = parser.parse_args(args=None if flag else ['--help'])
# Import
import epics
import numpy
import pandas
import torch
from datetime import datetime
from harmonica.util import LIMIT, LENGTH, pv_make
from harmonica.window import Window
from harmonica.data import Data
from harmonica.filter import Filter
# Time stamp used in plot titles and output file names.
TIME = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')

# Check and set device & data type
dtype = {'float32': torch.float32, 'float64': torch.float64}[args.dtype]
device = args.device
if device == 'cuda' and not torch.cuda.is_available():
    # BUGFIX: typo in the message ("avalible"); the f-string prefix had no
    # placeholders and was dropped.
    exit('error: CUDA is not available')
# Load monitor data
# H:MONITOR:LIST holds the monitor names; only the first COUNT are valid.
name = epics.caget('H:MONITOR:LIST')[:epics.caget('H:MONITOR:COUNT')]
flag = epics.caget_many([f'H:{name}:FLAG' for name in name])
rise = epics.caget_many([f'H:{name}:RISE' for name in name])

# Set BPM data
# Keep only monitors flagged active (FLAG == 1), mapping name -> rise.
bpm = {name: rise for name, flag, rise in zip(name, flag, rise) if flag == 1}

# Check & remove skipped
if args.skip:
    for name in (name.upper() for name in args.skip):
        if not name in bpm:
            exit(f'error: {name} is not a valid BPM to skip')
        bpm.pop(name)

# Check & keep selected
if args.only:
    for name in (name.upper() for name in args.only):
        if not name in bpm:
            exit(f'error: {name} is not a valid BPM to read')
    for name in bpm.copy():
        if not name in (name.upper() for name in args.only):
            bpm.pop(name)

# Check BPM list
if not bpm:
    exit(f'error: BPM list is empty')

# Set BPM positions
position = numpy.array(epics.caget_many([f'H:{name}:TIME' for name in bpm]))

# Generate PV names
pv_list = [pv_make(name, args.plane, args.harmonica) for name in bpm]
pv_rise = [*bpm.values()]
# Check load length
length = args.load
if length < 0 or length > LIMIT:
    exit(f'error: {length=}, expected a positive value less than {LIMIT=}')

# Check offset
offset = args.offset
if offset < 0:
    exit(f'error: {offset=}, expected a positive value')
if length + offset > LIMIT:
    exit(f'error: sum of {length=} and {offset=}, expected to be less than {LIMIT=}')

# Check rise
# With --rise, the per-BPM rise offsets are applied; all must be positive
# and the worst case (max rise) must still fit within LIMIT.
if args.rise:
    rise = min(pv_rise)
    if rise < 0:
        exit(f'error: rise values are expected to be positive')
    rise = max(pv_rise)
    if length + offset + rise > LIMIT:
        exit(f'error: sum of {length=}, {offset=} and max {rise=}, expected to be less than {LIMIT=}')
else:
    rise = 0
# Load TbT data
size = len(bpm)
count = length + offset + rise
win = Window(length, dtype=dtype, device=device)
tbt = Data.from_epics(win, pv_list, pv_rise if args.rise else None, shift=offset, count=count)

# Remove mean
if args.mean:
    tbt.window_remove_mean()

# Remove median
if args.median:
    tbt.work.sub_(tbt.median())

# Normalize
if args.normalize:
    tbt.normalize()

# Filter (none)
if args.filter == 'none':
    data = tbt.to_numpy()

# Filter (svd)
if args.filter == 'svd':
    flt = Filter(tbt)
    flt.filter_svd(rank=args.rank)
    data = tbt.to_numpy()

# Filter (hankel)
# The svd pass runs first as a pre-filter before the hankel filter.
if args.filter == 'hankel':
    flt = Filter(tbt)
    flt.filter_svd(rank=args.rank)
    flt.filter_hankel(rank=args.rank, random=args.type == 'randomized', buffer=args.buffer, count=args.count)
    data = tbt.to_numpy()

# Check mixed length
if args.length < 0 or args.length > args.load:
    exit(f'error: requested length {args.length} is expected to be positive and less than load length {args.load}')

# Generate mixed data
# NOTE(review): this overwrites the `data` arrays assigned above -- the
# filters appear to act on `tbt` in place, so only this assignment is
# actually used downstream; confirm.
data = tbt.make_signal(args.length, tbt.work)
name = [name for name in bpm] * args.length
turn = numpy.array([numpy.zeros(len(bpm), dtype=numpy.int32) + i for i in range(args.length)]).flatten()
time = 1/LENGTH*numpy.array([position + LENGTH * i for i in range(args.length)]).flatten()
# Plot
if args.plot:
df = pandas.DataFrame()
df['BPM'] = name
df['TURN'] = turn.astype(str)
df['TIME'] = time
df[args.plane.upper()] = data
from plotly.express import line
plot = line(df, x='TIME', y=args.plane.upper(), color='TURN', hover_data=['TURN', 'BPM'], title=f'{TIME}: TbT (TRAJECTORY)', markers=True)
config = {'toImageButtonOptions': {'height':None, 'width':None}, 'modeBarButtonsToRemove': ['lasso2d', 'select2d'], 'modeBarButtonsToAdd':['drawopenpath', 'eraseshape'], 'scrollZoom': True}
plot.show(config=config)
# Save to file
data = numpy.array([time, data])
if args.save:
filename = f'tbt_trajectory_plane_{args.plane}_length_{args.length}_time_{TIME}.npy'
numpy.save(filename, data) |
from flask import Flask
# Fix: the "flask.ext" import namespace was removed in Flask 1.0; extensions
# are imported from their own package.
from flask_sqlalchemy import SQLAlchemy
from flask.json import JSONEncoder
from datetime import date, datetime
import os
# Application setup: MySQL is the primary database; credentials come from the
# environment so they are never committed to source control.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@localhost/csa?charset=utf8' % (
    os.environ['CSA_DB_USERNAME'], os.environ['CSA_DB_PASSWORD']
)
# A secondary SQLite bind named "wallet" lives next to this module file.
sqlite_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'db.sqlite')
app.config['SQLALCHEMY_BINDS'] = {
    'wallet': 'sqlite:///' + sqlite_path
}
app.config['SQLALCHEMY_ECHO'] = False
# NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is deprecated in newer
# Flask-SQLAlchemy releases; commit explicitly in views when upgrading.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
class CustomJSONEncoder(JSONEncoder):
    # JSON encoder that (1) renders bare dates by promoting them to
    # datetimes and (2) renders arbitrary iterables as lists, falling back
    # to the stock encoder for everything else.
    def default(self, obj):
        try:
            # NOTE(review): datetime is a subclass of date, so datetimes
            # also take this branch — confirm that is intended.
            if isinstance(obj, date):
                return JSONEncoder.default(
                    self,
                    datetime(obj.year, obj.month, obj.day))
            # Probe for iterability; iter() raises TypeError for non-iterables.
            iterable = iter(obj)
        except TypeError:
            pass
        else:
            return list(iterable)
        # Fall through: let the base class serialize it (or raise TypeError).
        return JSONEncoder.default(self, obj)
app.json_encoder = CustomJSONEncoder
|
import sys
from ttc_py.lexer import *
from ttc_py.emitter import *
class Parser:
    """Single-pass recursive-descent parser for a tiny BASIC-like language.

    Reads tokens from *lexer* and emits equivalent C source through
    *emitter* while parsing; no AST is built.
    """
    def __init__(self, lexer, emitter):
        self.lexer = lexer
        self.emitter = emitter
        self.curtoken = None
        self.peektoken = None
        # Variables declared via LET/INPUT, labels declared via LABEL, and
        # labels referenced by GOTO (checked after the program is parsed).
        self.symbols = set()
        self.declared_labels = set()
        self.gotoed_labels = set()
        self.next_token() # peektoken is set
        self.next_token() # curtoken is set
    def check_token(self, kind):
        """Return true if the passed-in token kind matches the current token's kind"""
        return kind == self.curtoken.kind
    def check_peek(self, kind):
        """Returns true if passed-in token kind matches the next token's kind"""
        return kind == self.peektoken.kind
    def match(self, kind):
        """Try to match the current token. If not match, error. Advances the current token"""
        if not self.check_token(kind):
            self.abort("Expected {}, but found {}".format(kind, self.curtoken.kind))
        self.next_token()
    def next_token(self):
        """Advances the current token"""
        self.curtoken = self.peektoken
        self.peektoken = self.lexer.get_token()
    def abort(self, message):
        """exit the parser with an error message"""
        sys.exit("Parser error: {}".format(message))
    ## production rules
    def program(self):
        """program ::= { statement }

        Emits the C scaffolding (a main function) around the translated
        statements, then checks all GOTO targets were declared.
        """
        self.emitter.header_line("#include <stdio.h>")
        self.emitter.header_line("int main(int argc, char *argv[])")
        self.emitter.header_line("{")
        # Skip leading blank lines before the first statement.
        while self.check_token(TokenType.NEWLINE):
            self.match(TokenType.NEWLINE)
        while not self.check_token(TokenType.EOF):
            self.statement()
        self.emitter.emit_line("return 0;")
        self.emitter.emit_line("}")
        # basic typechecking - ensure that all the labels that
        # have been GOTOed are valid labels
        for label in self.gotoed_labels:
            if label not in self.declared_labels:
                self.abort("Attempting to GOTO to an undeclared label {}".format(label))
    def statement(self):
        """
        statement ::= "PRINT" (expression | string) NL
                    | "IF" comparison "THEN" NL { statement } "ENDIF" NL
                    | "WHILE" comparison "REPEAT" NL { statement } "ENDWHILE" NL
                    | "LABEL" ident NL
                    | "GOTO" ident NL
                    | "LET" ident "=" expression NL
                    | "INPUT" ident NL
        """
        if self.check_token(TokenType.PRINT):
            self.match(TokenType.PRINT)
            if self.check_token(TokenType.STRING):
                # String literal: print it verbatim with a newline.
                self.emitter.emit_line(
                    'printf("%s\\n", "{}");'.format(self.curtoken.spelling)
                )
                self.match(TokenType.STRING)
            else:
                # Expression: all values are C floats; print to 2 decimals.
                self.emitter.emit('printf("%.2f\\n", (float)(')
                self.expression()
                self.emitter.emit_line("));")
        elif self.check_token(TokenType.IF):
            self.match(TokenType.IF)
            self.emitter.emit("if(")
            self.comparison()
            self.match(TokenType.THEN)
            self.nl()
            self.emitter.emit(") {")
            # Zero or more statements in the THEN body.
            while not self.check_token(TokenType.ENDIF):
                self.statement()
            self.match(TokenType.ENDIF)
            self.emitter.emit_line("}")
        elif self.check_token(TokenType.WHILE):
            self.match(TokenType.WHILE)
            self.emitter.emit("while (")
            self.comparison()
            self.match(TokenType.REPEAT)
            self.nl()
            self.emitter.emit_line(") {")
            # Zero or more statements in the loop body.
            while not self.check_token(TokenType.ENDWHILE):
                self.statement()
            self.match(TokenType.ENDWHILE)
            self.emitter.emit_line("}")
        elif self.check_token(TokenType.LABEL):
            self.match(TokenType.LABEL)
            # Labels must be unique; they become C goto labels.
            if self.curtoken.spelling in self.declared_labels:
                self.abort("Label {} already exists".format(self.curtoken.spelling))
            self.declared_labels.add(self.curtoken.spelling)
            self.emitter.emit_line("{}:".format(self.curtoken.spelling))
            self.match(TokenType.IDENT)
        elif self.check_token(TokenType.GOTO):
            self.match(TokenType.GOTO)
            # Record the target; validity is checked at the end of program().
            self.gotoed_labels.add(self.curtoken.spelling)
            self.emitter.emit_line("goto {};".format(self.curtoken.spelling))
            self.match(TokenType.IDENT)
        elif self.check_token(TokenType.LET):
            self.match(TokenType.LET)
            # First assignment implicitly declares the C variable.
            if self.curtoken.spelling not in self.symbols:
                self.emitter.header_line("float {};".format(self.curtoken.spelling))
                self.symbols.add(self.curtoken.spelling)
            self.emitter.emit("{} = ".format(self.curtoken.spelling))
            self.match(TokenType.IDENT)
            self.match(TokenType.EQ)
            self.expression()
            self.emitter.emit_line(";")
        elif self.check_token(TokenType.INPUT):
            self.match(TokenType.INPUT)
            if self.curtoken.spelling not in self.symbols:
                self.emitter.header_line(f"float {self.curtoken.spelling};")
                self.symbols.add(self.curtoken.spelling)
            # On invalid input: set the variable to 0 and flush the bad token.
            self.emitter.emit_line(
                'if(0 == scanf("%' + 'f", &' + self.curtoken.spelling + ")) {"
            )
            self.emitter.emit_line(self.curtoken.spelling + " = 0;")
            self.emitter.emit('scanf("%')
            self.emitter.emit_line('*s");')
            self.emitter.emit_line("}")
            self.match(TokenType.IDENT)
        else:
            self.abort("{} does not start a valid statement".format(self.curtoken))
        # Every statement form ends with at least one newline.
        self.nl()
    def comparison(self):
        """
        comparison ::= expression (("==" | "!=" | "<" | "<=" | ">" | ">=") expression)+

        At least one comparison operator is required.
        """
        self.expression()
        if self.is_comparison_operator():
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
            self.expression()
        else:
            self.abort(
                "Expected comparison operator, but found {}".format(self.curtoken)
            )
        # Additional chained comparisons are emitted as-is.
        while self.is_comparison_operator():
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
            self.expression()
    def is_comparison_operator(self):
        """Return true when the current token is one of the six comparison operators."""
        return (
            self.check_token(TokenType.EQEQ)
            or self.check_token(TokenType.NOTEQ)
            or self.check_token(TokenType.LT)
            or self.check_token(TokenType.LTEQ)
            or self.check_token(TokenType.GT)
            or self.check_token(TokenType.GTEQ)
        )
    def expression(self):
        """expression ::= term { ("-" | "+") term }"""
        self.term()
        while self.check_token(TokenType.MINUS) or self.check_token(TokenType.PLUS):
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
            self.term()
    def term(self):
        """term ::= unary { ("*" | "/") unary }"""
        self.unary()
        while self.check_token(TokenType.ASTERISK) or self.check_token(TokenType.SLASH):
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
            self.unary()
    def unary(self):
        """unary ::= ["+" | "-"] primary"""
        if self.check_token(TokenType.MINUS) or self.check_token(TokenType.PLUS):
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
            self.primary()
        else:
            self.primary()
    def primary(self):
        """primary ::= number | ident

        Identifiers must have been declared (LET/INPUT) before use.
        """
        if self.check_token(TokenType.NUMBER):
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
        elif self.check_token(TokenType.IDENT):
            if self.curtoken.spelling not in self.symbols:
                self.abort(
                    "Referencing a non-existent variable {}".format(
                        self.curtoken.spelling
                    )
                )
            self.emitter.emit(self.curtoken.spelling)
            self.next_token()
        else:
            self.abort(
                "Expected a number or an ident, but found {}".format(self.curtoken)
            )
    def nl(self):
        """NL ::= "\n"+"""
        self.match(TokenType.NEWLINE)
        # Collapse any run of consecutive newlines.
        while self.check_token(TokenType.NEWLINE):
            self.match(TokenType.NEWLINE)
    # public API
    def parse(self):
        """parse the source file - starting rule is `program`"""
        self.program()
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import json
from myuw.test.api import MyuwApiTest, require_url, fdao_mylib_override
@fdao_mylib_override
@require_url('myuw_library_api')
class TestLibrary(MyuwApiTest):
    """Tests for the myuw library-account API endpoint."""

    def get_library_response(self):
        """GET the library API endpoint for the currently-set user."""
        return self.get_response_by_reverse('myuw_library_api')

    def test_javerage_books(self):
        self.set_user('javerage')
        response = self.get_library_response()
        # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data,
                         {'fines': 0,
                          'holds_ready': 1,
                          'items_loaned': 1,
                          'next_due': '2014-05-27T02:00:00+00:00'})

    def test_invalid_books(self):
        # User with no data in the mock source -> 404.
        self.set_user('nodata')
        response = self.get_library_response()
        self.assertEqual(response.status_code, 404)
        # 'none' user still gets a successful (empty) response.
        self.set_user('none')
        response = self.get_library_response()
        self.assertEqual(response.status_code, 200)
        # Backend error is surfaced as status 543 (per the mock data).
        self.set_user('jerror')
        response = self.get_library_response()
        self.assertEqual(response.status_code, 543)
|
from abc import abstractmethod
from typing import Any, Callable, List, Optional, Tuple
import ibis
import ibis.expr.types as ir
from fugue import DataFrame, DataFrames, ExecutionEngine
# Registry of (priority, registration-order, factory) triples; kept sorted so
# lower priorities (and, for ties, earlier registrations) are tried first.
_ENGINE_FUNC: List[
    Tuple[int, int, Callable[[ExecutionEngine, Any], Optional["IbisEngine"]]]
] = []
def register_ibis_engine(
    priority: int, func: Callable[[ExecutionEngine, Any], Optional["IbisEngine"]]
) -> None:
    """Register a factory that can convert an execution engine to an IbisEngine.

    Factories are consulted in ascending priority order; the registration
    sequence number breaks ties between equal priorities.
    """
    entry = (priority, len(_ENGINE_FUNC), func)
    _ENGINE_FUNC.append(entry)
    _ENGINE_FUNC.sort()
def to_ibis_engine(
    execution_engine: ExecutionEngine, ibis_engine: Any = None
) -> "IbisEngine":
    """Resolve an IbisEngine for the given execution engine.

    An object that already is an IbisEngine is returned unchanged; otherwise
    every registered factory is tried in priority order.
    """
    if isinstance(ibis_engine, IbisEngine):
        return ibis_engine
    for entry in _ENGINE_FUNC:
        candidate = entry[2](execution_engine, ibis_engine)
        if candidate is not None:
            return candidate
    raise NotImplementedError(
        f"can't get ibis engine from {execution_engine}, {ibis_engine}"
    )
class IbisEngine:
    """The abstract base class for different ibis execution implementations.
    :param execution_engine: the execution engine this ibis engine will run on
    """
    def __init__(self, execution_engine: ExecutionEngine) -> None:
        # The underlying Fugue execution engine; exposed via the property below.
        self._execution_engine = execution_engine
    @property
    def execution_engine(self) -> ExecutionEngine:
        """the execution engine this ibis engine will run on"""
        return self._execution_engine
    @abstractmethod
    def select(
        self, dfs: DataFrames, ibis_func: Callable[[ibis.BaseBackend], ir.TableExpr]
    ) -> DataFrame:  # pragma: no cover
        """Execute the ibis select expression.
        :param dfs: a collection of dataframes that must have keys
        :param ibis_func: the ibis compute function
        :return: result of the ibis function
        .. note::
        This interface is experimental, so it is subjected to change.
        """
        raise NotImplementedError
|
def mutually_exclusive(dice, call1, call2):
|
# Atbash cipher command-line tool: mirror each letter of the alphabet
# (A<->Z, B<->Y, ...). Non-letters pass through unchanged.
import sys
import string

mode = sys.argv[1]  # -e | -d
txt = sys.argv[2].upper()  # String

# Plain alphabet and its mirror image.
plain = string.ascii_uppercase
mirrored = "".join(sorted(string.ascii_uppercase, reverse=True))

tmp = ""
if mode == "-e":
    tmp = txt.translate(str.maketrans(plain, mirrored))
elif mode == "-d":
    tmp = txt.translate(str.maketrans(mirrored, plain))

print(tmp)
|
# Minimal multivision demo scene built inside Blender (bpy).
import bpy
import multivision as mv
from multivision.luxcore import LuxcoreProjector, init_luxcore
from multivision.common import Camera, delete_all
# Start from an empty scene and set up LuxCore rendering.
delete_all()
init_luxcore()
# Ground plane at the origin.
bpy.ops.mesh.primitive_plane_add()
# Projector positioned 3 units above the origin, default orientation.
luxcore_proj = LuxcoreProjector(pose=((0,0,3),(0,0,0)))
""" Managers interactions between Fetching, Parsing, and Storing """
import logging
from src.parse.parse import Parse
from src.scrape.efd import EFD
from src.store.storage import Storage
from src.utility import hash_from_strings
class Controller:
""" Control interaction between Fetching, Parsing, and Storage """
    def __init__(self):
        """ Create concrete implementations """
        # Concrete fetch / parse / store collaborators
        # (see src.scrape.efd, src.parse.parse, src.store.storage).
        self.fetcher = EFD()
        self.parser = Parse()
        self.storer = Storage()
    def _fetcher_make_ready(self):
        # Lazily log the fetcher in before its first use.
        if not self.fetcher.is_logged_in:
            self.fetcher.login()
    def fetch_new_document_links(self):
        """Fetch annual-report links and store only those not seen before."""
        logging.info("Fetching document links...")
        self._fetcher_make_ready()
        fetched = self.fetcher.annual_reports_search()
        logging.info(f"Found '{len(fetched)}' document links!")
        stored = self.storer.document_link_raws_get()
        logging.info(f"Retrieved '{len(stored)}' document links from storage.")
        reports_added = 0
        # NOTE(review): assumes the second column of each stored row is the
        # report's hash key (matching the insert below) — verify against the
        # Storage schema.
        reports_seen = frozenset([seen[1] for seen in stored])
        for report in fetched:
            hash_key = hash_from_strings(report)
            if hash_key not in reports_seen:
                # Prepend the hash so it is persisted with the link fields.
                report.insert(0, hash_key)
                self.storer.document_link_raw_add(tuple(report))
                reports_added += 1
        logging.info(f"Added '{reports_added}' new document links.")
def parse_document_links(self):
document_links = self.storer.document_links_unparsed_get()
for document_link in document_links:
(key, name_first, name_last, filer_type, document_href, filed_date) = document_link
(document_type, document_id, document_name) = self.parser.document_link_parse(document_href)
filer_key = self.storer.filer_get_key(name_first, name_last)
|
#!/usr/bin/env python
import itertools
import pprint
import re
def tryint(s):
    """Return int(s) when s is all digits; otherwise return s unchanged."""
    if s.isdigit():
        return int(s)
    return s
def natural_sort(string):
    """Sort key that splits *string* into digit and non-digit runs.

    Digit runs are converted to ints so e.g. "s10" sorts after "s2".
    """
    # Fix: raw string for the regex — '\d' in a plain string is an invalid
    # escape sequence (a SyntaxWarning/-error on modern Python).
    return [tryint(c) for c in re.split(r'(\d+)', string)]
class Host(object):
    """A host attached to an edge switch, with its addressing and VLANs."""
    def __init__(self, name, ip, eth, switch, vlans):
        self.name = name
        self.ip = ip
        self.eth = eth
        self.switch = switch
        self.vlans = vlans

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "%s: (%s, %s, %s, %s)" % (
            self.name, self.ip, self.eth, self.switch, self.vlans)
class CoreSwitch(object):
    """A core switch identified by datapath id, carrying a set of VLANs."""
    def __init__(self, name, dpid, vlans):
        self.name = name
        self.dpid = dpid
        self.vlans = vlans

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "%s: (%s, %s)" % (self.name, self.dpid, self.vlans)
class EdgeSwitch(object):
    """An edge switch identified by datapath id, with its attached neighbors."""
    def __init__(self, name, dpid, neighbors):
        self.name = name
        self.dpid = dpid
        self.neighbors = neighbors

    def __repr__(self):
        return str(self)

    def __str__(self):
        return "%s: (%s, %s)" % (self.name, self.dpid, self.neighbors)
class Topology(object):
    """In-memory network topology parsed from a '*'-separated config file.

    NOTE(review): Python 2 only — several places below concatenate
    dict.keys()/dict.values() results, which are lists only on Python 2.
    """
    def __init__(self, config):
        self.hosts = {}
        self.vlans = {}
        self.switches = []
        self.edgeSwitches = {}
        self.coreSwitches = {}
        self.ports = {}
        self.parse(config)
    def getVlanCore(self, vlan):
        # Name of the core switch carrying this VLAN, or None.
        for core in self.coreSwitches.values():
            if vlan in core.vlans:
                return core.name
        return None
    def dpidToName(self, dpid):
        # Reverse lookup: datapath id -> switch name (edge or core).
        for sw in self.edgeSwitches.values() + self.coreSwitches.values():
            if dpid == sw.dpid:
                return sw.name
        return None
    # XXX: assumes ports are ordered alphabetically by switch, host name
    def parse(self, fname):
        # Section 0 describes core switches, section 1 describes edge
        # switches together with their attached hosts.
        with open(fname) as f:
            contents = self.splitSections(f.readlines(), '*')
            self.coreSwitches = self.parseCores(contents[0])
            self.edgeSwitches = self.parseEdges(contents[1])
            self.switches = self.coreSwitches.keys() + self.edgeSwitches.keys()
            self.hosts = self.parseHosts(contents[1])
            self.ports = self.setPorts()
            # Build the VLAN -> [host name] index.
            for host in self.hosts.values():
                for v in host.vlans:
                    if v not in self.vlans.keys():
                        self.vlans[v] = []
                    self.vlans[v].append(host.name)
    def setPorts(self):
        # Deterministic port numbering: each core numbers the edge switches
        # 1..n (natural order); each edge numbers cores first, then its
        # hosts. The map is stored in both directions (port<->name).
        ports = {}
        for s in self.coreSwitches.keys():
            ports[s] = {}
            i = 1
            for edge in sorted(self.edgeSwitches.keys(),
                               key=natural_sort):
                ports[s][i] = edge
                ports[s][edge] = i
                i += 1
        for s in self.edgeSwitches.keys():
            ports[s] = {}
            i = 1
            for core in sorted(self.coreSwitches.keys(),
                               key=natural_sort):
                ports[s][i] = core
                ports[s][core] = i
                i += 1
            for host in sorted(self.edgeSwitches[s].neighbors,
                               key=natural_sort):
                ports[s][i] = host
                ports[s][host] = i
                i += 1
        return ports
    def parseEdges(self, cfg):
        # Edge line format: "<name> <dpid> <host>,... <host>,..."
        s = {}
        for line in cfg:
            fields = line.split(' ')
            name, dpid = fields[0], int(fields[1])
            neighbors = [n.split(',')[0] for n in fields[2:]]
            s[name] = EdgeSwitch(name, dpid, neighbors)
        return s
    def parseCores(self, cfg):
        # Core line format: "<name> <dpid> [<vlan>:<vlan>:...]"
        s = {}
        for line in cfg:
            fields = line.split(' ')
            name, dpid = fields[0], int(fields[1])
            vlan = [] if len(fields) < 3 \
                   else [int(v) for v in fields[2].split(':')]
            s[name] = CoreSwitch(name, dpid, vlan)
        return s
    def parseHosts(self, cfg):
        # Host entries: "<name>,<ip>,<eth>,<vlan>:<vlan>:..." on edge lines.
        h = {}
        for line in cfg:
            fields = line.split(' ')
            switch = fields[0]
            for host in fields[2:]:
                hfields = host.split(',')
                name, ip, eth = hfields[0], hfields[1], hfields[2]
                vlans = [int(v) for v in hfields[3].split(':')]
                h[name] = Host(name, ip, eth, switch, vlans)
        return h
    def splitSections(self, contents, separator):
        # Group the stripped lines into runs delimited by separator lines.
        contents = [j.strip() for j in contents]
        return [list(j) for i,j in
                itertools.groupby(contents, lambda x:x in separator) if not i]
|
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 10/31/2008
# Description: toolstrip.py wrapper script
# Used by the toolstrip-*.py tests
##############################################################################
import sys
import os
import actions
import states
from strongwind import *
from toolstrip import *
# class to represent the main window.
class ToolStripFrame(accessibles.Frame):
    def __init__(self, accessible):
        super(ToolStripFrame, self).__init__(accessible)
        # Cache the frame's toolbar accessible for the test scripts.
        self.toolstrip = self.findToolBar(None)
    # close main window
    def quit(self):
        self.altF4()
|
#!/usr/bin/env python
"""
Interface to Wikipedia. Their API is in beta and subject to change.
As a result, this code is also in beta and subject to unexpected brokenness.
http://en.wikipedia.org/w/api.php
http://github.com/j2labs/wikipydia
jd@j2labs.net
"""
import urllib
import json as simplejson
import calendar
import datetime
import os
import sys
import time
import re
# Wikipedia API endpoint template; '%s' is filled with a language code ('en').
api_url = 'http://%s.wikipedia.org/w/api.php'
def _unicode_urlencode(params):
    """
    A unicode aware version of urllib.urlencode.
    Borrowed from pyfacebook :: http://github.com/sciyoshi/pyfacebook/
    """
    # Python 2: encode unicode values to UTF-8 byte strings before
    # urlencoding; plain strs pass through unchanged.
    if isinstance(params, dict):
        params = params.items()
    return urllib.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params])
def _run_query(args, language, retry=5, wait=5):
    """
    takes arguments and optional language argument and runs query on server
    if a socket error occurs, wait the specified seconds of time and retry for the specified number of times
    """
    url = api_url % (language)
    data = _unicode_urlencode(args)
    while True:
        try:
            # POST the encoded arguments (Python 2 urllib) and decode JSON.
            search_results = urllib.urlopen(url, data=data)
            json = simplejson.loads(search_results.read())
        except Exception:
            # Out of retries: propagate the original error.
            if not retry:
                raise
            retry -= 1
            time.sleep(wait)
        else:
            break
    return json
def opensearch(query, language='en'):
    """
    action=opensearch
    """
    args = {'action': 'opensearch', 'search': query, 'format': 'json'}
    return _run_query(args, language)
def get_page_id(title, query_results):
    """
    Extracts the title's pageid from the query results.
    Assumes queries of the form query:pages:id,
    and properly handle the normalized method.
    Returns -1 if it cannot find the page id
    """
    query = query_results['query']
    # Map the title through the server-side normalization table, if present.
    for normalized in query.get('normalized', []):
        if title == normalized['from']:
            title = normalized['to']
    pages = query['pages']
    for page in pages:
        if pages[page]['title'] == title:
            return str(pages[page]['pageid'])
    return str(-1)
def query_page_id(title, language='en'):
    """
    Queries for the title's pageid.

    Returns the id as a string, or '-1' when the page cannot be found.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'prop': 'info',
        'titles': title,
        'format': 'json',
    }
    json = _run_query(query_args, language)
    return get_page_id(title, json)
def query_exists(title, language='en'):
    """
    Query if the page of the title exists.
    """
    # Titles containing '|' are wiki link syntax, never real page titles.
    if title.find('|') != -1:
        return False
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'titles': title,
        'format': 'json',
    }
    json = _run_query(query_args, language)
    # check if it is an inter-wiki title e.g. Commons:Main_Page
    if 'pages' not in json['query']:
        return False
    for page_id, page_info in json['query']['pages'].items():
        # Positive ids are real pages; 'missing'/'invalid' flag bad titles.
        if int(page_id) > 0:
            if 'missing' not in page_info and 'invalid' not in page_info:
                return True
    return False
def query_normalized_title(title, language='en'):
    """
    Query the normalization of the title.
    title is a Unicode string.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'titles': title,
        'format': 'json',
    }
    json = _run_query(query_args, language)
    # Apply the server-side normalization mapping, if any.
    if 'normalized' in json['query']:
        for pair in json['query']['normalized']:
            if title == pair['from']:
                title = pair['to']
    return title
def query_redirects(title, language='en'):
    """
    Resolve the title through server-side normalization and redirects.
    title is a Unicode string.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'titles': title,
        'format': 'json',
        'redirects': '',
    }
    json = _run_query(query_args, language)
    # First apply normalization, then follow any redirect mapping.
    if 'normalized' in json['query']:
        for pair in json['query']['normalized']:
            if title == pair['from']:
                title = pair['to']
    if 'redirects' in json['query']:
        for pair in json['query']['redirects']:
            if title == pair['from']:
                title = pair['to']
    return title
def query_revid_by_date(title, language='en', date=None, time="000000", direction='older', limit=1):
    """
    Query for the revision ID of an article on a certain date.
    Return 0 if no revision ID is found.
    This method can be used in conjuction with query_text_raw_by_revid
    """
    # Fix: a default of datetime.date.today() in the signature is evaluated
    # once at import time; resolve "today" at call time instead. Also
    # removed the dead local `url` — _run_query builds the URL itself.
    if date is None:
        date = datetime.date.today()
    query_args = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'rvprop': 'ids',
        'titles': title,
        'rvdir': direction,
        'rvlimit': limit,
        'rvstart': date.strftime("%Y%m%d")+time
    }
    json = _run_query(query_args, language)
    # NOTE: Python 2 — dict.keys() is an indexable list here.
    pageid = json['query']['pages'].keys()[0]
    if 'revisions' not in json['query']['pages'][pageid]:
        return 0
    revid = json['query']['pages'][pageid]['revisions'][0]['revid']
    return revid
def query_revid_by_date_fallback(title, language='en', date=datetime.date.today(), time="235959"):
    """
    Query for revision ID of an article on a certain date.
    If the article was moved later, it fallsback to the moved article.
    The title argument is a Unicode string.
    Return 0 if there exists no such an revision before the date.
    """
    # NOTE(review): the `date` default is evaluated once at import time, so
    # "today" is the import day, not the call day — confirm intended.
    revid = query_revid_by_date(title, language, date, time=time, direction="older")
    while not revid:
        # the page was moved later
        # Take the earliest revision instead; if it is a #REDIRECT stub,
        # follow the redirect target and retry the date lookup there.
        revid = query_revid_by_date(title, language, date, time=time, direction='newer')
        if not revid:
            return 0
        redirects = query_text_raw_by_revid(revid, language)['text']
        if not redirects or not redirects.lower().startswith('#redirect [[') or not redirects.endswith(']]'):
            return 0
        # Strip '#REDIRECT [[' and ']]' to get the target title.
        title = redirects[12:-2]
        revid = query_revid_by_date(title, language, date, time="235959", direction="older")
    return revid
def query_language_links(title, language='en', limit=250):
    """
    action=query,prop=langlinks
    returns a dict of inter-language links, containing the lang abbreviation
    and the corresponding title in that language
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'prop': 'langlinks',
        'titles': title,
        'format': 'json',
        'lllimit': limit
    }
    json = _run_query(query_args, language)
    page_id = get_page_id(title, json)
    lang_links = {}
    if 'langlinks' in json['query']['pages'][page_id].keys():
        lang_links = dict([(ll['lang'], ll['*']) for ll in json['query']['pages'][page_id]['langlinks']])
    return lang_links
def query_categories(title, language='en'):
    """
    action=query,prop=categories
    Returns a full list of categories for the title
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'prop': 'categories',
        'titles': title,
        'format': 'json',
    }
    categories = []
    # Follow the query-continue cursor until all categories are fetched.
    while True:
        json = _run_query(query_args, language)
        for page_id in json['query']['pages']:
            if 'categories' in json['query']['pages'][page_id].keys():
                for category in json['query']['pages'][page_id]['categories']:
                    categories.append(category['title'])
        if 'query-continue' in json:
            query_args['clcontinue'] = json['query-continue']['categories']['clcontinue']
        else:
            break
    return categories
def query_categories_by_revid(revid, language='en'):
    """
    action=query,prop=categories
    Returns a full list of categories for the revision id
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'prop': 'categories',
        'revids': revid,
        'format': 'json',
    }
    categories = []
    # Follow the query-continue cursor until all categories are fetched.
    while True:
        json = _run_query(query_args, language)
        for page_id in json['query']['pages']:
            if 'categories' in json['query']['pages'][page_id].keys():
                for category in json['query']['pages'][page_id]['categories']:
                    categories.append(category['title'])
        if 'query-continue' in json:
            query_args['clcontinue'] = json['query-continue']['categories']['clcontinue']
        else:
            break
    return categories
def query_category_members(category, language='en', limit=100):
    """
    action=query,list=categorymembers
    Returns all the members of a category up to the specified limit
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'list': 'categorymembers',
        'cmtitle': category,
        'format': 'json',
        'cmlimit': min(limit, 500)  # never request more than 500 per call
    }
    members = []
    # Page through results until the limit is reached or no more remain.
    while True:
        json = _run_query(query_args, language)
        for member in json['query']['categorymembers']:
            members.append(member['title'])
        if 'query-continue' in json and len(members) <= limit:
            query_args['cmcontinue'] = json['query-continue']['categorymembers']['cmcontinue']
        else:
            break
    return members[0:limit]
def query_random_titles(language='en', num_items=10):
    """
    action=query,list=random
    Queries wikipedia multiple times to get random pages from namespace 0
    """
    # NOTE(review): this function is shadowed by a second definition of
    # query_random_titles later in this module; callers get that version.
    url = api_url % (language)
    query_args = {
        'action': 'query',
        'list': 'random',
        'format': 'json',
    }
    random_titles = []
    # Keep querying until enough namespace-0 titles are collected; pages
    # from other namespaces are discarded.
    while len(random_titles) < num_items:
        json = _run_query(query_args, language)
        for random_page in json['query']['random']:
            if random_page['ns'] == 0:
                random_titles.append(random_page['title'])
    return random_titles
def query_links(title, language='en'):
    """
    action=query,prop=links
    Returns a full list of links on the page
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'prop': 'links',
        'titles': title,
        'format': 'json',
    }
    links = []
    # Follow the query-continue cursor until all links are fetched.
    while True:
        json = _run_query(query_args, language)
        for page_id in json['query']['pages']:
            if 'links' in json['query']['pages'][page_id].keys():
                for link in json['query']['pages'][page_id]['links']:
                    links.append(link['title'])
        if 'query-continue' in json:
            query_args['plcontinue'] = json['query-continue']['links']['plcontinue']
        else:
            break
    return links
def query_links_by_revid(revid, language='en'):
    """
    Returns a full list of links on the page, for the given revision id.
    """
    # Fix: the helper is named query_text_raw_by_revid — the previous call to
    # query_text_raw_by_revision referenced a function that does not exist.
    # Also forward the language argument.
    text = query_text_raw_by_revid(revid, language)['text']
    links = get_links(text).values()
    return links
def query_revision_by_date(title, language='en', date=datetime.date.today(), time="000000", direction='newer', limit=10):
    """
    Queries wikipedia for revisions of an article on a certain date.
    CCB: I'm not quite sure what I should be returning just yet...

    NOTE(review): date/time/direction/limit are accepted but currently
    unused — the query ignores them.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'titles': title,
    }
    json = _run_query(query_args, language)
    return json
def query_revision_diffs(rev_id_1, rev_id_2, language='en'):
    """
    Queries wikipedia for the diff between two revisions of an article
    CCB: I'm not quite sure what I should be returning just yet...
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        # Always diff the older revision against the newer one.
        'revids': min(rev_id_1, rev_id_2),
        'rvdiffto': max(rev_id_1, rev_id_2)
    }
    json = _run_query(query_args, language)
    return json
def query_page_view_stats(title, language='en', start_date=(datetime.date.today()-datetime.timedelta(1)), end_date=datetime.date.today()):
    """
    Queries stats.grok.se for the daily page views for wikipedia articles

    Walks month by month from start_date to end_date, collecting each
    month's JSON blob and a grand total of views.

    NOTE(review): the date defaults are evaluated once at import time.
    """
    stats_api_url = 'http://stats.grok.se/json/%s/%s/%s'
    # Fix: 01 is a Python 2 octal-style literal (a syntax error on
    # Python 3); use plain 1. Value is unchanged.
    earliest_date = datetime.date(2007, 1, 1)
    query_date = max(start_date, earliest_date)
    end_date = min(end_date, datetime.date.today())
    total_views = 0
    stats = {}
    stats['monthly_views'] = {}
    while(query_date < end_date):
        query_date_str = query_date.strftime("%Y%m")
        url = stats_api_url % (language, query_date_str, urllib.quote(title.encode('utf-8')))
        search_results = urllib.urlopen(url)
        json = simplejson.loads(search_results.read())
        total_views += json['total_views']
        stats['monthly_views'][query_date_str] = json
        # Advance to the first day of the next month.
        days_in_month = calendar.monthrange(query_date.year, query_date.month)[1]
        query_date = query_date + datetime.timedelta(days_in_month)
    stats['total_views'] = total_views
    return stats
def query_text_raw(title, language='en'):
    """
    action=query
    Fetches the article in wikimarkup form
    """
    query_args = {
        'action': 'query',
        'titles': title,
        'rvprop': 'content',
        'prop': 'info|revisions',
        'format': 'json',
        'redirects': ''
    }
    json = _run_query(query_args, language)
    pages = json['query']['pages']
    for page_id in pages:
        page = pages[page_id]
        # Skip invalid (-1) and missing pages.
        if page_id == '-1' or 'missing' in page:
            continue
        return {
            'text': page['revisions'][0]['*'],
            'revid': page['lastrevid'],
        }
    return None
def query_text_raw_by_revid(revid, language='en'):
    """
    action=query
    Fetches the specified revision of an article in wikimarkup form

    Returns {'text': ..., 'revid': ...}; text is None and revid is 0 when
    the revision has no accessible content.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'rvprop': 'content',
        'prop': 'info|revisions',
        'format': 'json',
        'revids': revid,
    }
    json = _run_query(query_args, language)
    for page_id, page_info in json['query']['pages'].items():
        if '*' in page_info['revisions'][0]:
            response = {
                'text': page_info['revisions'][0]['*'],
                'revid': page_info['lastrevid'],
            }
            return response
    response = {
        'text': None,
        'revid': 0,
    }
    return response
def query_text_rendered_by_revid(revid, language='en'):
    """
    action=parse
    Fetches the specified revision of an article in HTML form
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'parse',
        'format': 'json',
        'oldid': revid,
    }
    json = _run_query(query_args, language)
    response = {
        'html': json['parse']['text']['*'],
        'revid': revid,
    }
    return response
def query_random_titles(language='en', num_items=10):
    """
    action=query,list=random
    Queries wikipedia multiple times to get random articles

    NOTE(review): this redefinition shadows the earlier query_random_titles
    in this module; consider removing one of the two.
    """
    # Fix: removed dead local `url` — _run_query builds the URL itself.
    query_args = {
        'action': 'query',
        'list': 'random',
        'format': 'json',
        'rnnamespace': '0',
        'rnlimit': str(num_items),
    }
    random_titles = []
    while len(random_titles) < num_items:
        json = _run_query(query_args, language)
        for random_page in json['query']['random']:
            # Python 2: encode to UTF-8 bytes; spaces become underscores as
            # in URL-style titles.
            random_titles.append(random_page['title'].encode("utf-8").replace(' ', '_'))
    return random_titles
def query_text_rendered(page, language='en'):
    """
    action=parse
    Fetches the article in parsed html form
    """
    args = {
        'action': 'parse',
        'page': page,
        'format': 'json',
        'redirects': ''
    }
    result = _run_query(args, language)
    return {
        'html': result['parse']['text']['*'],
        'revid': result['parse']['revid'],
    }
def query_rendered_altlang(title, title_lang, target_lang):
    """
    Takes a title and the language the title is in, asks wikipedia for
    alternative language offerings and fetches the article hosted by
    wikipedia in the target language.

    Raises ValueError when no inter-language link exists for target_lang.
    """
    lang_links = query_language_links(title, title_lang, limit=100)
    if target_lang in lang_links:
        return query_text_rendered(lang_links[target_lang], language=target_lang)
    else:
        # Fix: the error must be raised, not returned — `return ValueError`
        # silently handed callers an exception instance.
        raise ValueError('Language not supported')
def get_sections(wikified_text):
    """
    Parses the wikipedia markup for a page and returns
    two arrays, one containing section headers and one
    containing the (marked up) text of the section.

    The first entry has an empty header: it is the text before the first
    heading.
    """
    title_pattern = re.compile(r'={1,6}([^=]+)={1,6}')
    headers = []
    contents = []
    header = ''
    content_start = 0
    for match in title_pattern.finditer(wikified_text):
        # Emit the previous section; its content runs up to this heading.
        headers.append(header)
        contents.append(wikified_text[content_start:match.start()])
        header = match.groups()[0]
        content_start = match.end()
    headers.append(header)
    # Fix: slice to the end of the text; the previous bound of
    # len(wikified_text)-1 silently dropped the final character.
    contents.append(wikified_text[content_start:])
    return dict([('headers', headers), ('contents', contents)])
def get_first_section(wikified_text):
    """
    Parses the wikipedia markup for a page and returns the first section
    (everything before the first '== ... ==' heading), UTF-8 encoded.
    """
    title_pattern = re.compile('==.*?==')
    match = title_pattern.search(wikified_text)
    if match is None:
        return ''
    return wikified_text[0:match.start()].encode("utf-8")
def get_links(wikified_text):
    """Map rendered wiki-link text onto the underlying link targets.

    For ``[[Target|label]]`` the entry is ``label -> Target``; for a bare
    ``[[Target]]`` the rendered text and the target coincide.
    """
    link_re = re.compile('\[\[.*?\]\]')
    rendered_to_target = {}
    for m in link_re.finditer(wikified_text):
        parts = wikified_text[m.start() + 2:m.end() - 2].split('|', 1)
        rendered_to_target[parts[-1]] = parts[0]
    return rendered_to_target
def get_article_titles(wikified_text):
    """Parse wiki markup and return the linked article titles.

    Titles are returned UTF-8 encoded with spaces replaced by
    underscores (the canonical wiki title form).
    """
    link_pattern = re.compile(r'\[\[.*?\]\]')
    linked_text = []
    for match in link_pattern.finditer(wikified_text):
        link = wikified_text[match.start()+2:match.end()-2].split('|', 1)
        link_title = link[0].encode("utf-8")
        # Bug fix: link_title is a bytes object after .encode(), so the
        # replacement arguments must be bytes too -- str arguments raise
        # TypeError on Python 3 (on Python 2 bytes and str are the same
        # type, so this stays backward compatible).
        linked_text.append(link_title.replace(b' ', b'_'))
    return linked_text
def get_externallinks(wikified_text):
    """Map rendered external-link text onto the link URLs.

    For ``[url label]`` the entry is ``label -> url``; a bare ``[url]``
    maps the URL onto itself. Double-bracket wiki links are excluded by
    the pattern.
    """
    external_re = re.compile(r'\[[^[\]]*?\](?!\])')
    rendered_to_url = {}
    for m in external_re.finditer(wikified_text):
        parts = wikified_text[m.start() + 1:m.end() - 1].split(' ', 1)
        rendered_to_url[parts[-1]] = parts[0]
    return rendered_to_url
def get_parsed_text(wikified_text, language='en'):
    """Parse arbitrary wiki text through the MediaWiki ``action=parse`` API.

    Returns the raw JSON response from the API.
    """
    return _run_query(
        {
            'action': 'parse',
            'text': wikified_text,
            'format': 'json'
        },
        language)
def get_plain_text(wikified_text):
    """Strip wiki links and external links, keeping the rendered text.

    ``[[Target|label]]`` becomes ``label`` (a bare ``[[Target]]`` becomes
    ``Target``) and ``[...]`` external links are removed entirely; the
    result is whitespace-stripped at both ends.
    """
    def rendered(match):
        return match.group(1).split('|', 1)[-1]

    without_links = re.compile(r'\[\[(.*?)\]\]').sub(rendered, wikified_text)
    without_external = re.compile(r'\[.*?\]').sub('', without_links)
    return without_external.strip()
def get_positive_controls(language, date, num_days):
    """Return the positive controls for the HIT.

    Intersects the article titles mentioned on Wikipedia's current-events
    portal with the locally stored top articles of the last *num_days*
    days, returning a dict of article title -> date string for titles
    that appear in both sets.
    """
    current_news = query_current_events(date, num_days)
    top_news = {}
    wikitopics_path = os.environ['WIKITOPICS']
    articles_path = wikitopics_path + "/data/articles/" + language + "/" + str(date.year) + "/"
    for i in range(0, num_days):
        new_date = date - datetime.timedelta(days=i)
        article_date = new_date.strftime("%Y-%m-%d")
        articles = articles_path + article_date
        if os.path.exists(articles):
            for infile in os.listdir(articles):
                # Files ending in "es" are the serialized article listings;
                # the trailing 10 characters are stripped to recover the
                # article title.
                if infile[-2:] == "es":
                    top_news[infile[:-10]] = article_date
    intersection = set(current_news) & set(top_news.keys())
    # Bug fix: deleting keys while iterating over .items() raises
    # RuntimeError on Python 3 (dict changed size during iteration);
    # iterate over a snapshot of the keys instead.
    for key in list(top_news.keys()):
        if key not in intersection:
            del top_news[key]
    return top_news
def get_negative_controls(language, date, num_random=10, num_days=1):
    """Return the negative controls for the HIT.

    Samples *num_random* random article titles and removes any that were
    among the locally stored top articles for the last *num_days* days.
    Returns a list of the remaining titles.
    """
    random_titles = query_random_titles(language, num_random)
    wikitopics_path = os.environ['WIKITOPICS']
    articles_path = wikitopics_path + "/data/articles/" + language + "/" + str(date.year) + "/"
    top_news = []
    for i in range(0, num_days):
        new_date = date - datetime.timedelta(days=i)
        articles = articles_path + new_date.strftime("%Y-%m-%d")
        if os.path.exists(articles):
            for infile in os.listdir(articles):
                # Files ending in "es" hold the top-article listings.
                if infile[-2:] == "es":
                    top_news.append(infile[:-10])
    # Bug fix for Python 3 portability: filter() is lazy on Python 3, so
    # callers expecting a list (as on Python 2) would break. A list
    # comprehension returns a list on both versions.
    return [title for title in random_titles if title not in top_news]
def query_current_events(date, numDays=1):
    """
    Retrieves the current events for a specified date.
    Can also retrieve the previous dates if needed.
    Currently only works for English.

    Returns a flat list of linked article titles harvested from the
    bullet lines of the Portal:Current_events pages, or None as soon as
    one of the portal pages cannot be fetched.
    """
    response = []
    oneday = datetime.timedelta(days=1)
    for i in range(0, numDays):
        # NOTE(review): the date is decremented *before* the first fetch,
        # so the given date itself is excluded -- confirm this is intended.
        date = date - oneday
        title = 'Portal:Current_events/' + date.strftime("%Y_%B_") + str(date.day)
        text_raw = query_text_raw(title)
        if not text_raw:
            # Abort entirely if any day's portal page is missing.
            return None
        text = text_raw['text']
        lines = text.splitlines()
        for line in lines:
            # Only bullet lines ('*') carry event entries.
            if not line.startswith('*'):
                continue
            response.extend(get_article_titles(line))
    return response
    # NOTE(review): the string below is unreachable dead code (it sits
    # after the return); kept as-is for reference.
    """
    For now, we just need the article title
    event = {
        'text' : get_plain_text(line),
        'links' : get_links(line),
        'externallinks' : get_externallinks(line),
        'revid' : text_raw['revid']
    }
    response.append(event)
    """
def get_page_extract(title, language='en'):
    """Fetch the plain-text extract of a page via ``prop=extracts``.

    Returns None when the page does not exist (the API reports missing
    pages under the pseudo page-id '-1').
    """
    args = {
        'prop': 'extracts',
        'titles': title,
        'action': 'query',
        'explaintext': True,
        'format': 'json',
    }
    json = _run_query(args, language)
    pages = json['query']['pages']
    if '-1' in pages:
        return None
    # Bug fix: dict.values() is a view on Python 3 and does not support
    # indexing; take the first (and only) page explicitly. Works on
    # Python 2 as well.
    return next(iter(pages.values()))['extract']
|
from chainer.functions.connection import deconvolution_nd
from chainer import initializers
from chainer import link
from chainer.utils import conv_nd
from chainer import variable
class DeconvolutionND(link.Link):
    """N-dimensional deconvolution function.
    This link wraps :func:`~chainer.functions.deconvolution_nd` function and
    holds the filter weight and bias vector as its parameters.
    Deconvolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for images of fixed-size,
    can provide a significant performance boost for fixed neural nets.
    To enable, set `chainer.using_config('autotune', True)`
    Args:
        ndim (int): Number of spatial dimensions.
        in_channels (int): Number of channels of input arrays.
            If ``None``, parameter initialization will be deferred until the
            first forward data pass at which time the size will be determined.
        out_channels (int): Number of channels of output arrays.
        ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints): Stride of filter application.
            ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
        pad (int or tuple of ints): Spatial padding width for input arrays.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        nobias (bool): If ``True``, then this function does not use the bias.
        outsize (tuple of ints): Expected output size of deconvolutional
            operation. It should be a tuple of ints that represents the output
            size of each dimension. Default value is ``None`` and the outsize
            is estimated with input size, stride and pad.
        initialW (:ref:`initializer <initializer>`): Initializer to
            initialize the weight. When it is :class:`numpy.ndarray`,
            its ``ndim`` should be :math:`n+2` where :math:`n` is
            the number of spatial dimensions.
        initial_bias (:ref:`initializer <initializer>`): Initializer to
            initialize the bias. If ``None``, the bias will be initialized to
            zero. When it is :class:`numpy.ndarray`, its ``ndim`` should 1.
        dilate (:class:`int` or :class:`tuple` of :class:`int` s):
            Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
        groups (:class:`int`):
            The number of groups to use grouped convolution.
            The default is one, where grouped convolution is not used.
    .. seealso::
        :func:`~chainer.functions.deconvolution_nd`
    Attributes:
        W (~chainer.Variable): Weight parameter.
        b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
            set to ``None``.
    .. admonition:: Example
        There are several ways to make a DeconvolutionND link.
        Let an input vector ``x`` be:
        >>> x = np.arange(2 * 5 * 5 * 5, dtype=np.float32).reshape(
        ...     1, 2, 5, 5, 5)
        1. Give the first four arguments explicitly:
            >>> l = L.DeconvolutionND(3, 2, 7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 8, 8, 8)
        2. Omit ``in_channels`` or fill it with ``None``:
            The below two cases are the same.
            >>> l = L.DeconvolutionND(3, 7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 8, 8, 8)
            >>> l = L.DeconvolutionND(3, None, 7, 4)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 8, 8, 8)
        When you omit the second argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword auguments. So the
        below two cases are the same.
            >>> l = L.DeconvolutionND(3, 7, 4, stride=2, pad=1)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 10, 10, 10)
            >>> l = L.DeconvolutionND(3, None, 7, 4, 2, 1)
            >>> y = l(x)
            >>> y.shape
            (1, 7, 10, 10, 10)
    """

    def __init__(self, ndim, in_channels, out_channels, ksize=None, stride=1,
                 pad=0, nobias=False, outsize=None, initialW=None,
                 initial_bias=None, dilate=1, groups=1):
        super(DeconvolutionND, self).__init__()
        # Short-form call ``DeconvolutionND(ndim, out_channels, ksize)``:
        # shift the positional arguments and defer in_channels detection
        # until the first forward pass.
        if ksize is None:
            out_channels, ksize, in_channels = \
                in_channels, out_channels, None
        self.out_channels = out_channels
        self.ksize = conv_nd.as_tuple(ksize, ndim)
        self.stride = stride
        self.pad = pad
        self.outsize = outsize
        self.dilate = conv_nd.as_tuple(dilate, ndim)
        self.groups = int(groups)
        with self.init_scope():
            # Weight is registered without a shape when in_channels is
            # unknown; it is materialized lazily in _initialize_params().
            W_initializer = initializers._get_initializer(initialW)
            self.W = variable.Parameter(W_initializer)
            if in_channels is not None:
                self._initialize_params(in_channels)
            if nobias:
                self.b = None
            else:
                # Bias defaults to zeros when no initializer is given.
                if initial_bias is None:
                    initial_bias = 0
                initial_bias = initializers._get_initializer(initial_bias)
                self.b = variable.Parameter(initial_bias, out_channels)

    def _initialize_params(self, in_channels):
        """Materialize W once the input channel count is known."""
        if self.out_channels % self.groups != 0:
            raise ValueError('the number of output channels must be'
                             'divisible by the number of groups')
        if in_channels % self.groups != 0:
            raise ValueError('the number of input channels must be'
                             'divisible by the number of groups')
        # Grouped deconvolution: each group only produces
        # out_channels/groups output channels.
        W_shape = (
            in_channels, int(self.out_channels / self.groups)) + self.ksize
        self.W.initialize(W_shape)

    def forward(self, x):
        # Deferred initialization path: infer in_channels from the input.
        if self.W.array is None:
            self._initialize_params(x.shape[1])
        return deconvolution_nd.deconvolution_nd(
            x, self.W, b=self.b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate, groups=self.groups)
class Deconvolution1D(DeconvolutionND):
    """1-dimensional deconvolution layer.

    .. note::
        A thin convenience subclass that fixes ``ndim`` to 1; every other
        argument is forwarded unchanged to
        :class:`~chainer.links.DeconvolutionND` -- see that class for the
        detailed behavior.
    """

    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, outsize=None, initialW=None, initial_bias=None,
                 dilate=1, groups=1):
        super(Deconvolution1D, self).__init__(
            1, in_channels, out_channels, ksize, stride=stride, pad=pad,
            nobias=nobias, outsize=outsize, initialW=initialW,
            initial_bias=initial_bias, dilate=dilate, groups=groups)
class Deconvolution3D(DeconvolutionND):
    """3-dimensional deconvolution layer.

    .. note::
        A thin convenience subclass that fixes ``ndim`` to 3; every other
        argument is forwarded unchanged to
        :class:`~chainer.links.DeconvolutionND` -- see that class for the
        detailed behavior.
    """

    def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
                 nobias=False, outsize=None, initialW=None, initial_bias=None,
                 dilate=1, groups=1):
        super(Deconvolution3D, self).__init__(
            3, in_channels, out_channels, ksize, stride=stride, pad=pad,
            nobias=nobias, outsize=outsize, initialW=initialW,
            initial_bias=initial_bias, dilate=dilate, groups=groups)
|
import os
import cv2
import torch
import torch.utils.data as data
class YCG09DataSet(data.Dataset):
    """Dataset over the YCG09 OCR corpus.

    Reads a ``train.txt``/``test.txt`` label file where each line holds an
    image filename followed by integer labels (every label is shifted by
    +2 on load). Images live under ``<file_path>/images``. ``start_index``
    allows skipping a leading prefix of the samples; ``data_length`` may
    truncate the dataset either to an absolute count (> 1) or to a
    fraction (between 0 and 1).
    """

    def __init__(self, file_path, train_data=True, transform=None,
                 data_length=None, label_transform=None, start_index=0, split_char=' '):
        super(YCG09DataSet, self).__init__()
        self.img_file_path = os.path.join(file_path, 'images')
        self.training = train_data
        self.data_length = data_length
        self.label_file = os.path.join(file_path, 'train.txt' if train_data else 'test.txt')
        self.transform = transform
        self.label_transform = label_transform
        self.all_sample = []
        self.start_index = start_index
        self.use_start_index = start_index > 0
        with open(self.label_file, 'r') as label_text:
            for raw_line in label_text.readlines():
                fields = raw_line.strip().split(split_char)
                filename = fields[0]
                if split_char == ' ':
                    targets = [int(field.strip()) + 2 for field in fields[1:]]
                elif split_char == '\t':
                    targets = [int(field.strip()) + 2 for field in fields[1].split(' ')]
                else:
                    raise KeyError('unknown split_char')
                self.all_sample.append((filename, tuple(targets)))

    def reset_index(self, use_start_index):
        """Enable or disable skipping the first ``start_index`` samples."""
        self.use_start_index = use_start_index

    def __len__(self):
        if self.use_start_index:
            return len(self.all_sample) - self.start_index
        if self.data_length is None:
            return len(self.all_sample)
        if self.data_length > 1:
            return int(self.data_length)
        if 0 < self.data_length < 1:
            return int(len(self.all_sample) * self.data_length)
        return len(self.all_sample)

    def __getitem__(self, index):
        offset = self.start_index if self.use_start_index else 0
        filename, targets = self.all_sample[index + offset]
        image = cv2.imread(os.path.join(self.img_file_path, filename), cv2.IMREAD_COLOR)
        if self.transform is not None:
            image = self.transform(image)
        target = torch.LongTensor(targets)
        if self.label_transform is not None:
            target = self.label_transform(target)
        return image, target
if __name__ == '__main__':
    # Smoke test: iterate the full test split and dump each sample.
    test_set = YCG09DataSet('/mnt/data/BaiduNetdiskDownload', False)
    for sample in test_set:
        print(sample)
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import random
BOARD_SIZE = 7 # how many pegs on the bottom-most row of the board
VIDEO_LENGTH = 10 # desired video length in seconds
FPS = 30 # frames per second for the video
diff = 0  # NOTE(review): not referenced anywhere in this module -- confirm before removing
def get_ball_bucket():
    """Simulate one ball falling through the Galton board.

    At each of the BOARD_SIZE peg rows the ball bounces left or right
    with equal probability; the resulting bucket index is the number of
    rightward bounces (0..BOARD_SIZE).
    """
    bounces = [random.choice("LR") for _ in range(BOARD_SIZE)]
    return bounces.count("R")
def galton_animate(num_balls):
    """Simulate dropping *num_balls* through a Galton board and render the
    growing bucket histogram to ``<num_balls>_balls.mp4``.

    Requires matplotlib's ffmpeg writer to be available on this machine.
    """
    fig, ax = plt.subplots() # get axes from matplotlib to plot
    x = range(0, BOARD_SIZE + 1) # a board with n pegs on the bottom row has n+1 spaces for balls to fall into
    simulation = [0] * (BOARD_SIZE + 1) # create an empty array to keep track of fallen balls
    plot = plt.bar(x, simulation) # create an empty bar plot, to be updated later
    ball_counter = 0 # since we want to simulate more than one ball per video frame, keep track of how many balls we've dropped so far
    frames = VIDEO_LENGTH * FPS # calculate the total number of frames
    balls_per_frame = max(num_balls // frames, 1) # drop at least one ball per frame
    ax.annotate("balls: " + str(num_balls), xy=(0.75, 0.9), xycoords='axes fraction', fontsize=10,
                bbox=dict(facecolor='white', alpha=0.8),
                horizontalalignment='left', verticalalignment='bottom') # draw an indicator for how many balls this video is simulating

    def animate(i):
        # this function is run once per iteration of the simulation
        nonlocal ball_counter
        if ball_counter < num_balls: # make sure we don't drop too many balls
            for _ in range(balls_per_frame):
                if ball_counter >= num_balls:
                    return
                simulation[get_ball_bucket()] += 1 # drop a random ball
                ball_counter += 1
        for bar, y in zip(plot, simulation):
            bar.set_height(y) # update each bar on the bar plot
        ax.relim() # rescale the numbers on the axes
        ax.autoscale_view(True, True, True) # rescale the view to the axes and data.

    # NOTE(review): FuncAnimation's ``interval`` is in milliseconds, so
    # 1/FPS here is ~0.03 ms; the saved file's frame rate comes from the
    # writer's fps below, so this likely only affects live preview -- confirm.
    ani = animation.FuncAnimation(fig, animate, frames + FPS,
                                  interval=1/FPS, blit=False, repeat=False) # add one second of animation to see the finished board
    # Set up formatting for the movie files
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=FPS, metadata=dict(artist='Me'), bitrate=1800)
    # Render the animation to disk
    ani.save(str(num_balls) + '_balls.mp4', writer=writer)
    print("Done simulation for " + str(num_balls) + " balls!")
if __name__ == "__main__":
    # Render one animation per ball count, reporting wall-clock time.
    for ball_count in [100, 1000, 10000, 50000, 100000, 1000000]:
        print("#Balls: ", ball_count)
        started = time.time()
        galton_animate(ball_count)
        finished = time.time()
        elapsed = str(finished - started)
        print("Simulation Time: ", elapsed)
|
"""This module provides a high level interface for layer augmentation."""
import logging
from keras.models import clone_model
from condense.keras import wrappers
from condense.keras import support
from condense.optimizer import sparsity_functions
from copy import deepcopy
def wrap_model(model, sparsity_fn):
    """This function turns a model into a prunable copy of itself.
    Every output layer ('output' in name) won't get affected by pruning.

    Args:
        model: Target model
        sparsity_fn: desired sparsity function for this model

    Todos:
        * layers are not deep copied
        * support for custom weights in PruningWrapper

    Returns:
        Augmented model (not a deepcopy)
    """
    if not issubclass(type(sparsity_fn), sparsity_functions.SparsityFunction):
        # Typo fix in the error message ("sprasity_fn" -> "sparsity_fn").
        raise ValueError("""argument sparsity_fn should be a subclass of SparsityFunction.""")

    class __WrappingFunction:
        """Callable helper carrying the sparsity function for clone_model."""

        def __init__(self, sparsity_fn):
            # Bug fix: the attribute was previously misspelled
            # ('funciton') and never read -- wrap() silently relied on
            # the enclosing closure instead. Store and use it explicitly.
            self.function = sparsity_fn

        def wrap(self, layer):
            # Unsupported layers and output layers pass through unpruned.
            if not support.is_supported_layer(layer) or 'output' in layer.name:
                logging.warning('Layer %s is not supported.', layer.get_config()["name"])
                return layer
            # Each wrapped layer receives its own copy of the sparsity
            # schedule so per-layer state does not interfere.
            return wrappers.PruningWrapper(layer, deepcopy(self.function))

    # It is important to get the weights of each layer individually,
    # because the wrapper will add additional variables to the model.
    weights = [layer.get_weights() for layer in model.layers]
    temp_wrapper = __WrappingFunction(sparsity_fn)
    new_model = clone_model(model=model,
                            clone_function=temp_wrapper.wrap)
    # Apply saved weights to each layer of the wrapped model individually.
    for weight, layer in zip(weights, new_model.layers):
        if isinstance(layer, wrappers.PruningWrapper):
            layer.layer.set_weights(weight)
    if model.optimizer and model.loss:
        new_model.compile(model.optimizer, model.loss)
    return new_model
def wrap_layer(layer, sparsity_fn):
    """Apply the PruningWrapper to the target layer if pruning is supported.

    The main use for this function is to serve as a clone_function for
    keras.models.clone_model().

    Args:
        layer: Keras Layer to be wrapped.
        sparsity_fn: Sparsity function handed to the wrapper.

    Returns:
        Either a wrapped layer if supported or the original layer.
    """
    if support.is_supported_layer(layer):
        return wrappers.PruningWrapper(layer, sparsity_fn)
    logging.warning('Layer %s is not supported.', layer.get_config()["name"])
    return layer
|
"""
Generation of intrinsic information from dynamically executed instances
A kind of fuzzing, maybe
"""
import builtins
import sys
import operator
import functools
import tqdm
import typhon.core.type_system.type_repr as type_repr
from typhon.core.type_system.intrinsics import intrinsic_function
# Builtin type names excluded from intrinsic generation (their probing
# is unhelpful or unsafe for this fuzzing approach).
skip_types = {
    # Collections
    "dict",
    "set",
    "frozenset",
    "list",
    "tuple",
    "bytearray",
    # Generators
    "filter",
    "map",
    "zip",
    "enumerate",
    "reversed",
    # User-defined class related
    "property",
    "super",
    "classmethod",
    "staticmethod",
    # Misc,
    "memoryview",
    "slice",
    "type",
    "__loader__",
}
# Attribute names that are never probed on instances (introspection
# machinery, descriptors, lifecycle hooks, async protocol, etc.).
skip_attrs = {
    "__abstractmethods__",
    "__base__",
    "__bases__",
    "__dict__",
    "__class__",
    "mro",
    "__mro__",
    "__reduce__",
    "__reduce_ex__",
    "__sizeof__",
    "__dictoffset__",
    "__flags__",
    "__module__",
    "__itemsize__",
    "__new__",
    "__init__",
    "__del__",
    "__getattr__",
    "__getattribute__",
    "__setattr__",
    "__delattr__",
    "__dir__",
    "__get__",
    "__set__",
    "__delete__",
    "__set_name__",
    "__slots__",
    "__init_subclass__",
    "__prepare__",
    "__instancecheck__",
    "__subclasscheck__",
    "__class_getitem__",
    "__call__",
    "__await__",
    "__aiter__",
    "__anext__",
    "__aenter__",
    "__aexit__",
    "format",
}
# Python type object -> its RecordType representation; seeded with the
# two singleton types that are not named in builtins.
types = {
    type(None): type_repr.RecordType("NoneType", {}),
    type(...): type_repr.RecordType("ellipsis", {}),
}
type_ctors = dict()  # Python type object -> its name in builtins
funcs = dict()  # builtins name -> generated constructor intrinsic
# First pass: register a RecordType for every non-skipped builtin type.
for k, v in builtins.__dict__.items():
    if k not in skip_types and type(v) is type:
        types[v] = type_repr.RecordType("builtins." + k, {})
        type_ctors[v] = k
# Second pass: mirror the builtin inheritance graph as nominal parents.
for k, v in builtins.__dict__.items():
    if k not in skip_types and type(v) is type:
        for sup in v.__bases__:
            if sup in types:
                types[v].add_nomial_parent(types[sup])
# Seed set of (type, sample instance) pairs used as concrete argument
# values during exploration; grows as exploration discovers new results.
roots = {
    (complex, 1 + 1j)
}
def explore(callee, depth=3, add_to_roots=False, v=False):
    """Probe *callee* with sample arguments to infer overload signatures.

    At ``depth == 0`` the callee is invoked with no arguments; if the
    result's type is recognized, that yields a zero-argument overload.
    For larger depths, every known root value is tried as the first
    argument (fixed via ``functools.partial``) and the remaining argument
    positions are explored recursively. Returns a list of
    ``type_repr.FunctionType`` overloads. ``v`` enables verbose tracing.
    """
    if depth == 0:
        try:
            if v:
                print(callee)
            result = callee()
            if v:
                print(result)
        except Exception as exc:
            # The call failed with no arguments: no zero-arity overload.
            if v:
                print(exc)
            return []
        if add_to_roots:
            # Remember the result as a future sample argument value.
            roots.add((type(result), result))
        if type(result) in types:
            return [type_repr.FunctionType((), types[type(result)])]
        else:
            return []
    # Arity probe: call with `depth` dummy arguments and inspect the
    # TypeError message for "wrong number of arguments" wording; if this
    # arity is impossible, recurse with one argument fewer.
    try:
        callee(*([1] * depth))
    except TypeError as exc:
        feature = str(exc)
        for d in (depth, depth + 1):
            if ('(%d given)' % d) in feature or ('got %d' % d) in feature:
                # print("No such depth", callee, depth)
                return explore(callee, depth - 1, add_to_roots, v=v)
    except Exception:
        pass
    overloads = []
    reg = dict()  # str(args) -> return type name, to detect inconsistent overloads
    token = object()  # sentinel meaning "leave the first argument unbound"
    for _, obj in list(roots) + [(None, token)]:
        if type(obj) not in types:
            print("Warning: %s not in recognized types" % obj, file=sys.stderr)
            continue
        T = types[type(obj)]
        if token is not obj:
            # Fix `obj` as the first argument and recurse on the rest.
            partial = functools.partial(callee, obj)
        else:
            partial = callee
        for overload in explore(partial, depth - 1, add_to_roots, v=v):
            args = (T,) + overload.args if token is not obj else overload.args
            if str(args) in reg:
                # Same argument tuple seen before: keep the first return
                # type and warn if a later probe disagrees.
                if reg[str(args)] != overload.r.name:
                    print(
                        "Warning: Inconsistent return type at %s(%s)"
                        % (callee, [x.name for x in args]), file=sys.stderr
                    )
                continue
            reg[str(args)] = overload.r.name
            overloads.append(type_repr.FunctionType(
                args,
                overload.r
            ))
    return overloads
# Two exploration rounds: the first discovers new root values, the second
# re-explores constructors with the enlarged root set.
for i in range(2):
    old_size = len(roots)  # NOTE(review): appears unused -- confirm before removing
    for t in tqdm.tqdm(types.keys()):
        if issubclass(t, BaseException):
            continue
        # Probe each type's constructor to build its "<ctor>" intrinsic.
        # complex is already seeded in roots, so its results are not added.
        ctor = intrinsic_function.ArrowCollectionIntrinsic(
            "<ctor>",
            explore(t, add_to_roots=t is not complex)
        )
        if t in type_ctors:
            funcs[type_ctors[t]] = ctor
# print(roots)
# Probe every attribute of every discovered root instance: attributes of
# a recognized type become record members; callables become function
# members with explored overload sets.
for _, obj in tqdm.tqdm(list(roots)):
    for attr in dir(obj):
        if attr in skip_attrs:
            continue
        attr_obj = getattr(obj, attr)
        if type(attr_obj) in types:
            types[type(obj)].members[attr] = types[type(attr_obj)]
        elif callable(attr_obj):
            # Skip in-place (__i*) and reflected (__r*) dunder variants
            # when the plain operator already exists on the object.
            if attr.startswith("__i") and hasattr(obj, attr.replace("__i", "__")):
                continue
            if attr.startswith("__r") and hasattr(obj, attr.replace("__r", "__")):
                continue
            # Prefer the operator-module implementation for dunders so the
            # instance is passed explicitly as the first argument.
            if attr.startswith("__") and hasattr(operator, attr):
                attr_obj = functools.partial(getattr(operator, attr), obj)
            collect_type = intrinsic_function.ArrowCollectionIntrinsic(
                types[type(obj)].name + "." + attr,
                explore(attr_obj, add_to_roots=False)
            )
            # Skip members for which no overload could be discovered.
            if len(collect_type) == 0:
                continue
            types[type(obj)].add_function_member(
                attr,
                collect_type
            )
# --- Code generation: serialize the discovered types into a Python module. ---
head = """# -*- coding: utf-8 -*-
from ..intrinsic_function import ArrowCollectionIntrinsic
from ...type_repr import FunctionType, RecordType
"""
code = [head]
# Mangle dotted type names into valid Python identifiers for the
# generated module.
sname = lambda x: x.name.replace(".", "_")
# Declare every record type first so later references resolve.
for t in types.values():
    code.append('%s = RecordType("%s", {})' % (sname(t), t.name))
ARROW = " FunctionType([%s], %s)"
FUNC = """%s.add_function_member(
    "%s",
    ArrowCollectionIntrinsic("%s", [
%s
    ])
)"""
for t in types.values():
    # Emit the nominal inheritance edges.
    for b in t.nomial_parents:
        code.append(
            "%s.add_nomial_parent(%s)"
            % (sname(t), sname(b))
        )
    for k in t.members.keys():
        if isinstance(t.members[k], intrinsic_function.ArrowCollectionIntrinsic):
            # Function member: emit one FunctionType stub per overload.
            sub_arrs = []
            for sub in t.members[k]:
                parms = ', '.join([sname(a) for a in sub.args])
                sub_arrs.append(ARROW % (parms, sname(sub.r)))
            code.append(FUNC % (sname(t), k, k, ', \n'.join(sub_arrs)))
        else:
            # Plain data member.
            code.append(
                '%s.members["%s"] = %s'
                % (sname(t), k, sname(t.members[k]))
            )
# Write the generated module to disk, one stub per line.
with open("temp/builtin_types_generated.py", "w") as fo:
    for stub in code:
        fo.write(stub)
        fo.write("\n")
|
from . import ConditionType
from agentml.errors import VarNotDefinedError
class UserVarType(ConditionType):
    """
    User Variable condition type
    """
    def __init__(self):
        """
        Initialize a new User Var Type instance
        """
        super(UserVarType, self).__init__('user_var')

    def get(self, agentml, user=None, key=None):
        """
        Look up the current value of a user variable

        :param agentml: The active AgentML instance
        :type agentml: AgentML

        :param user: The active user object
        :type user: agentml.User or None

        :param key: The variables key
        :type key: str

        :return: The variable's current value, or None when the user or
            key is missing or the variable has never been set
        :rtype : str or None
        """
        if user and key:
            try:
                return user.get_var(key)
            except VarNotDefinedError:
                return None
        return None
|
from collections import deque
from math import ceil
from types import SimpleNamespace
import cv2
import numpy as np
from face_recognition.detector import FaceDetectorModel
class ConfirmationWindowHandler:
    """Keeps one fixed-size ConfirmationWindow per key, created lazily."""

    def __init__(self):
        self.cw = dict()

    def add(self, key, value):
        """Append *value* to the window belonging to *key*."""
        self.get(key).append(value)

    def get(self, key):
        """Return the window for *key*, creating an empty one on first use."""
        try:
            return self.cw[key]
        except KeyError:
            window = ConfirmationWindow([], maxlen=20)
            self.cw[key] = window
            return window
class ConfirmationWindow(deque):
    """A deque of recent labels with a majority-vote accessor."""

    @property
    def most_common(self):
        """Return the most frequent element and its count as a namespace."""
        winner = max(set(self), key=self.count)
        return SimpleNamespace(value=winner, count=self.count(winner))
def process_frame(frame, confirmation_window_handler,
                  face_recognition_model, emotion_model):
    """Annotate one BGR video frame with face and emotion predictions.

    Draws a rectangle around every detected face; when the optional
    models are supplied, labels each face with the recognized name plus
    confidence and with an emotion label smoothed over a per-name
    confirmation window. Returns the annotated frame.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): a new FaceDetectorModel is constructed on every frame;
    # consider hoisting it to the caller if construction is expensive.
    face_detector = FaceDetectorModel()
    faces = face_detector.predict(gray)
    for face in faces:
        (x, y, w, h) = face["pos"]
        cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
        # Grayscale region of interest for the downstream models.
        roi_gray = gray[y:y + h, x:x + w]
        if face_recognition_model is not None:
            face_result = face_recognition_model.predict(roi_gray)
            name = face_result["name"]
            cv2.putText(frame,
                        f'{name}: {str(round(face_result["confidence"], 2))}',
                        (x - 60, y + h + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                        (0, 0, 255), 2, cv2.LINE_AA)
        if emotion_model is not None:
            # Resize to 48x48 and add channel + batch axes for the model.
            cropped_img = np.expand_dims(
                np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            emotion_label = emotion_model.predict(cropped_img)
            if confirmation_window_handler is not None:
                # NOTE(review): ``name`` is only bound when
                # face_recognition_model is not None; this branch raises
                # NameError otherwise -- confirm callers never pass an
                # emotion model + handler without a recognition model.
                confirmation_window_handler.get(name).append(emotion_label)
                print(f"CW Name {name}: {confirmation_window_handler.get(name)}")
                # Only trust the emotion once it dominates >60% of the window.
                if confirmation_window_handler.get(name).most_common.count > \
                        ceil(confirmation_window_handler.get(name).maxlen * 0.6):
                    text = emotion_label
                else:
                    text = "Neutral"
            else:
                text = emotion_label
            cv2.putText(frame, text, (x + 20, y - 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                        cv2.LINE_AA)
    return frame
|
from setuptools import find_packages, setup
import os
import sys
from distutils.sysconfig import get_python_lib
from setuptools import setup
CURRENT_PYTHON = sys.version_info[:2]  # (major, minor) of the running interpreter
REQUIRED_PYTHON = (3, 8)  # minimum supported Python version

# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
    # Abort installation with a readable explanation on old interpreters
    # (old pip versions ignore the python_requires classifier).
    sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of Django requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
    $ python -m pip install --upgrade pip setuptools
    $ python -m pip install django
This will install the latest version of Django which works on your
version of Python. If you can't upgrade your pip (or Python), request
an older version of Django:
    $ python -m pip install "django<2"
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
    sys.exit(1)

# All remaining package metadata is declared outside this script
# (setup.cfg / pyproject.toml).
setup()
|
from layer import *
class Model:
    """Globally-and-locally-consistent image completion model (TF1 graph mode).

    Builds the generator (completion network), a two-branch discriminator
    (global + local) and the associated losses as graph operations;
    nothing is executed at construction time.
    """

    def __init__(self, x, mosaic, mask, local_x, global_completion, local_completion, is_training, batch_size):
        self.batch_size = batch_size
        # mask == 1 marks the region to complete: keep the original pixels
        # outside the mask and the mosaic pixels inside it.
        self.merged = x * (1 - mask) + mosaic * (mask)
        self.imitation = self.generator(self.merged, is_training)
        # Paste the generated pixels back into the untouched original.
        self.completion = self.imitation * mask + x * (1 - mask)
        # Discriminator graph is built once and reused for fake inputs.
        self.real = self.discriminator(x, local_x, reuse=False)
        self.fake = self.discriminator(global_completion, local_completion, reuse=True)
        self.g_loss = self.calc_g_loss(x, self.completion)
        self.d_loss = self.calc_d_loss(self.real, self.fake)
        # Separate variable lists so each network gets its own optimizer step.
        self.g_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
        self.d_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')

    def generator(self, x, is_training):
        """Completion network: strided conv encoder, dilated-conv
        bottleneck, deconv decoder; tanh output."""
        with tf.variable_scope('generator'):
            with tf.variable_scope('conv1'):
                x = conv_layer(x, [5, 5, 3, 64], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv2'):
                x = conv_layer(x, [3, 3, 64, 128], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv3'):
                x = conv_layer(x, [3, 3, 128, 128], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv4'):
                x = conv_layer(x, [3, 3, 128, 256], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv5'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv6'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            # Dilated convolutions grow the receptive field without
            # further downsampling.
            with tf.variable_scope('dilated1'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated2'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 4)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated3'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 8)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('dilated4'):
                x = dilated_conv_layer(x, [3, 3, 256, 256], 16)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv7'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv8'):
                x = conv_layer(x, [3, 3, 256, 256], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            # Decoder: upsample back to the input resolution.
            with tf.variable_scope('deconv1'):
                x = deconv_layer(x, [4, 4, 128, 256], [self.batch_size, 64, 64, 128], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv9'):
                x = conv_layer(x, [3, 3, 128, 128], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('deconv2'):
                x = deconv_layer(x, [4, 4, 64, 128], [self.batch_size, 128, 128, 64], 2)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv10'):
                x = conv_layer(x, [3, 3, 64, 32], 1)
                x = batch_normalize(x, is_training)
                x = tf.nn.relu(x)
            with tf.variable_scope('conv11'):
                x = conv_layer(x, [3, 3, 32, 3], 1)
                x = tf.nn.tanh(x)
        return x

    def discriminator(self, global_x, local_x, reuse):
        """Two-branch discriminator: a global branch over the whole image
        and a local branch over the completed patch, concatenated into a
        single logit per sample."""
        def global_discriminator(x):
            # Batch norm here always runs in training mode.
            is_training = tf.constant(True)
            with tf.variable_scope('global'):
                with tf.variable_scope('conv1'):
                    x = conv_layer(x, [5, 5, 3, 64], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv2'):
                    x = conv_layer(x, [5, 5, 64, 128], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv3'):
                    x = conv_layer(x, [5, 5, 128, 256], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv4'):
                    x = conv_layer(x, [5, 5, 256, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv5'):
                    x = conv_layer(x, [5, 5, 512, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('fc'):
                    x = flatten_layer(x)
                    x = full_connection_layer(x, 1024)
            return x

        def local_discriminator(x):
            # Batch norm here always runs in training mode.
            is_training = tf.constant(True)
            with tf.variable_scope('local'):
                with tf.variable_scope('conv1'):
                    x = conv_layer(x, [5, 5, 3, 64], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv2'):
                    x = conv_layer(x, [5, 5, 64, 128], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv3'):
                    x = conv_layer(x, [5, 5, 128, 256], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('conv4'):
                    x = conv_layer(x, [5, 5, 256, 512], 2)
                    x = batch_normalize(x, is_training)
                    x = tf.nn.relu(x)
                with tf.variable_scope('fc'):
                    x = flatten_layer(x)
                    x = full_connection_layer(x, 1024)
            return x

        with tf.variable_scope('discriminator', reuse=reuse):
            global_output = global_discriminator(global_x)
            local_output = local_discriminator(local_x)
            with tf.variable_scope('concatenation'):
                # Fuse the two 1024-d feature vectors into one logit.
                output = tf.concat((global_output, local_output), 1)
                output = full_connection_layer(output, 1)
        return output

    def calc_g_loss(self, x, completion):
        """Reconstruction loss: L2 between original and completed image."""
        loss = tf.nn.l2_loss(x - completion)
        return tf.reduce_mean(loss)

    def calc_d_loss(self, real, fake):
        """Standard GAN discriminator loss (real -> 1, fake -> 0),
        scaled by alpha relative to the reconstruction loss."""
        alpha = 4e-4
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real, labels=tf.ones_like(real)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake, labels=tf.zeros_like(fake)))
        return tf.add(d_loss_real, d_loss_fake) * alpha
|
import factory
import factory.fuzzy
from blog.models import Post
from django.utils import timezone
from factory.django import DjangoModelFactory
from i18n.tests.factories import LanguageFactory
from pytest_factoryboy import register
from users.tests.factories import UserFactory
@register
class PostFactory(DjangoModelFactory):
    """Factory for blog ``Post`` instances with per-language text fields."""

    author = factory.SubFactory(UserFactory)
    title = LanguageFactory("sentence")
    slug = LanguageFactory("slug")
    excerpt = LanguageFactory("sentence")
    content = LanguageFactory("sentence")
    # LazyFunction defers evaluation to build time.  The original
    # ``published = timezone.now()`` ran once at import, so every Post
    # built by this factory shared the module-load timestamp.
    published = factory.LazyFunction(timezone.now)
    image = factory.django.ImageField()

    class Meta:
        model = Post
|
"""
15096. Batter Up
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 68 ms
해결 날짜: 2020년 9월 25일
"""
def main():
    """Average the at-bat values that are not -1 (BOJ 15096)."""
    input()  # the count line is unused; length comes from the data itself
    valid = [bat for bat in map(int, input().split()) if bat != -1]
    print(sum(valid) / len(valid))


if __name__ == '__main__':
    main()
# Loop control-flow demo: break, continue, and an if/elif/else inside a loop.
# Fixed: the original used Python 2 ``print i`` statements, which are a
# SyntaxError under Python 3; behavior (printed values) is unchanged.

# Prints 0..4, then stops at 5.
for i in range(7):
    if i == 5:
        break
    print(i)

# Prints only the odd numbers 1, 3, 5, 7.
for i in range(8):
    if i % 2 == 0:
        continue
    print(i)

# Prints squares not divisible by 3 until a square exceeds 50: 1, 4, 16, 25, 49.
for i in range(10):
    if i * i > 50:
        break
    elif i % 3 == 0:
        continue
    else:
        print(i * i)
|
import torch
import torch.testing as testing
import pytest
from spherical_distortion.nn import Unresample, InterpolationType
import utils
import parameters as params
bs = 3  # batch size applied to every test input via .repeat()
channels = 3  # channel count applied to every test input via .repeat()
kernel_size = 4  # fixture constant; not referenced by the tests in this chunk
def test_unresample_nearest_integer_sampling_cpu():
    """NEAREST unresampling with an integer-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=False)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_nearest_real_sampling_cpu():
    """NEAREST unresampling with a real-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=False)
    expected = torch.tensor([[1, 6], [10, 0], [0, 11]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_nearest_out_of_bounds_sampling_cpu():
    """NEAREST unresampling with out-of-bounds sample locations, on CPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=False)
    expected = torch.tensor([[0, 0], [0, 0], [4, 0]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bilinear_integer_sampling_cpu():
    """BILINEAR unresampling with an integer-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=False)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bilinear_real_sampling_cpu():
    """BILINEAR unresampling with a real-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=False)
    expected = torch.tensor([[0.5, 5.5], [9.5, 5.5], [1.5, 10.5]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bilinear_out_of_bounds_sampling_cpu():
    """BILINEAR unresampling with out-of-bounds sample locations, on CPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=False)
    expected = torch.tensor([[0, 0], [0, 0], [3, 0]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bispherical_integer_sampling_cpu():
    """BISPHERICAL unresampling with an integer-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=False)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bispherical_real_sampling_cpu():
    """BISPHERICAL unresampling with a real-valued sample map, on CPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=False)
    expected = torch.tensor([[0.5, 5.5], [9.5, 9.5], [1.5, 10.5]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_bispherical_out_of_bounds_sampling_cpu():
    """BISPHERICAL unresampling with out-of-bounds locations, on CPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=False)
    expected = torch.tensor([[0, 0], [0, 0], [4.75, 11]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
def test_unresample_weighted_sampling_cpu():
    """BISPHERICAL unresampling with explicit interpolation weights, on CPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_4x7().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer,
        input=batched,
        sample_map=params.sample_map5(),
        interp_weights=params.interp_weights0(),
        cuda=False)
    expected = torch.tensor([[14, 15.8], [9.6, 19.1]]).double()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# GPU TESTS
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_nearest_integer_sampling_cuda():
    """NEAREST unresampling with an integer-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=True)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_nearest_real_sampling_cuda():
    """NEAREST unresampling with a real-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=True)
    expected = torch.tensor([[1, 6], [10, 0], [0, 11]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_nearest_out_of_bounds_sampling_cuda():
    """NEAREST unresampling with out-of-bounds sample locations, on GPU."""
    layer = Unresample(InterpolationType.NEAREST)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=True)
    expected = torch.tensor([[0, 0], [0, 0], [4, 0]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bilinear_integer_sampling_cuda():
    """BILINEAR unresampling with an integer-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=True)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bilinear_real_sampling_cuda():
    """BILINEAR unresampling with a real-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=True)
    expected = torch.tensor([[0.5, 5.5], [9.5, 5.5],
                             [1.5, 10.5]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bilinear_out_of_bounds_sampling_cuda():
    """BILINEAR unresampling with out-of-bounds sample locations, on GPU."""
    layer = Unresample(InterpolationType.BILINEAR)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=True)
    expected = torch.tensor([[0, 0], [0, 0], [3, 0]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bispherical_integer_sampling_cuda():
    """BISPHERICAL unresampling with an integer-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map6(), cuda=True)
    expected = torch.tensor([[0, 5], [9, 11], [3, 10]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bispherical_real_sampling_cuda():
    """BISPHERICAL unresampling with a real-valued sample map, on GPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map7(), cuda=True)
    expected = torch.tensor([[0.5, 5.5], [9.5, 9.5],
                             [1.5, 10.5]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_bispherical_out_of_bounds_sampling_cuda():
    """BISPHERICAL unresampling with out-of-bounds locations, on GPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_3x4().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer, input=batched, sample_map=params.sample_map8(), cuda=True)
    expected = torch.tensor([[0, 0], [0, 0], [4.75, 11]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
@pytest.mark.skipif(not torch.cuda.is_available(),
                    reason='CUDA not detected on system')
def test_unresample_weighted_sampling_cuda():
    """BISPHERICAL unresampling with explicit interpolation weights, on GPU."""
    layer = Unresample(InterpolationType.BISPHERICAL)
    batched = params.input_4x7().repeat(bs, channels, 1, 1)
    output, _, _, gradcheck_res = utils.mapped_resample_test(
        layer,
        input=batched,
        sample_map=params.sample_map5(),
        interp_weights=params.interp_weights0(),
        cuda=True)
    expected = torch.tensor([[14, 15.8], [9.6, 19.1]]).double().cuda()
    assert gradcheck_res  # autograd gradient check must pass
    testing.assert_allclose(output, expected)
from datetime import datetime
from typing import NamedTuple, Dict, Optional
from ppe import models
from ppe.data_mapping.types import ImportedRow, repr_no_raw, DataFile
from ppe.data_mapping.utils import (
parse_date,
asset_name_to_item,
ErrorCollector,
parse_int_or_zero)
from ppe.dataclasses import OrderType
from xlsx_utils import SheetMapping, Mapping
class MakeRow(ImportedRow, NamedTuple):
    """One imported row of aggregate 'Make' (locally produced PPE) delivery data."""
    # NOTE(review): ``any`` below is the builtin, not ``typing.Any`` — it
    # works at runtime but will confuse static type checkers.
    iso_gowns: int
    face_shields: int
    delivery_date: Optional[datetime]
    raw_data: Dict[str, any]

    def __repr__(self):
        # Omit the bulky raw_data dict from the repr.
        return repr_no_raw(self)

    def to_objects(self, error_collector: ErrorCollector):
        """Expand this row into a (Purchase, ScheduledDelivery) pair per item.

        One pair is produced for each entry in ``item_mappings``; parse
        problems are reported through ``error_collector``.  Returns the
        flat list of unsaved model instances.
        """
        objs = []
        for mapping in item_mappings:
            item = asset_name_to_item(mapping.sheet_column_name, error_collector)
            # The row attribute name is derived from the sheet column name.
            quantity = getattr(self, mapping.obj_column_name)
            purchase = models.Purchase(
                item=item,
                quantity=quantity,
                vendor="Aggregate Data (no vendor available)",
                raw_data=self.raw_data,
                order_type=OrderType.Make,
            )
            delivery = models.ScheduledDelivery(
                purchase=purchase, delivery_date=self.delivery_date, quantity=quantity,
            )
            objs.append(purchase)
            objs.append(delivery)
        return objs
# Spreadsheet columns that carry item quantities.  The trailing space in
# "ISO Gowns " matches the header as it appears in the workbook; it is
# stripped when deriving the attribute name below.
sheet_columns = [
    "Face Shields",
    "ISO Gowns "
]
# One Mapping per quantity column: "ISO Gowns " -> attribute "iso_gowns",
# parsed with parse_int_or_zero (blank cells become 0).
item_mappings = [
    Mapping(
        sheet_column_name=column,
        obj_column_name=column.strip().lower().replace(" ", "_"),
        proc=parse_int_or_zero,
    )
    for column in sheet_columns
]
# Sheet-level configuration: which workbook/sheet to read, the column
# mappings (quantities plus the delivery date), and the row type to build.
EDC_PO_TRACKER = SheetMapping(
    data_file=DataFile.SUPPLIERS_PARTNERS_XLSX,
    sheet_name="Daily Delivery Roll up",
    mappings={
        *item_mappings,
        Mapping(sheet_column_name="Date", obj_column_name="delivery_date", proc=parse_date),
    },
    include_raw=True,
    obj_constructor=MakeRow,
    header_row_idx=3
)
|
from django.conf import settings
from django.core.cache import cache
from django.db.models.base import ModelBase
from django.db.models.signals import post_save
from django.dispatch import Signal
from django.utils import six
class RohypnolRegister(object):
    """Registry that deletes cache keys when model signals fire."""

    def __init__(self):
        # The _registry is a dictionary like:
        # {
        #     signal0: {
        #         model0: set(['key0']),
        #         model1: set(['key0','key1'])
        #     },
        #     signal1: {
        #         model0: set(['key2']),
        #         model2: set(['key0','key2'])
        #     },
        # }
        self._registry = {}

    def register(self, models, keys, signals=post_save, **kwargs):
        """Registers a combination of one or more models, one or more keys and
        one or more signals.
        Whenever one of the signals is sent by one of the models, the
        associated cache keys will be deleted.
        By default, if you omit the `signals` parameter it will use the Django
        `post_save` signal.
        Example usage:
        from rohypnol.register import rohypnol
        # This will issue a cache.delete('article_list') whenever an article
        # is saved
        rohypnol.register(Article, 'article_list')
        # Like the one above, but it will work for the custom signal instead
        # of `post_save`
        custom_signal = Signal()
        rohypnol.register(Article, 'article_list', custom_signal)
        # Combining things
        rohypnol.register(Article, 'article_list', (post_save, custom_signal))
        # Even more
        rohypnol.register(Article,
                          ('article_list', 'top_articles'),
                          (post_save, custom_signal))
        # Finally
        rohypnol.register((Article, Category),
                          ('article_list', 'top_articles'),
                          (post_save, custom_signal))
        """
        # Normalize each argument to a list, then populate the nested
        # signal -> model -> key-set structure described in __init__.
        if not isinstance(signals, (list, tuple)):
            signals = [signals]
        for signal in signals:
            if settings.DEBUG:
                # Sanity checks only in DEBUG: asserts vanish under -O.
                err = "{} is not a valid Signal subclass.".format(signal)
                assert isinstance(signal, Signal), err
            self._registry.setdefault(signal, {})
            if not isinstance(models, (list, tuple)):
                models = [models]
            for model in models:
                if settings.DEBUG:
                    err = "{} is not a valid ModelBase subclass.".format(model)
                    assert isinstance(model, ModelBase), err
                self._registry.get(signal).setdefault(model, set())
                if not isinstance(keys, (list, tuple)):
                    keys = [keys]
                for key in keys:
                    self._registry.get(signal).get(model).add(key)

    def connect(self):
        """
        Connects all current registered signals to the cache delete function.
        """
        for signal, models in six.iteritems(self._registry):
            for model, keys in six.iteritems(models):
                # Local function the current signal is going to be
                # connected to.
                # Defining it dynamically allows us to pass in the current
                # set of keys for the given model, but we have to store
                # a strong reference to it to avoid garbage collection.
                def delete_cache(signal, sender=model, keys=keys):
                    cache.delete_many(list(keys))
                signal.connect(delete_cache, sender=model, weak=False, dispatch_uid=signal)

    def disconnect(self):
        """
        Disconnects all current registered signals.
        To reconnect, signals must be registered again.
        """
        # dispatch_uid/sender must match the values used in connect().
        for signal, models in six.iteritems(self._registry):
            for model, keys in six.iteritems(models):
                signal.disconnect(sender=model, weak=False, dispatch_uid=signal)
        self._registry = {}
# Module-level singleton register shared by the whole project.
rohypnol = RohypnolRegister()


def connect_all_signals():
    """Connects all registered signals.
    This code should live in your url.py file in order to be executed once,
    when all your applications are loaded.
    """
    rohypnol.connect()
|
"""
Player package.
Contains radio player methods.
"""
from player import Player
|
class Solution:
    """LeetCode 70 'Climbing Stairs', counted via binomial coefficients."""

    # @param n, an integer
    # @return an integer
    def climbStairs(self, n):
        """Count distinct ways to climb n stairs taking 1 or 2 steps.

        A climb with i two-steps fills n - i slots in total, giving
        C(n - i, i) orderings; summing over all feasible i (0..n//2)
        yields the answer (the (n+1)-th Fibonacci number).
        """
        options = n // 2
        total = 0
        for i in range(options + 1):
            total += self.combinationfunc(i, n - i)
        return total

    def combinationfunc(self, i, k):
        """Binomial coefficient C(k, i) with exact integer arithmetic.

        Fixed: the original used true division ('/'), which returned a
        float and loses precision once the factorials exceed 2**53.
        Integer floor division is exact here because the quotient is an
        integer by construction.
        """
        return self.factorials(k) // (self.factorials(i) * self.factorials(k - i))

    def factorials(self, x):
        """Return x! recursively; 0! == 1! == 1."""
        if x == 1 or x == 0:
            return 1
        return x * self.factorials(x - 1)
# Generated by Django 3.2.8 on 2021-10-18 01:03
from django.db import migrations
class Migration(migrations.Migration):
    """Set the admin plural display name for the ProfissionalEnc model."""

    dependencies = [
        ('ProfissionalEnc', '0001_initial'),
    ]

    operations = [
        # Meta-only change: no database schema is altered.
        migrations.AlterModelOptions(
            name='profissionalenc',
            options={'verbose_name_plural': 'Cadastro de Profissionais'},
        ),
    ]
|
from ...ut.bunch import bunchr, bunchset, bunchdel
from ..base import Mod
from ..mixins import (
ViewMixin,
NoEmptyQueryMixin,
make_blueprint_mixin,
IdentityGuessNameMixin,
make_field_guess_name_mixin
)
name = 'pixiv'  # mod identifier; reused below as the class name/display_name
class Pixiv(
    ViewMixin,
    NoEmptyQueryMixin,
    make_blueprint_mixin(__name__),
    make_field_guess_name_mixin('mode', 'user_id', 'uri', 'username'),
    IdentityGuessNameMixin,
    Mod
):
    """Subscription mod for Pixiv artists and ranking lists."""

    name = name
    display_name = name
    has_advanced_search = True
    description = '订阅喜欢的画师和各种榜单(日榜, 月榜, R18等...), 邮件通知里包含新作的缩略图.'
    normal_search_prompt = '画师主页/id'

    @property
    def empty_query_tests(self):
        """Predicates that decide whether a query counts as empty."""
        return [
            lambda q: not q,
            self._test_empty_standard_query
        ]

    def _test_empty_standard_query(self, query):
        """True when a parsed query's identifying field is blank."""
        from .query import parse
        q = parse(query)
        # Map each query method to the field that must be non-empty.
        field = {
            'user_id': 'user_id',
            'user_uri': 'uri',
            'user_illustrations_uri': 'uri',
            'username': 'username',
        }.get(q['method'])
        return field and not q[field]

    @property
    def carousel(self):
        """URL of the example search shown on the landing carousel."""
        from flask import url_for
        return url_for("main.example_search", kind=name, method="ranking", mode="daily", limit=10)

    def view(self, name):
        """Return the web or email view module for this mod."""
        from .views import web, email
        return {
            'web': web,
            'email': email,
        }[name]

    def changes(self, old, new, **kargs):
        """Diff two fetch results, dispatching on the new query's method."""
        old = parse_result(old)
        new = parse_result(new)
        return {
            'user_id': self.user_arts_changes,
            'user_uri': self.user_arts_changes,
            'user_illustrations_uri': self.user_arts_changes,
            'ranking': self.ranking_changes,
            'username': self.user_arts_changes,
        }[new.query.method](old, new)

    def user_arts_changes(self, old, new):
        """Yield one 'user_art.new' change per artwork URI not seen before."""
        oldmap = {art.uri: art for art in getattr(old, 'arts', [])}
        for art in new.arts:
            if art.uri not in oldmap:
                yield bunchr(kind='user_art.new', art=art)

    def ranking_changes(self, old, new):
        """Yield a single 'ranking' change bundling all newly listed artworks."""
        oldmap = {art.illust_id: art for art in getattr(old, 'arts', [])}
        arts = []
        for art in new.arts:
            if art.illust_id not in oldmap:
                arts.append(art)
        if arts:
            yield bunchr(kind='ranking', mode=new.query.mode, arts=arts)

    def spy(self, query, timeout):
        """Fetch data for a query, with special handling for rankings."""
        from .query import parse
        query = parse(query)
        return {
            'ranking': self.spy_ranking,
        }.get(query.method, self.spy_default)(query, timeout)

    def spy_default(self, query, timeout):
        """Fetch via the base Mod after re-serializing the parsed query."""
        from .query import regular
        return super(Pixiv, self).spy(regular(query), timeout)

    def spy_ranking(self, query, timeout):
        """Validate the ranking mode, then fetch (honoring a 'limit')."""
        from .translate import modemap
        if query.mode not in modemap():
            raise Exception('unknown mode: %s' % query.mode)
        if 'limit' in query:
            return self.spy_limited_ranking(query, timeout)
        return self.spy_default(query, timeout)

    def spy_limited_ranking(self, query, timeout):
        """Fetch the full ranking, then truncate the art list to 'limit'."""
        from ..query import query as search
        from .query import regular
        limit = int(query.limit)
        # The limit is applied client-side: query without it, then trim.
        result = search(
            self.name,
            regular(bunchdel(query, 'limit')),
            timeout
        )
        del result.arts[limit:]
        result.query = query
        return result

    def regular(self, query_text):
        """Return (mod name, canonical query text)."""
        from .query import regular
        return self.name, regular(query_text)
def parse_result(d):
    """Wrap a raw result dict in a bunch, parsing its 'query' entry into a
    query object when present; falsy or query-less inputs pass through."""
    if not d or 'query' not in d:
        return bunchr(d)
    from .query import parse
    return bunchset(d, query=parse(d['query']))
|
from __future__ import print_function
# author - Richard Liao
# Dec 26 2016
import numpy as np
import pandas as pd
import pickle
from collections import defaultdict
import re
from bs4 import BeautifulSoup
import sys
import os
os.environ['KERAS_BACKEND']='tensorflow'
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Dropout, LSTM, GRU, Bidirectional, TimeDistributed
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializers
MAX_SENT_LENGTH = 100  # max tokens kept per sentence
MAX_SENTS = 15  # max sentences kept per document
MAX_NB_WORDS = 200  # vocabulary cap passed to the tokenizer
EMBEDDING_DIM = 100  # word-embedding dimensionality (must match the vector file)
VALIDATION_SPLIT = 0.2  # fraction of data held out for validation
def clean_str(text):
    """Normalize *text*: replace backslashes, apostrophes and double quotes
    with spaces, strip surrounding whitespace, and lower-case the result."""
    for unwanted in ('\\', "'", '"'):
        text = text.replace(unwanted, ' ')
    return text.strip().lower()
# Load the tab-separated corpus; expected columns include 'text' and 'label'.
data_train = pd.read_csv('./dataset/discours_type_classification.csv', sep='\t')
convert_dict={
    'label':'category',
}
# # print(cat_list)
# Cast 'label' to a pandas categorical so integer codes can be derived.
data_train = data_train.astype(convert_dict)
data_train['label_cat'] = data_train.label.cat.codes
print(data_train.shape)
from nltk import tokenize
reviews = []  # per-document list of sentences
labels = []  # integer class code per document
texts = []  # cleaned full text per document (for tokenizer fitting)
for idx in range(data_train.text.shape[0]):
    # Strip any HTML markup before cleaning.
    text = BeautifulSoup(data_train.text[idx],"lxml")
    text = clean_str(text.get_text())
    texts.append(text)
    sentences = tokenize.sent_tokenize(text)
    reviews.append(sentences)
    labels.append(data_train.label_cat[idx])
# NOTE(review): ``nb_words`` is the legacy Keras 1.x argument name
# (``num_words`` in Keras 2) — confirm the installed Keras version.
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
# 3-D tensor: (documents, sentences, tokens), zero-padded.
data = np.zeros((len(texts), MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
for i, sentences in enumerate(reviews):
    for j, sent in enumerate(sentences):
        if j< MAX_SENTS:
            wordTokens = text_to_word_sequence(sent)
            k=0
            for _, word in enumerate(wordTokens):
                # Keep only words whose index is below the vocabulary cap.
                if k<MAX_SENT_LENGTH and tokenizer.word_index[word]<MAX_NB_WORDS:
                    data[i,j,k] = tokenizer.word_index[word]
                    k=k+1
word_index = tokenizer.word_index
print('Total %s unique tokens.' % len(word_index))
# One-hot encode labels, then shuffle and split into train/validation.
labels = to_categorical(np.asarray(labels))
print(('Shape of data tensor:', data.shape))
print(('Shape of label tensor:', labels.shape))
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('Number of positive and negative reviews in traing and validation set')
print(y_train.sum(axis=0))
print(y_val.sum(axis=0))
GLOVE_DIR = "./dataset"
# Load pre-trained word vectors: one line per word, 'word v1 v2 ... vD'.
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'synpaflex_w2v.txt'))
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Total %s word vectors.' % len(embeddings_index))
# Random init so out-of-vocabulary words get non-zero vectors; known
# words are overwritten with their pre-trained embedding below.
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SENT_LENGTH,
                            trainable=True)
# --- Hierarchical LSTM baseline (no attention) ---
# Sentence encoder: token ids -> embeddings -> BiLSTM summary vector.
sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
print(embedded_sequences.shape)
l_lstm = Bidirectional(LSTM(128))(embedded_sequences)
sentEncoder = Model(sentence_input, l_lstm)
# Document model: apply the sentence encoder to each sentence, then a
# second BiLSTM over the sentence vectors, then a 3-way softmax.
review_input = Input(shape=(MAX_SENTS,MAX_SENT_LENGTH), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(LSTM(128))(review_encoder)
preds = Dense(3, activation='softmax')(l_lstm_sent)
model = Model(review_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print("model fitting - Hierachical LSTM")
print(model.summary())
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=10, batch_size=32)
# building Hierachical Attention network
# Fresh embedding matrix/layer for the second model so its weights are
# trained independently of the baseline above.
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SENT_LENGTH,
                            trainable=True)
class AttLayer(Layer):
    """Attention layer for hierarchical attention networks (Yang et al. 2016).

    Learns a context vector W; each timestep scores tanh(x . W), scores are
    softmax-normalized over time, and the output is the attention-weighted
    sum of the inputs: (batch, steps, features) -> (batch, features).
    """

    def __init__(self, **kwargs):
        self.init = initializers.get('normal')
        #self.input_spec = [InputSpec(ndim=3)]
        super(AttLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape)==3
        # Wrap the initialized values in a backend variable so the weight is
        # an updatable tensor; the original assigned the raw initializer
        # output, which the optimizer cannot train.
        self.W = K.variable(self.init((input_shape[-1],)))
        self.trainable_weights = [self.W]
        super(AttLayer, self).build(input_shape)  # be sure you call this somewhere!

    def call(self, x, mask=None):
        eij = K.tanh(K.dot(x, self.W))
        ai = K.exp(eij)
        # The original used Theano-only ``.dimshuffle``/``.sum`` tensor
        # methods, which crash under the TensorFlow backend selected at the
        # top of this script; K.expand_dims/K.sum are backend-agnostic.
        weights = ai / K.expand_dims(K.sum(ai, axis=1), axis=1)
        weighted_input = x * K.expand_dims(weights, axis=-1)
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], input_shape[-1])
# --- Hierarchical Attention Network: sentence encoder with word attention ---
sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(128, return_sequences=True))(embedded_sequences)
l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttLayer()(l_dense)
sentEncoder = Model(sentence_input, l_att)
# Document model: sentence encoder per sentence, then sentence attention.
review_input = Input(shape=(MAX_SENTS,MAX_SENT_LENGTH), dtype='int32')
review_encoder = TimeDistributed(sentEncoder)(review_input)
l_lstm_sent = Bidirectional(GRU(128, return_sequences=True))(review_encoder)
l_dense_sent = TimeDistributed(Dense(200))(l_lstm_sent)
l_att_sent = AttLayer()(l_dense_sent)
preds = Dense(3, activation='softmax')(l_att_sent)
model = Model(review_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print("model fitting - Hierachical attention network")
model.fit(x_train, y_train, validation_data=(x_val, y_val),
          epochs=10, batch_size=32)
# Evaluation fixes: the original called ``model_CNN.predict`` (an undefined
# name) and never imported ``metrics``; also classification_report expects
# class ids, so the one-hot y_val must be argmax-ed like the predictions.
from sklearn import metrics
predicted = np.argmax(model.predict(x_val), axis=1)
print(metrics.classification_report(np.argmax(y_val, axis=1), predicted))
import os
import smtplib  # SMTP client session objects used to send mail
import imghdr  # determines the type of image contained in a file
from email.message import EmailMessage

# Grab the credentials from environment variables stored on your system.
Email_id = os.environ.get('EMAIL_ID')  # sender's email address
Email_password = os.environ.get('EMAIL_PASSWORD')  # sender's password
Test_email_id = input("Enter Reciever Email")  # receiver's email
# For security it is recommended to read the sender's address and password
# from the environment; you could also assign literal strings if needed.

# Craft the email.
msg = EmailMessage()  # create an email object
msg['Subject'] = 'Invitation for a Chat'  # subject line
msg['From'] = Email_id  # sender's email address
msg['To'] = Test_email_id  # receiver's email address
# For multiple receivers, read the addresses into a list of strings
# (e.g. from a .csv file) and pass the list.
msg.set_content('Hey! I wanted to ask you out for a chat over a bowl of sizzling brownies topped with chocolate ice-cream over this weekend')

# Paths of the image attachments.
files = ['assets/icecream.jpg','assets/pastry.jpg']
for file in files:
    # Images must be in the working directory or given by full path.
    with open(file,'rb') as f:
        file_data = f.read()
        # Image-type detection is not required when sending PDFs.
        file_type = imghdr.what(f.name)  # determine the image subtype
        file_name = f.name
    # Add the attachment to the message.
    msg.add_attachment(file_data,maintype='image',subtype=file_type,filename=file_name)
    # For PDFs use: maintype='application' and subtype='octet_stream'.

# Set up an SMTP session over SSL.
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
    smtp.login(Email_id, Email_password)  # authentication
    smtp.send_message(msg)  # send the message

# If you hit a timeout error with SSL, use STARTTLS on port 587 instead:
# uncomment the following and delete the SMTP_SSL session above.
#with smtplib.SMTP('smtp.gmail.com', 587) as smtp: :
#    smtp.ehlo()
#    smtp.starttls() # encrypt the traffic using Transport Layer Security
#    smtp.ehlo()
#    smtp.login(Email_id, Email_password) # authentication
#    smtp.send_message(msg) # send the message
|
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSURLCache(TestCase):
    """PyObjC bridge checks for the Foundation NSURLCache API."""

    def testConstants(self):
        # NSURLCacheStoragePolicy enum values as defined by Foundation.
        self.assertEqual(Foundation.NSURLCacheStorageAllowed, 0)
        self.assertEqual(Foundation.NSURLCacheStorageAllowedInMemoryOnly, 1)
        self.assertEqual(Foundation.NSURLCacheStorageNotAllowed, 2)

    @min_os_level("10.10")
    def testMethods10_10(self):
        # Argument 1 (the completion handler) must be bridged as a block
        # taking a single object argument and returning void.
        self.assertArgIsBlock(
            Foundation.NSURLCache.getCachedResponseForDataTask_completionHandler_,
            1,
            b"v@",
        )
|
"""
Functions for working with iRefIndex in terms of NetworkX graphs (instead of graph_tool)
"""
import numpy as np # for np.Inf TODO alternative?
import networkx as nx
import itertools
from .irefindex_parser import *
def get_ppi(fh, n_rows=None, hgnc=False):
    """Parse ppi network from irefindex
    Parameters
    ----------
    fh : file handle
        irefindex data
    n_rows : int
        parse at most n_rows human-human data rows from fh
    hgnc : boolean
        if true, add hgnc identifiers of interactors as node attributes
    Returns
    -------
    G : networkx undirected graph
        ppi network
    """
    try:
        fh.seek(0)
    except AttributeError:
        # non-seekable stream: assume it is already positioned at the start
        pass
    header = parse_header(fh)
    if n_rows is None:
        # then parse all data rows (np.inf: np.Inf was removed in numpy 2.0)
        n_rows = np.inf
    # Construct interaction graph
    G = nx.Graph()
    i = 0
    for line in fh:
        line = line.strip()
        datum = line.split("\t")
        if not is_human_human(header, datum):
            # include only human-human interactions
            continue
        id_a, id_b = get_checksums(header, datum)
        if hgnc:
            # then add hgnc symbols as a node attribute; keyword expansion
            # works on both networkx 1.x and 2.x (the positional attr_dict
            # argument was removed in networkx 2.0)
            hgncs_a, hgncs_b = get_hgnc_pair(header, datum)
            G.add_node(id_a, **{HGNCS_KEY: hgncs_a})
            G.add_node(id_b, **{HGNCS_KEY: hgncs_b})
        G.add_edge(id_a, id_b)
        i += 1
        if i >= n_rows:
            # was "i > n_rows", which parsed one row more than requested
            break
    return G
def get_alt_id_ppi(fh, id_type='hgnc', n_rows=None):
    """
    Construct a Protein-Protein interaction network from <fh> where nodes are identifiers of the type <id_type>.
    If an iRefIndex interactor identifier maps to multiple identifiers of the specified type, the resulting network
    contains all edges in the cartesian product of idsA and idsB.
    Parameters
    ----------
    fh : io-like
        irefindex database file
    id_type : string
        One of crogid, entrezgene/locuslink, genbank_protein_gi, hgnc, icrogid, irogid, pdb, refseq, rogid, uniprotkb
        see get_alias_names or KNOWN_ALIASES
    n_rows : int
        stop reading fh after <n_rows> lines
    Returns
    -------
    G : nx.Graph
    See also get_ppi_hgnc
    """
    try:
        fh.seek(0)
    except AttributeError:
        # "ArgumentError" does not exist in Python and raising it was itself
        # a NameError; TypeError is the conventional error for a bad argument
        raise TypeError("fh must implement seek")
    if id_type not in KNOWN_ALIASES:
        # the format arguments were previously passed as extra exception args
        # and never interpolated into the message
        raise ValueError("id_type: {} is invalid and not one of {}".format(
            id_type, ", ".join(KNOWN_ALIASES)))
    min_lpr, max_lpr = get_lpr_extrema(fh)
    fh.seek(0)
    header = parse_header(fh)
    if n_rows is None:
        # then parse all data rows (np.inf: np.Inf was removed in numpy 2.0)
        n_rows = np.inf
    # Construct interaction graph
    G = nx.Graph()
    i = 0
    for line in fh:
        line = line.rstrip()
        datum = line.split("\t")
        lpr = get_lpr(header, datum)
        if lpr is None:
            # then assign worst confidence score
            lpr = max_lpr
        weight = lpr_to_conf(min_lpr, max_lpr, lpr)
        if not is_human_human(header, datum):
            # include only human-human interactions
            continue
        ids_a, ids_b = get_id_pair(header, datum, id_type=id_type)
        for id_pair in itertools.product(ids_a, ids_b):
            # weight= keyword works on both networkx 1.x and 2.x; the
            # positional attr_dict argument was removed in networkx 2.0
            if G.has_edge(id_pair[0], id_pair[1]):
                # take the maximum edge weight ("confidence")
                edge_data = G.get_edge_data(id_pair[0], id_pair[1])
                if weight > edge_data['weight']:
                    # then update the edge weight, otherwise leave the edge as is
                    G.add_edge(id_pair[0], id_pair[1], weight=weight)
            else:
                G.add_edge(id_pair[0], id_pair[1], weight=weight)
        i += 1
        if i >= n_rows:
            # was "i > n_rows", which read one row more than requested
            break
    return G
def get_ppi_hgnc(fh, n_rows=None):
    """See get_ppi. As an alternative to hgnc=True, construct a network with
    the networkx node identifiers as hgnc symbols. This results in a different
    network than get_ppi(fh, hgnc=True) because not all the interactors in irefindex
    have hgnc identifiers.
    """
    try:
        fh.seek(0)
    except AttributeError:
        # non-seekable stream: assume it is already positioned at the start
        pass
    header = parse_header(fh)
    if n_rows is None:
        # then parse all data rows (np.inf: np.Inf was removed in numpy 2.0)
        n_rows = np.inf
    # Construct interaction graph
    G = nx.Graph()
    i = 0
    for line in fh:
        line = line.strip()
        datum = line.split("\t")
        if not is_human_human(header, datum):
            # include only human-human interactions
            continue
        hgncs_a, hgncs_b = get_hgnc_pair(header, datum)
        # one edge per combination when an interactor maps to several symbols
        for hgnc_pair in itertools.product(hgncs_a, hgncs_b):
            G.add_edge(hgnc_pair[0], hgnc_pair[1])
        i += 1
        if i >= n_rows:
            # was "i > n_rows", which read one row more than requested
            break
    return G
def components(fh, **ppi_opts):
    """Return a list of sets of connected components
    Parameters
    -----------
    fh : file handle
        irefindex data file
    ppi_opts : dict
        keyword arguments for get_ppi
    Returns
    -------
    comps_l : list of sets
        Each set member is a tuple for an iRefIndex interactor identifier (<id-type>, <id-value>)
        e.g. ('rogid', 'NqjvDnObnt6a2DcQGJjMD2/mI3I9606')
    """
    graph = get_ppi(fh, **ppi_opts)
    # connected_components yields sets; order them largest-first
    return sorted(nx.connected_components(graph), key=len, reverse=True)
def write_edge_per_line(G, ofh):
    """Write graph in form "<id_a>\t<id_b>\n"

    Parameters
    ----------
    G : networkx graph
        nodes are tuples of (<id_type>, <id_value>); only the value part is written
    ofh : writable file-like object
    """
    # G.edges_iter() was removed in networkx 2.0; G.edges() behaves the same
    # here on both 1.x (list of 2-tuples) and 2.x (EdgeView)
    for n1, n2 in G.edges():
        ofh.write("{}\t{}\n".format(n1[1], n2[1]))
def get_adj_mat(G):
    """Represent ppi network as adjacency matrix
    Parameters
    ----------
    G : networkx graph
        ppi network, see get_ppi()
    Returns
    -------
    adj : square sparse scipy matrix
        (i,j) has a 1 if there is an interaction reported by irefindex
    ids : list
        same length as adj, ith index contains irefindex unique identifier for gene whose interactions are
        reported in the ith row of adj
    """
    # list() so the return matches the documented type on networkx 2.x,
    # where G.nodes() is a NodeView rather than a list
    ids = list(G.nodes())
    adj = nx.to_scipy_sparse_matrix(G, nodelist=ids, dtype=bool)
    return adj, ids
def get_laplacian(G):
    """Get graph laplacian
    Parameters
    ----------
    G : networkx graph
        ppi network
    Returns
    -------
    laplacian : square scipy sparse matrix
        graph laplacian
    ids : list
        same length as laplacian containing node ids for each index
    """
    # list() so the return matches the documented type on networkx 2.x,
    # where G.nodes() is a NodeView rather than a list
    ids = list(G.nodes())
    laplacian = nx.laplacian_matrix(G, nodelist=ids)
    return laplacian, ids
def filter_node_ids(G, ids):
    """
    See irefindex_parser.filter_ids
    """
    # split ids by membership in G (anything supporting ``in`` works)
    present = [node_id for node_id in ids if node_id in G]
    absent = [node_id for node_id in ids if node_id not in G]
    return (present, absent)
|
import json
import re
from datetime import datetime, timedelta
def bytes2json(bytes_in):
    """Decode UTF-8 bytes and parse the result as JSON."""
    decoded = bytes_in.decode('utf-8')
    return json.loads(decoded)
def clean_js_timestamp(raw_js_datestring=None):
    """Parse a .NET/JS-style date string like '/Date(1431113400000-0700)/'.

    Returns a naive datetime built from the epoch seconds interpreted as UTC.
    The millisecond digits and the embedded timezone offset are deliberately
    ignored (the commented-out offset application below was a prior
    experiment; the timestamp itself is already UTC).

    Raises AttributeError if the string does not match the expected pattern.
    """
    # raw string: '\(' and '\d' are invalid escapes in a normal string literal
    # (DeprecationWarning, and an error in future Python versions)
    date_pattern = r'Date\((\d*)(\d{3})([-+]\d{2})00\)'
    matches = re.search(date_pattern, raw_js_datestring)
    timestamp, _millisecs, _tz_offset = matches.groups()
    # dt_obj = datetime.utcfromtimestamp(int(timestamp)) + timedelta(hours=int(_tz_offset))
    dt_obj = datetime.utcfromtimestamp(int(timestamp))
    return dt_obj
|
from django.db import models
from django.db.models.signals import pre_save, post_save
from .utils import unique_slug_generator
from django.utils.dateformat import DateFormat, TimeFormat
from estatisticas_facebook.util.graph import *
def getPageInfo(page):
    """Fill ``page.pretty_name`` and ``page.id`` from the Graph API, looked up by page name."""
    api = getNewGraphApi(page.name)
    info = api.get_object(page.name)
    print(info)
    page.pretty_name = info['name']
    page.id = info['id']
def getPageInsights(args):
    """Replace all stored PageInsights rows with fresh day-period metrics for one page.

    ``args`` must contain 'id' (page id) and 'since' (start of the window).
    """
    # full refresh: drop everything before re-fetching
    PageInsights.objects.all().delete()
    page_id = args['id']
    query = ('{}/insights?period=day&metric=page_fan_adds_unique,'
             'page_impressions_unique,page_engaged_users,page_stories,'
             'page_storytellers&since={}').format(page_id, str(args['since']))
    raw_json = get_graph_object(page_id, query)
    for metric in raw_json['data']:
        print(metric['name'])
        # one row per (metric, sampled value)
        for sample in metric['values']:
            PageInsights(
                name=metric['name'],
                period=metric['period'],
                title=metric['title'],
                description=metric['description'],
                end_time=sample['end_time'],
                value=sample['value'],
                page_id=page_id,
            ).save()
# Create your models here.
class Page(models.Model):
    """A tracked Facebook page plus its Graph API paging state."""
    # Facebook page id (primary key); filled in from the Graph API by
    # page_pre_save_reciever on first save
    id = models.CharField(primary_key = True, max_length = 45)
    # page name used to query the Graph API; must be unique
    name = models.CharField(max_length = 18000, unique=True)
    # human-readable name returned by the Graph API
    pretty_name = models.CharField(max_length = 18000,null=True, blank=True)
    access_token = models.CharField(max_length = 18000,null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    # generated by slug_pre_save_reciever when missing
    slug = models.SlugField(null=True, blank=True)
    # cursor for resuming post pagination — TODO confirm against the post fetcher
    post_paging = models.CharField(max_length = 512, null=True)
    post_since = models.DateTimeField(null=True)
    def __str__(self):
        return self.name
    @property
    def title(self):
        # alias for callers expecting a ``title`` attribute; mirrors name
        return self.name
class PageInsights(models.Model):
    """One day-period insight sample for a Page metric (see getPageInsights)."""
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory; behavior is unchanged
    page = models.ForeignKey(Page, on_delete=models.CASCADE)
    value = models.IntegerField()
    end_time = models.DateTimeField()
    period = models.CharField(max_length = 50)
    title = models.CharField(max_length = 4500)
    description = models.CharField(max_length = 4500)
    name = models.CharField(max_length = 4500)
    created = models.DateTimeField(auto_now_add=True)
    # generated by slug_pre_save_reciever when missing
    slug = models.SlugField(null=True, blank=True)
    def __str__(self):
        return str(DateFormat(self.end_time).format('Y-m-d')) +': '+ self.title+' '+str(self.value)
def page_pre_save_reciever(sender, instance, *args, **kwargs):
    """pre_save hook: fetch name/id from the Graph API only when not yet populated."""
    if instance.pretty_name:
        return
    getPageInfo(instance)
def slug_pre_save_reciever(sender, instance, *args, **kwargs):
    """pre_save hook: assign a unique slug the first time an instance is saved."""
    if instance.slug:
        return
    instance.slug = unique_slug_generator(instance)
#def rl_post_save_reciever(sender, instance, *args, **kwargs):
# Wire the pre-save hooks: slug generation for both models, plus the Graph
# API name/id lookup for Page.
pre_save.connect(slug_pre_save_reciever, sender=PageInsights)
pre_save.connect(slug_pre_save_reciever, sender=Page)
pre_save.connect(page_pre_save_reciever, sender=Page)
#post_save.connect(rl_pre_save_reciever, sender=PageInsights)
|
"""
Jacobian of a general hyperelliptic curve
"""
# ****************************************************************************
# Copyright (C) 2006 David Kohel <kohel@maths.usyd.edu>
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
# ****************************************************************************
from sage.rings.all import Integer, QQ
from sage.misc.lazy_attribute import lazy_attribute
from sage.schemes.jacobians.abstract_jacobian import Jacobian_generic
from . import jacobian_homset
from . import jacobian_morphism
from sage.misc.lazy_import import lazy_import
from .jacobian_endomorphism_utils import get_is_geom_field, is_geom_trivial_when_field
lazy_import('sage.interfaces.genus2reduction', ['genus2reduction', 'Genus2reduction'])
class HyperellipticJacobian_generic(Jacobian_generic):
"""
EXAMPLES::
sage: FF = FiniteField(2003)
sage: R.<x> = PolynomialRing(FF)
sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560
sage: C = HyperellipticCurve(f)
sage: J = C.jacobian()
sage: a = x**2 + 376*x + 245; b = 1015*x + 1368
sage: X = J(FF)
sage: D = X([a,b])
sage: D
(x^2 + 376*x + 245, y + 988*x + 635)
sage: J(0)
(1)
sage: D == J([a,b])
True
sage: D == D + J(0)
True
A more extended example, demonstrating arithmetic in J(QQ) and
J(K) for a number field K/QQ.
::
sage: P.<x> = PolynomialRing(QQ)
sage: f = x^5 - x + 1; h = x
sage: C = HyperellipticCurve(f,h,'u,v')
sage: C
Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: PP = C.ambient_space()
sage: PP
Projective Space of dimension 2 over Rational Field
sage: C.defining_polynomial()
-x0^5 + x0*x1*x2^3 + x1^2*x2^3 + x0*x2^4 - x2^5
sage: C(QQ)
Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: K.<t> = NumberField(x^2-2)
sage: C(K)
Set of rational points of Hyperelliptic Curve over Number Field in t with defining polynomial x^2 - 2 defined by v^2 + u*v = u^5 - u + 1
sage: P = C(QQ)(0,1,1); P
(0 : 1 : 1)
sage: P == C(0,1,1)
True
sage: C(0,1,1).parent()
Set of rational points of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: P1 = C(K)(P)
sage: P2 = C(K)([2,4*t-1,1])
sage: P3 = C(K)([-1/2,1/8*(7*t+2),1])
sage: P1, P2, P3
((0 : 1 : 1), (2 : 4*t - 1 : 1), (-1/2 : 7/8*t + 1/4 : 1))
sage: J = C.jacobian()
sage: J
Jacobian of Hyperelliptic Curve over Rational Field defined by v^2 + u*v = u^5 - u + 1
sage: Q = J(QQ)(P); Q
(u, v - 1)
sage: for i in range(6): Q*i
(1)
(u, v - 1)
(u^2, v + u - 1)
(u^2, v + 1)
(u, v + 1)
(1)
sage: Q1 = J(K)(P1); print("%s -> %s"%( P1, Q1 ))
(0 : 1 : 1) -> (u, v - 1)
sage: Q2 = J(K)(P2); print("%s -> %s"%( P2, Q2 ))
(2 : 4*t - 1 : 1) -> (u - 2, v - 4*t + 1)
sage: Q3 = J(K)(P3); print("%s -> %s"%( P3, Q3 ))
(-1/2 : 7/8*t + 1/4 : 1) -> (u + 1/2, v - 7/8*t - 1/4)
sage: R.<x> = PolynomialRing(K)
sage: Q4 = J(K)([x^2-t,R(1)])
sage: for i in range(4): Q4*i
(1)
(u^2 - t, v - 1)
(u^2 + (-3/4*t - 9/16)*u + 1/2*t + 1/4, v + (-1/32*t - 57/64)*u + 1/2*t + 9/16)
(u^2 + (1352416/247009*t - 1636930/247009)*u - 1156544/247009*t + 1900544/247009, v + (-2326345442/122763473*t + 3233153137/122763473)*u + 2439343104/122763473*t - 3350862929/122763473)
sage: R2 = Q2*5; R2
(u^2 - 3789465233/116983808*u - 267915823/58491904, v + (-233827256513849/1789384327168*t + 1/2)*u - 15782925357447/894692163584*t)
sage: R3 = Q3*5; R3
(u^2 + 5663300808399913890623/14426454798950909645952*u - 26531814176395676231273/28852909597901819291904, v + (253155440321645614070860868199103/2450498420175733688903836378159104*t + 1/2)*u + 2427708505064902611513563431764311/4900996840351467377807672756318208*t)
sage: R4 = Q4*5; R4
(u^2 - 3789465233/116983808*u - 267915823/58491904, v + (233827256513849/1789384327168*t + 1/2)*u + 15782925357447/894692163584*t)
Thus we find the following identity::
sage: 5*Q2 + 5*Q4
(1)
Moreover the following relation holds in the 5-torsion subgroup::
sage: Q2 + Q4 == 2*Q1
True
TESTS::
sage: k.<a> = GF(9); R.<x> = k[]
sage: J1 = HyperellipticCurve(x^3 + x - 1, x+a).jacobian()
sage: FF = FiniteField(2003)
sage: R.<x> = PolynomialRing(FF)
sage: f = x**5 + 1184*x**3 + 1846*x**2 + 956*x + 560
sage: J2 = HyperellipticCurve(f).jacobian()
sage: J1 == J1
True
sage: J1 == J2
False
"""
def dimension(self):
    """
    Return the dimension of this Jacobian.
    OUTPUT:
    Integer
    EXAMPLES::
        sage: k.<a> = GF(9); R.<x> = k[]
        sage: HyperellipticCurve(x^3 + x - 1, x+a).jacobian().dimension()
        1
        sage: g = HyperellipticCurve(x^6 + x - 1, x+a).jacobian().dimension(); g
        2
        sage: type(g)
        <... 'sage.rings.integer.Integer'>
    """
    # dim J(C) equals the genus of C; coerce to a Sage Integer
    return Integer(self.curve().genus())
def point(self, mumford, check=True):
    """
    Return the point of this Jacobian determined by ``mumford``, a
    Mumford representation of a divisor.

    ``check`` is accepted for signature compatibility; validation is
    delegated to the point constructor.
    """
    try:
        return self(self.base_ring())(mumford)
    except AttributeError:
        # failures while interpreting the data surface as AttributeError;
        # report them uniformly as invalid input
        raise ValueError("Arguments must determine a valid Mumford divisor.")
def _point_homset(self, *args, **kwds):
    """Return the Hom-set of divisor classes used for point sets of this Jacobian."""
    return jacobian_homset.JacobianHomset_divisor_classes(*args, **kwds)
def _point(self, *args, **kwds):
    """Return the point class (Mumford divisor class morphism) used by this Jacobian."""
    return jacobian_morphism.JacobianMorphism_divisor_class_field(*args, **kwds)
####################################################################
# Some properties of geometric Endomorphism ring and algebra
####################################################################
@lazy_attribute
def _have_established_geometrically_trivial(self):
    r"""
    Initialize the flag which determines whether or not we have
    already established if the geometric endomorphism ring is
    trivial.
    This is related to the warning at the top of the
    `jacobian_endomorphism_utils.py` module.
    INPUT:
    - ``self`` -- The Jacobian.
    OUTPUT:
    The boolean ``False``; this will be updated by other methods.
    EXAMPLES:
    This is LMFDB curve 262144.d.524288.2::
        sage: R.<x> = QQ[]
        sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J._have_established_geometrically_trivial
        False
    """
    # lazy_attribute: evaluated once and cached on the instance; the
    # geometric_endomorphism_* methods overwrite it with True once
    # triviality has been established
    return False
@lazy_attribute
def _have_established_geometrically_field(self):
    r"""
    Initialize the flag which determines whether or not we have
    already established if the geometric endomorphism algebra is
    a field.
    This is related to the warning at the top of the
    `jacobian_endomorphism_utils.py` module.
    INPUT:
    - ``self`` -- The Jacobian.
    OUTPUT:
    The boolean ``False``; this will be updated by other methods.
    EXAMPLES:
    This is LMFDB curve 262144.d.524288.2::
        sage: R.<x> = QQ[]
        sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J._have_established_geometrically_field
        False
    """
    # lazy_attribute: evaluated once and cached on the instance; updated to
    # True by geometric_endomorphism_algebra_is_field. (Docstring previously
    # said "ring is trivial" — a copy/paste from the sibling attribute.)
    return False
def geometric_endomorphism_algebra_is_field(self, B=200, proof=False):
    r"""
    Return whether the geometric endomorphism algebra is a field.
    This implies that the Jacobian of the curve is geometrically
    simple. It is based on Algorithm 4.10 from [Lom2019]_
    INPUT:
    - ``B`` -- (default: 200) the bound which appears in the statement of
      the algorithm from [Lom2019]_
    - ``proof`` -- (default: False) whether or not to insist on a provably
      correct answer. This is related to the warning in the docstring
      of this module: if this function returns ``False``, then
      strictly speaking this has not been proven to be ``False`` until one
      has exhibited a non-trivial endomorphism, which these methods are not
      designed to carry out. If one is convinced that this method should
      return ``True``, but it is returning ``False``, then this can be
      exhibited by increasing `B`.
    OUTPUT:
    Boolean indicating whether or not the geometric endomorphism
    algebra is a field.
    EXAMPLES:
    This is LMFDB curve 262144.d.524288.2 which has QM. Although its
    Jacobian is geometrically simple, the geometric endomorphism algebra
    is not a field::
        sage: R.<x> = QQ[]
        sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_algebra_is_field()
        False
    This is LMFDB curve 50000.a.200000.1::
        sage: f = 8*x^5 + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_algebra_is_field()
        True
    """
    if self._have_established_geometrically_field:
        # cached positive result from an earlier call
        return True
    C = self.curve()
    if C.genus() != 2:
        raise NotImplementedError("Current implementation requires the curve to be of genus 2")
    if C.base_ring() != QQ:
        raise NotImplementedError("Current implementation requires the curve to be defined over the rationals")
    f, h = C.hyperelliptic_polynomials()
    if h != 0:
        raise NotImplementedError("Current implementation requires the curve to be in the form y^2 = f(x)")
    red_data = genus2reduction(0,f)
    cond_C = red_data.conductor # WARNING: this is only the prime_to_2 conductor.
    bad_primes = cond_C.prime_divisors()
    # stashed for reuse by geometric_endomorphism_ring_is_ZZ
    self._bad_primes = bad_primes
    is_abs_simp, is_def_geom_trivial = get_is_geom_field(f, C, bad_primes, B)
    if is_def_geom_trivial:
        # the stronger fact was established along the way; cache it
        self._have_established_geometrically_trivial = True
    if is_abs_simp:
        self._have_established_geometrically_field = True
        return True
    if proof:
        raise NotImplementedError("Rigorous computation of lower bounds of endomorphism algebras has not yet been implemented.")
    return False
def geometric_endomorphism_ring_is_ZZ(self, B=200, proof=False):
    r"""
    Return whether the geometric endomorphism ring of ``self`` is the
    integer ring `\ZZ`.
    INPUT:
    - ``B`` -- (default: 200) the bound which appears in the statement of
      the algorithm from [Lom2019]_
    - ``proof`` -- (default: False) whether or not to insist on a provably
      correct answer. This is related to the warning in the module docstring
      of `jacobian_endomorphisms.py`: if this function returns ``False``, then
      strictly speaking this has not been proven to be ``False`` until one has
      exhibited a non-trivial endomorphism, which the methods in that module
      are not designed to carry out. If one is convinced that this method
      should return ``True``, but it is returning ``False``, then this can be
      exhibited by increasing `B`.
    OUTPUT:
    Boolean indicating whether or not the geometric endomorphism
    ring is isomorphic to the integer ring.
    EXAMPLES:
    This is LMFDB curve 603.a.603.2::
        sage: R.<x> = QQ[]
        sage: f = 4*x^5 + x^4 - 4*x^3 + 2*x^2 + 4*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        True
    This is LMFDB curve 1152.a.147456.1 whose geometric endomorphism ring
    is isomorphic to the group of 2x2 matrices over `\QQ`::
        sage: f = x^6 - 2*x^4 + 2*x^2 - 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    This is LMFDB curve 20736.k.373248.1 whose geometric endomorphism ring
    is isomorphic to the group of 2x2 matrices over a CM field::
        sage: f = x^6 + 8
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    This is LMFDB curve 708.a.181248.1::
        sage: R.<x> = QQ[]
        sage: f = -3*x^6 - 16*x^5 + 36*x^4 + 194*x^3 - 164*x^2 - 392*x - 143
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        True
    This is LMFDB curve 10609.a.10609.1 whose geometric endomorphism ring
    is an order in a real quadratic field::
        sage: f = x^6 + 2*x^4 + 2*x^3 + 5*x^2 + 6*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    This is LMFDB curve 160000.c.800000.1 whose geometric endomorphism ring
    is an order in a CM field::
        sage: f = x^5 - 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    This is LMFDB curve 262144.d.524288.2 whose geometric endomorphism ring
    is an order in a quaternion algebra::
        sage: f = x^5 + x^4 + 4*x^3 + 8*x^2 + 5*x + 1
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    This is LMFDB curve 578.a.2312.1 whose geometric endomorphism ring
    is `\QQ \times \QQ`::
        sage: f = 4*x^5 - 7*x^4 + 10*x^3 - 7*x^2 + 4*x
        sage: C = HyperellipticCurve(f)
        sage: J = C.jacobian()
        sage: J.geometric_endomorphism_ring_is_ZZ()
        False
    """
    if self._have_established_geometrically_trivial:
        # cached positive result from an earlier call
        return True
    is_abs_simple = self.geometric_endomorphism_algebra_is_field(B=B, proof=proof)
    if self._have_established_geometrically_trivial:
        # the call above may have established triviality as a side effect
        return True
    if is_abs_simple and is_geom_trivial_when_field(self.curve(), self._bad_primes):
        return True
    if proof:
        raise NotImplementedError("Rigorous computation of lower bounds of endomorphism rings has not yet been implemented.")
    return False
|
import multiprocessing
import queue
from queue import Queue
import sys
from spats_shape_seq.pair import Pair
from spats_shape_seq.parse import FastqWriter, SamWriter
from spats_shape_seq.util import _debug, _warn
from spats_shape_seq.mask import PLUS_PLACEHOLDER, MINUS_PLACEHOLDER
class SpatsWorker(object):
'''Manages multiprocessing aspects of Spats.
'''
def __init__(self, run, processor, pair_db, result_set_id = None, force_mask = None):
    # run configuration (quiet, num_workers, masks, parse settings, ...)
    self._run = run
    # object providing process_pair(), reset_counts(), counters, uses_tags
    self._processor = processor
    # optional pair database used for result writeback
    self._pair_db = pair_db
    # when set, per-pair results are written back under this result set id
    self._result_set_id = result_set_id
    # when set, every pair is forced to this mask instead of detection
    self._force_mask = force_mask
    # spawned multiprocessing.Process handles (see _createWorkers)
    self._workers = []
def _make_result(self, ident, pair, tagged = False):
res = [ ident,
pair.target.rowid if pair.target else None,
pair.mask_label,
pair.site if pair.has_site else -1,
pair.end if pair.has_site else -1,
len(pair.mutations) if pair.mutations else -1,
pair.multiplicity,
pair.failure ]
if tagged:
res.append(pair.tags)
return res
def _worker(self, worker_id):
    """Worker-process loop: pull batches from _pairs_to_do, process each pair,
    optionally queue writeback rows, and finally report counter data."""
    try:
        processor = self._processor
        processor.reset_counts()
        if self._pair_db:
            self._pair_db.worker_id = worker_id
        writeback = bool(self._result_set_id)
        tagged = processor.uses_tags
        use_quality = self._run._parse_quality
        # one Pair object reused for every read to avoid reallocation
        pair = Pair()
        while True:
            pairs = self._pairs_to_do.get()
            if not pairs:
                # None is the shutdown sentinel pushed by run()
                break
            results = []
            for lines in pairs:
                # lines layout: [multiplicity, r1, r2, identifier, q1, q2]
                # — inferred from the indices used here; TODO confirm
                if not pair.set_from_data(lines[3], str(lines[1]), str(lines[2]), lines[0]):
                    print('\nskipping empty pair: {}'.format(lines[3]))
                    continue
                if use_quality:
                    pair.r1.quality = str(lines[4])
                    pair.r2.quality = str(lines[5])
                if self._force_mask:
                    pair.set_mask(self._force_mask)
                processor.process_pair(pair)
                #if pair.failure:
                #    print('FAIL: {}'.format(pair.failure))
                if writeback:
                    results.append(self._make_result(lines[3], pair, tagged))
            if writeback:
                self._results.put(results)
            if not self._run.quiet:
                sys.stdout.write('.')#str(worker_id))
                sys.stdout.flush()
        # hand the accumulated counters back to the parent process
        self._pairs_done.put(processor.counters.count_data())
    except:
        print("**** Worker exception, aborting...")
        raise
def _createWorkers(self, num_workers):
    """Spawn ``num_workers`` processes running ``_worker`` and start them all."""
    for worker_id in range(num_workers):
        proc = multiprocessing.Process(target = self._worker, args = (worker_id,))
        self._workers.append(proc)
        proc.start()
    if not self._run.quiet:
        print("Created {} workers".format(num_workers))
def _joinWorkers(self):
for w in self._workers:
w.join()
def run(self, pair_iterator):
    """Process all pairs from ``pair_iterator``, fanning batches out to
    worker processes; falls back to run_simple() for a single worker.

    NOTE(review): indentation reconstructed — the flow-control nesting of
    the writeback drain inside the feeder loop should be confirmed against
    the original source.
    """
    num_workers = max(1, self._run.num_workers or multiprocessing.cpu_count())
    if 1 == num_workers:
        # no point paying multiprocessing overhead for one worker
        self.run_simple(pair_iterator)
        return
    # bounded so the feeder cannot run unboundedly ahead of the workers
    self._pairs_to_do = multiprocessing.Queue(maxsize = 2 * num_workers)
    self._pairs_done = multiprocessing.Queue()
    self._results = multiprocessing.Queue()
    self._createWorkers(num_workers)
    quiet = self._run.quiet
    more_pairs = True
    pair_db = self._pair_db
    writeback = bool(self._result_set_id)
    num_batches = 0
    total = 0
    if writeback:
        result_set_id = self._result_set_id
    def put_batch():
        # feed one batch to the workers; raises StopIteration when exhausted
        pair_info = next(pair_iterator)
        self._pairs_to_do.put(pair_info)
        if not quiet:
            sys.stdout.write('^')
            sys.stdout.flush()
        return sum(p[0] for p in pair_info) # need to take into account multiplicity for reads
    def write_results():
        # drain whatever results are currently available and persist them;
        # returns how many batches were drained
        all_results = []
        num_batches = 0
        try:
            while True:
                all_results.extend(self._results.get(True, 0.01))
                num_batches += 1
                if not quiet:
                    sys.stdout.write('v')
                    sys.stdout.flush()
        except queue.Empty:
            pass
        if all_results:
            pair_db.add_results(self._result_set_id, all_results)
        return num_batches
    while more_pairs:
        try:
            cur_count = 0
            # keep at least num_workers fresh batches flowing and at most
            # 2 * num_workers outstanding (num_batches is decremented as
            # results are written back)
            while cur_count < num_workers or num_batches < 2 * num_workers:
                total += put_batch()
                num_batches += 1
                cur_count += 1
                if writeback:
                    num_batches -= write_results()
        except StopIteration:
            more_pairs = False
        except queue.Empty:
            pass
    if writeback:
        # drain any results still outstanding after the iterator is exhausted
        while num_batches > 0:
            num_batches -= write_results()
    # put signal objects to indicate we're done
    for i in range(num_workers):
        self._pairs_to_do.put(None)
    processor = self._processor
    # note: currently unused
    targets = { t.name : t for t in processor._targets.targets }
    accumulated = 0
    def accumulate_counts():
        # fold counter data reported by workers into the parent's counters;
        # the loop exits via queue.Empty once the done-queue is drained
        num_accumulated = 0
        try:
            while 1 < num_workers:
                count_data, vect_data = self._pairs_done.get_nowait()
                processor.counters.update_with_count_data(count_data, vect_data)
                num_accumulated += 1
                if not quiet:
                    sys.stdout.write('x')
                    sys.stdout.flush()
        except queue.Empty:
            pass
        return num_accumulated
    while accumulated < num_workers:
        accumulated += accumulate_counts()
    if not self._run.quiet:
        print("\nAggregating data...")
    self._joinWorkers()
    processor.counters.total_pairs = total
    #if self._pair_db:
    #    processor.counters.unique_pairs = self._pair_db.unique_pairs()
def run_simple(self, pair_iterator):
    """Single-process variant of run(): process every pair inline, with
    optional SAM output, per-channel FASTQ output and DB writeback.

    NOTE(review): indentation reconstructed — nesting of the progress and
    writeback statements should be confirmed against the original source.
    """
    quiet = self._run.quiet
    run_limit = self._run._run_limit
    more_pairs = True
    pair_db = self._pair_db
    writeback = bool(self._result_set_id)
    sam = bool(self._run.generate_sam)
    channel_reads = bool(self._run.generate_channel_reads)
    use_quality = self._run._parse_quality
    total = 0
    if writeback:
        result_set_id = self._result_set_id
    processor = self._processor
    if self._pair_db:
        self._pair_db.worker_id = 0
    tagged = processor.uses_tags
    # one Pair object reused for every read to avoid reallocation
    pair = Pair()
    if sam:
        sam_writer = SamWriter(self._run.generate_sam, processor._targets.targets)
    if channel_reads:
        plus_writer = FastqWriter('R1_plus.fastq', 'R2_plus.fastq')
        minus_writer = FastqWriter('R1_minus.fastq', 'R2_minus.fastq')
    while more_pairs:
        try:
            while True:
                pair_info = next(pair_iterator)
                if not quiet:
                    sys.stdout.write('^')
                    sys.stdout.flush()
                results = []
                for lines in pair_info:
                    # lines layout: [multiplicity, r1, r2, identifier, q1, q2]
                    # — inferred from the indices used here; TODO confirm
                    if not pair.set_from_data(lines[3], str(lines[1]), str(lines[2]), lines[0]):
                        print('\nskipping empty pair: {}'.format(lines[3]))
                        continue
                    if use_quality:
                        pair.r1.quality = str(lines[4])
                        pair.r2.quality = str(lines[5])
                    if self._force_mask:
                        pair.set_mask(self._force_mask)
                    try:
                        processor.process_pair(pair)
                    except:
                        print("**** Error processing pair: {} / {}".format(pair.r1.original_seq, pair.r2.original_seq))
                        raise
                    if sam:
                        sam_writer.write(pair)
                    if channel_reads and pair.has_site:
                        # first mask (or its placeholder) goes to the plus
                        # channel, everything else to minus
                        if pair.mask_label == self._run.masks[0] or pair.mask_label == PLUS_PLACEHOLDER:
                            plus_writer.write(pair)
                        else:
                            minus_writer.write(pair)
                    total += pair.multiplicity
                    if writeback:
                        results.append(self._make_result(lines[3], pair, tagged))
                if not quiet:
                    sys.stdout.write('v')
                    sys.stdout.flush()
                if results:
                    pair_db.add_results(self._result_set_id, results)
                    if not quiet:
                        sys.stdout.write('.')
                        sys.stdout.flush()
                if run_limit and total > run_limit:
                    # reuse the iterator-exhausted path to stop cleanly
                    raise StopIteration()
        except StopIteration:
            more_pairs = False
    if not self._run.quiet:
        print("\nAggregating data...")
    processor.counters.total_pairs = total
    if self._pair_db:
        processor.counters.unique_pairs = self._pair_db.unique_pairs()
|
import os, grpc, codecs, requests
import lnt.rpc.rpc_pb2 as ln, lnt.rpc.rpc_pb2_grpc as lnrpc
def create_stub(ctx):
    """Build an authenticated LND gRPC stub from the click context's config.

    Returns (stub, macaroon) where ``macaroon`` is the hex-encoded credential
    bytes read from the configured macaroon path.
    """
    cfg = ctx.parent.parent.config['LND']
    # read credential files with context managers so the handles are closed
    # (previously both files were opened and never closed)
    with open(cfg['MacaroonPath'], 'rb') as macaroon_file:
        macaroon = codecs.encode(macaroon_file.read(), 'hex')
    os.environ['GRPC_SSL_CIPHER_SUITES'] = 'HIGH+ECDSA'
    with open(cfg['TlsCert'], 'rb') as cert_file:
        cert = cert_file.read()
    ssl_creds = grpc.ssl_channel_credentials(cert)
    channel = grpc.secure_channel(cfg['Host'], ssl_creds)
    stub = lnrpc.LightningStub(channel)
    return stub, macaroon
def normalize_channels(channels):
    """Convert LN channel protobuf objects into a plain dict keyed by channel id (as str)."""
    # attributes copied verbatim from each channel object
    fields = ("active", "remote_pubkey", "channel_point", "capacity",
              "local_balance", "remote_balance", "commit_fee",
              "commit_weight", "fee_per_kw", "total_satoshis_sent",
              "total_satoshis_received", "num_updates", "pending_htlcs",
              "csv_delay")
    normalized = {}
    for chan in channels:
        normalized[str(chan.chan_id)] = {name: getattr(chan, name) for name in fields}
    return normalized
def normalize_get_chan_response(chaninfo):
    """Convert a GetChanInfo protobuf object into a plain nested dict."""
    def policy_dict(policy):
        # node1 and node2 policies share the same shape
        return {
            "time_lock_delta": policy.time_lock_delta,
            "min_htlc": policy.min_htlc,
            "fee_base_msat": policy.fee_base_msat,
            "fee_rate_milli_msat": policy.fee_rate_milli_msat,
            "max_htlc_msat": policy.max_htlc_msat,
        }
    return {
        "channel_id": chaninfo.channel_id,
        "chan_point": chaninfo.chan_point,
        "last_update": chaninfo.last_update,
        "node1_pub": chaninfo.node1_pub,
        "node2_pub": chaninfo.node2_pub,
        "capacity": chaninfo.capacity,
        "node1_policy": policy_dict(chaninfo.node1_policy),
        "node2_policy": policy_dict(chaninfo.node2_policy),
    }
def get_1ml_info(testnet:bool, pub_key):
    """Fetch a node's public JSON profile from 1ml.com; return {} on any non-200 response."""
    prefix = '/testnet' if testnet else ''
    response = requests.get("https://1ml.com{}/node/{}/json".format(prefix, pub_key))
    if response.status_code == 200:
        return response.json()
    return {}
|
def test():
    # Generator whose first next() attempts to raise a non-exception object.
    # '\xf1'.encode('ASCII') itself fails first with UnicodeEncodeError
    # (U+00F1 is outside ASCII), so that is what actually propagates to the
    # caller — presumably this is a deliberate error-handling test case;
    # confirm before "fixing" it.
    raise '\xf1'.encode('ASCII')
    yield
|
"""
Defines some tools to handle events.
In particular :
-> defines events' types
-> defines functions to read events from binary .dat files using numpy
-> defines functions to write events to binary .dat files using numpy
Copyright: (c) 2019-2020 Prophesee
"""
from __future__ import print_function
import numpy as np
# Structured dtype for box annotations; offsets/itemsize are explicit so the
# on-disk layout stays fixed.
BBOX_DTYPE = np.dtype({'names': ['t', 'x', 'y', 'w', 'h', 'class_id', 'track_id', 'class_confidence'],
                       'formats': ['<i8', '<f4', '<f4', '<f4', '<f4', '<u4', '<u4', '<f4'],
                       'offsets': [0, 8, 12, 16, 20, 24, 28, 32],
                       'itemsize': 40})
def reformat_boxes(boxes):
    """ReFormat boxes according to new rule
    This allows to be backward-compatible with imerit annotation.
    't' = 'ts'
    'class_confidence' = 'confidence'
    """
    names = boxes.dtype.names
    if 't' in names and 'class_confidence' in names:
        # already in the new format: return the input unchanged
        return boxes
    # legacy -> new field-name mapping; all other fields keep their names
    rename = {'ts': 't', 'confidence': 'class_confidence'}
    reformatted = np.zeros((len(boxes),), dtype=BBOX_DTYPE)
    for name in names:
        reformatted[rename.get(name, name)] = boxes[name]
    return reformatted
|
from unittest import TestCase
from moceansdk.modules.command.content_builder.wa_sticker_content_builder import (
WaStickerConentBuilder,
)
class TestWaStickerContentBuilder(TestCase):
    """Tests for WaStickerConentBuilder request construction."""

    def test_type(self):
        builder = WaStickerConentBuilder()
        self.assertEqual(builder.type(), "sticker")

    def test_request_data(self):
        expected = {
            "rich_media_url": "rich_media_url",
            "text": "text",
            "type": "sticker",
        }
        builder = WaStickerConentBuilder()
        builder.set_rich_media_url("rich_media_url").set_text("text")
        self.assertEqual(builder.get_request_data(), expected)
|
from typing import Dict
import pandas as pd
from gradetools.config import PYTHON_ALWAYS_INSERT_CONFIGS
from gradetools.excel.io import get_inputs_outputs_sheet_from_file_path, get_output_dict_from_output_range_dict
from gradetools.model_type import ModelType
from gradetools.project_3.config import EXCEL_OUTPUT_LOCATIONS
from gradetools.py.execute2.main import ReplacementConfig, read_notebook_and_run_extracting_globals, InsertConfig
from gradetools.py.strip_style import get_df_from_df_or_styler
def run_model_extract_results_dict(model_file: str, model_type: ModelType,
                                   inputs_dict: Dict[str, Dict[str, float]]) -> Dict[str, float]:
    """Dispatch result extraction based on the model type.

    Excel models are read from the workbook; Python and combo models are
    executed as notebooks and read from their globals.
    """
    if model_type != ModelType.EXCEL:
        # Python or combo, get from Python
        return _get_results_from_python_model(model_file, inputs_dict)
    return _get_results_from_excel_model(model_file)
def _get_results_from_excel_model(model_file: str,
                                  output_locations: Dict[str, str] = EXCEL_OUTPUT_LOCATIONS) -> Dict[str, float]:
    """Read single-cell outputs and the Monte Carlo WACC table from a workbook."""
    sheet = get_inputs_outputs_sheet_from_file_path(model_file)
    # Pull the MC simulation table as a headerless DataFrame and name its columns.
    mc_df = sheet.range(output_locations['mc_table']).expand().options(
        pd.DataFrame, index=False, header=False).value
    mc_df.columns = ['Beta', 'Market Return', 'Bond Price', 'Tax Rate', 'WACC']
    # Every location other than the table is a single-cell output.
    scalar_locations = {name: loc for name, loc in output_locations.items() if name != 'mc_table'}
    results = get_output_dict_from_output_range_dict(sheet, scalar_locations)
    _add_wacc_mc_outputs_to_dict(results, mc_df)
    return results
def _get_results_from_python_model(model_file: str, inputs_dict: Dict[str, Dict[str, float]]) -> Dict[str, float]:
    """Execute the notebook model and pull the expected result globals.

    Raises OutputNotFoundException if any expected global is missing.
    """
    replacements = [
        ReplacementConfig('model_data', 'ModelInputs', kwargs=inputs_dict['model']),
        ReplacementConfig('sim_data', 'SimulationInputs', kwargs=inputs_dict['sim']),
    ]
    globs = read_notebook_and_run_extracting_globals(
        model_file,
        replacements,
        inserts=PYTHON_ALWAYS_INSERT_CONFIGS,
        suppress_output=True
    )
    scalar_names = ('coe', 'mv_equity', 'pretax_cost_of_debt',
                    'aftertax_cost_of_debt', 'mv_debt', 'wacc')
    outputs = {}
    try:
        wacc_mc_df_or_styler = globs['wacc_mc_df']
        for name in scalar_names:
            outputs[name] = globs[name]
    except KeyError as e:
        raise OutputNotFoundException(e)
    wacc_df = get_df_from_df_or_styler(wacc_mc_df_or_styler)
    _add_wacc_mc_outputs_to_dict(outputs, wacc_df)
    return outputs
def _add_wacc_mc_outputs_to_dict(outputs: Dict[str, float], df: pd.DataFrame):
    """Record the mean and standard deviation of the simulated WACC column.

    Mutates ``outputs`` in place, adding 'wacc_mean' and 'wacc_std'.
    """
    wacc_column = df['WACC']
    outputs['wacc_mean'] = wacc_column.mean()
    outputs['wacc_std'] = wacc_column.std()
class OutputNotFoundException(Exception):
    """Raised when an expected output global is missing from an executed model."""
    pass
|
from hask3.lang.syntax import sig
from hask3.lang.syntax import H
from hask3.lang.syntax import t
from hask3.lang.syntax import instance
from hask3.lang import List
from hask3.Data.Functor import fmap
from hask3.Control.Applicative import Applicative
class Monad(Applicative):
    """Basic operations over a monad.
    Monad is a concept from a branch of mathematics known as Category Theory.
    From the perspective of a Haskell programmer, however, it is best to think
    of a monad as an abstract datatype of actions.
    Dependencies:
    - `~hask3.Control.Applicative.Applicative`:class:
    Attributes:
    - ``bind``
    - ``__rshift__``
    Minimal complete definition:
    - ``bind``
    """
    @classmethod
    def make_instance(typeclass, cls, bind):
        from hask3.hack import is_builtin
        from hask3.lang.type_system import build_instance
        from hask3.lang.syntax import H, t
        # Attach the Haskell-style type signature to the supplied bind:
        # bind :: Monad m => m a -> (a -> m b) -> m b
        bind = bind ** (H[Monad, "m"]/
            t("m", "a") >> (H/ "a" >> t("m", "b")) >> t("m", "b"))
        if not is_builtin(cls):
            # Expose bind as the ``>>`` operator on non-builtin types so that
            # ``m >> f`` dispatches through this typeclass instance.
            def bind_wrap(s, o):
                return Monad[s].bind(s, o)
            cls.__rshift__ = bind_wrap
        build_instance(Monad, cls, {"bind": bind})
@sig(H[Monad, "m"]/ t("m", "a") >> (H/ "a" >> t("m", "b")) >> t("m", "b"))
def bind(m, fn):
    """``bind :: Monad m => m a -> (a -> m b) -> m b``
    Monadic bind.
    """
    # Dispatch to the bind implementation registered for m's concrete type.
    return Monad[m].bind(m, fn)
@sig(H[Monad, "m"]/ t("m", t("m", "a")) >> t("m", "a"))
def join(m):
    """``join :: Monad m => m (m a) -> m a``
    The join function is the conventional monad join operator. It is used to
    remove one level of monadic structure, projecting its bound argument into
    the outer level.
    """
    from hask3.Prelude import id
    # join = (>>= id): binding with the identity collapses one monadic layer.
    return bind(m, id)
@sig(H[Monad, "m"]/ (H/ "a" >> "r") >> t("m", "a") >> t("m", "r"))
def liftM(fn, m):
    """``liftM :: Monad m => (a1 -> r) -> m a1 -> m r``
    Promote a function to a monad.
    """
    # For any lawful monad, liftM coincides with the Functor fmap.
    return fmap(fn, m)
def _list_bind(x, fn):
    # bind for lists: map fn over x (each call yields a list of results) and
    # concatenate those lists into one lazy hask List.
    from itertools import chain
    from hask3.lang import L
    return L[chain.from_iterable(fmap(fn, x))]


# Register the Monad instance for the builtin hask List type.
instance(Monad, List).where(
    bind = _list_bind
)

# Keep the module namespace clean: only the typeclass and its methods remain.
del _list_bind, Applicative, List, instance, t, H, sig
|
#!/usr/bin/env python
#
# generatePathFromObj.py
# Core3D
#
# Created by Julian Mayer on 16.12.07.
# Copyright (c) 2010 A. Julian Mayer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitationthe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, random
from struct import *
from vecmath import *
try:
    from numpy import *
    from scipy.interpolate import splprep, splev
except:
    print "Error: NumPy or SkiPy not found"
    sys.exit()

# Control-point coordinates read from the OBJ file's vertex ("v") lines.
x = []
y = []
z = []
SCALE = 1.0                  # uniform scale applied to every coordinate
numberOfPoints = 3600        # number of samples taken on the fitted spline
pointsPerUnit = 0            # if nonzero, derive numberOfPoints from path length
splineOrder = 2              # spline order (k argument of splprep)
smoothnessParameter = 3.0    # smoothing factor (s argument of splprep)

# Parse command-line flags; the final argument is the input OBJ file.
try:
    if (len(sys.argv)) == 1: raise Exception('input', 'error')
    f = open(sys.argv[len(sys.argv) - 1], 'r')
    of = 0
    for i in range(1, len(sys.argv) - 1):
        if sys.argv[i].startswith("-p="): smoothnessParameter = float(sys.argv[i][3:])
        elif sys.argv[i].startswith("-i="): splineOrder = int(sys.argv[i][3:])
        elif sys.argv[i].startswith("-s="): SCALE = float(sys.argv[i][3:])
        elif sys.argv[i].startswith("-f="): numberOfPoints = int(sys.argv[i][3:])
        elif sys.argv[i].startswith("-u="): pointsPerUnit = float(sys.argv[i][3:])
        elif sys.argv[i].startswith("-o="): of = open(sys.argv[i][3:], 'w')
        else: raise Exception('input', 'error')
    # Default output file: input filename with its extension replaced by ".path".
    if of == 0: of = open(sys.argv[len(sys.argv) - 1][:sys.argv[len(sys.argv) - 1].rfind(".")] + ".path", 'w')
except:
    print """Usage: generateInterpolatedPathFromObj [options] obj_file
Options:
-s=<scale> Scale all coordinates by <scale>
-p=<smoothness> Use <smoothness> as smoothness parameter (Default: 3.0)
-i=<spline_order> Use interpolation of order <spline_order> (Default: 2)
-f=<num_points> Produce <num_points> points (Default: 3600)
-u=<points_per_unit> Produce <points_per_unit> points per unit of path length
-o=<path_file> Place the output path into <path_file>"""
    sys.exit()

# Read vertex lines and close the loop by repeating the first point.
lines = f.readlines()
for line in lines:
    c = line.split(" ")
    if c[0] == "v":
        x.append(SCALE * float(c[1]))
        y.append(SCALE * float(c[2]))
        z.append(SCALE * float(c[3]))
x.append(x[0])
y.append(y[0])
z.append(z[0])

# Optionally size the sample count from the polyline's total length.
if pointsPerUnit != 0:
    distance = 0
    for i in range(len(x)-1):
        prevvec = [x[i], y[i], z[i]]
        vec = [x[i+1], y[i+1], z[i+1]]
        # NOTE(review): 'substract' presumably comes from vecmath ('subtract'
        # below is numpy's) — confirm both names resolve as intended.
        distance += magnitude(substract(vec, prevvec))
    numberOfPoints = distance * pointsPerUnit

tckp,u = splprep([array(x), array(y), array(z)], s=smoothnessParameter, k=splineOrder, nest=-1) # find the knot points
xnew, ynew, znew = splev(linspace(0, 1, numberOfPoints), tckp) # evaluate spline, including interpolated points

# Write the sampled path as packed little-endian float triplets.
out = ""
for i in range(len(xnew)):
    out += pack('fff', xnew[i], ynew[i], znew[i])
of.write(out)
# For each original control point, record (distance, index) of the nearest
# sampled spline point; these anchor the randomized variants below.
nearest = []
for i in range(len(x)):
    nearest.append([1000, 0])
    for v in range(len(xnew)-1):
        vec1 = [x[i], y[i], z[i]]
        vec2 = [xnew[v], ynew[v], znew[v]]
        dist = magnitude(substract(vec1, vec2))
        if (dist < nearest[i][0]):
            nearest[i] = [dist, v]

# Generate 8 randomized path variants (.path0 .. .path7), each displaced
# sideways by a smoothly interpolated random amount.
random.seed()
for v in range(8):
    xr = []
    yr = []
    zr = []
    fewrandom = []
    manyrandom = []
    # One coarse random offset per ~10 control points ...
    for i in range((len(nearest) / 10)):
        fewrandom.append(random.uniform(-8,8))
    # NOTE(review): both pads re-append fewrandom[0]; fewrandom[-1] may have
    # been intended for the wrap-around — confirm before changing.
    fewrandom.append(fewrandom[0])
    fewrandom.append(fewrandom[0])
    # ... linearly interpolated into a per-point offset.
    for i in range(len(nearest)):
        manyrandom.append(fewrandom[i / 10] * ((10 - (i % 10)) / 10.0) + fewrandom[i / 10 + 1] * ((i % 10) / 10.0))
    for i in range(len(nearest)):
        # Sum of directions to the previous and next anchors approximates the
        # local tangent; displace along its horizontal perpendicular.
        vec = [0,0,0]
        if (i > 0):
            prevtocurr = subtract([xnew[nearest[i][1]], ynew[nearest[i][1]], znew[nearest[i][1]]], [xnew[nearest[i-1][1]], ynew[nearest[i-1][1]], znew[nearest[i-1][1]]])
            vec = add(vec, prevtocurr)
        if (i < len(nearest) - 1):
            currtonext = subtract([xnew[nearest[i+1][1]], ynew[nearest[i+1][1]], znew[nearest[i+1][1]]], [xnew[nearest[i][1]], ynew[nearest[i][1]], znew[nearest[i][1]]])
            vec = add(vec, currtonext)
        perpendicular = normalize([vec[2], 0, -vec[0]])
        perpendicular = multiply(perpendicular, manyrandom[i])
        xr.append(xnew[nearest[i][1]] + perpendicular[0])
        yr.append(ynew[nearest[i][1]])
        zr.append(znew[nearest[i][1]] + perpendicular[2])
    # Close the variant loop and re-fit a smooth spline through it.
    xr.append(xr[0])
    yr.append(yr[0])
    zr.append(zr[0])
    tckp,u = splprep([array(xr), array(yr), array(zr)], s=smoothnessParameter, k=splineOrder, nest=-1) # find the knot points
    xs, ys, zs = splev(linspace(0, 1, numberOfPoints), tckp) # evaluate spline, including interpolated points
    off = open(sys.argv[len(sys.argv) - 1][:sys.argv[len(sys.argv) - 1].rfind(".")] + ".path" + str(v), 'w')
    out = ""
    for i in range(len(xnew)):
        out += pack('fff', xs[i], ys[i], zs[i])
    off.write(out)
import dataclasses
from typing import Optional
from esque.io.data_types import NoData, UnknownDataType
from esque.io.messages import Data
from esque.io.serializers.base import DataSerializer, SerializerConfig
@dataclasses.dataclass(frozen=True)
class RawSerializerConfig(SerializerConfig):
    """Configuration for RawSerializer; adds no options beyond the base config."""
    pass
class RawSerializer(DataSerializer):
    """Pass-through serializer that treats payloads as opaque bytes."""

    config_cls = RawSerializerConfig
    # All deserialized payloads are tagged with this shared unknown type.
    unknown_data_type: UnknownDataType = UnknownDataType()

    def deserialize(self, raw_data: Optional[bytes]) -> Data:
        """Wrap raw bytes in a Data object; ``None`` maps to ``Data.NO_DATA``."""
        if raw_data is None:
            return Data.NO_DATA
        return Data(payload=raw_data, data_type=self.unknown_data_type)

    def serialize(self, data: Data) -> Optional[bytes]:
        """Return the payload bytes unchanged; ``NoData`` serializes to ``None``."""
        if isinstance(data.data_type, NoData):
            return None
        if isinstance(data.payload, bytes):
            return data.payload
        raise TypeError(f"Data payload has to be bytes, not {type(data.payload).__name__}!")
|
"""URLs for rendering app"""
from django.conf.urls import url
from rendering import views
# pylint: disable=invalid-name
# URL routes for the rendering previews.
urlpatterns = [
    # HTML preview of a fully rendered email.
    url(r'^preview/email/$',
        views.EmailPreviewView.as_view(),
        name='preview_email'),
    # HTML preview of a single content block.
    url(r'^preview/block/$',
        views.BlockPreviewView.as_view(),
        name='preview_block'),
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Code2Vec(nn.Module):
    """code2vec-style model: attention-weighted aggregation of AST path-contexts.

    Each input token triple (start node, path, end node) is embedded,
    concatenated, projected, and attention-pooled into one code vector that
    feeds a linear classifier.
    """

    def __init__(self, nodes_dim, paths_dim, embedding_dim, output_dim, dropout):
        super().__init__()
        # Embedding tables for terminal nodes and for whole AST paths.
        self.node_embedding = nn.Embedding(nodes_dim, embedding_dim)
        self.path_embedding = nn.Embedding(paths_dim, embedding_dim)
        # Learned projection of each concatenated context, plus attention vector.
        self.W = nn.Parameter(torch.randn(1, embedding_dim, 3*embedding_dim))
        self.a = nn.Parameter(torch.randn(1, embedding_dim, 1))
        self.out = nn.Linear(embedding_dim, output_dim)
        self.do = nn.Dropout(dropout)

    def forward(self, starts, paths, ends):
        """starts/paths/ends: [batch, max length] index tensors -> [batch, output dim]."""
        batch_size = starts.shape[0]
        # Embed and concatenate each (start, path, end) triple: [B, L, 3E].
        contexts = self.do(torch.cat(
            (self.node_embedding(starts),
             self.path_embedding(paths),
             self.node_embedding(ends)),
            dim=2))
        contexts = contexts.permute(0, 2, 1)                       # [B, 3E, L]
        # Project every context into the combined-context space.
        combined = torch.tanh(
            torch.bmm(self.W.repeat(batch_size, 1, 1), contexts))  # [B, E, L]
        combined = combined.permute(0, 2, 1)                       # [B, L, E]
        # Attention weights over the contexts.
        attn = torch.bmm(combined, self.a.repeat(batch_size, 1, 1)).squeeze(2)  # [B, L]
        attn = F.softmax(attn, dim=1).unsqueeze(2)                 # [B, L, 1]
        # Attention-weighted sum of combined contexts -> single code vector.
        code_vector = torch.bmm(combined.permute(0, 2, 1), attn).squeeze(2)  # [B, E]
        return self.out(code_vector)
import torch
def linear(x, weight, bias=None):
    """Apply a batched linear transformation ``y = x @ weight^T + bias``.

    Args:
        x: input tensor of shape ``(..., n, in_features)``.
        weight: weight tensor of shape ``(..., out_features, in_features)``.
        bias: optional bias of shape ``(..., out_features)``; broadcast over
            the ``n`` dimension via ``unsqueeze(-2)``.

    Returns:
        Tensor of shape ``(..., n, out_features)``.
    """
    # torch.matmul broadcasts leading batch dimensions, generalizing the
    # previous torch.bmm call (which required exactly 3-D inputs) while
    # producing identical results in the 3-D case.
    out = torch.matmul(x, weight.transpose(-2, -1))
    if bias is not None:
        out = out + bias.unsqueeze(-2)
    return out
|
import opentuner
from opentuner import ConfigurationManipulator
from opentuner import LogIntegerParameter
from opentuner import EnumParameter
from opentuner import MeasurementInterface
from opentuner import Result
FILE = '../programs/server/server2.py'
INTERPRETER = '../../../python3/bin/python3.7'
MODULO_THRESHOLD = 500
TIME_LIMIT = 60.0
class GCTuner(MeasurementInterface):
    """OpenTuner interface searching for GC collection points/modulos that
    beat the default strategy's memory and object counts."""

    def __init__(self, *args, **kwargs):
        super(GCTuner, self).__init__(*args, **kwargs)
        self.parallel_compile = True
        # Baseline run with the default GC strategy: stderr carries the
        # memory and object counts as the first two whitespace-separated values.
        base_cmd = '{} {}'.format(INTERPRETER, FILE)
        run_result = self.call_program(base_cmd)
        assert run_result['returncode'] == 0
        values = [value.strip() for value in run_result['stderr'].split()]
        self.default_memory = int(values[0])
        self.default_objects = int(values[1])
        # Verbose probe run: the third stderr value lists candidate
        # collection instructions as comma-separated "instr:count" pairs.
        param_cmd = 'RESEARCH_MODULO1={} RESEARCH_VERBOSE=1 {} {}'.format(MODULO_THRESHOLD, INTERPRETER, FILE)
        run_result = self.call_program(param_cmd)
        assert run_result['returncode'] == 0
        values = [value.strip() for value in run_result['stderr'].split()]
        self.instructions = [int(pair.split(':')[0]) for pair in values[2].split(',')[:-1]]
        print('Located {} possible collection points.'.format(len(self.instructions)))

    @staticmethod
    def _research_cmd(cfg):
        """Build the command line that runs FILE with cfg's GC parameters.

        Shared by compile() and save_final_config(), which previously
        duplicated this string construction.
        """
        return 'RESEARCH_INSTRUCTION1={} RESEARCH_MODULO1={} RESEARCH_GENERATION1={} ' \
               'RESEARCH_INSTRUCTION2={} RESEARCH_MODULO2={} RESEARCH_GENERATION2={} ' \
               '{} {}'.format(cfg['instruction1'], cfg['modulo1'], cfg['generation1'], cfg['instruction2'],
                              cfg['modulo2'], cfg['generation2'], INTERPRETER, FILE)

    def manipulator(self):
        """Declare the search space: two (instruction, modulo, generation) triples."""
        m = ConfigurationManipulator()
        m.add_parameter(EnumParameter('instruction1', self.instructions))
        m.add_parameter(LogIntegerParameter('modulo1', 5, 1000000))
        m.add_parameter(EnumParameter('generation1', [0, 1, 2]))
        m.add_parameter(EnumParameter('instruction2', self.instructions))
        m.add_parameter(LogIntegerParameter('modulo2', 5, 1000000))
        m.add_parameter(EnumParameter('generation2', [0, 1, 2]))
        return m

    def compile(self, cfg, id):
        """Run FILE under cfg and return its score as a Result (lower is better)."""
        try:
            run_result = self.call_program(self._research_cmd(cfg), limit=TIME_LIMIT)
            assert run_result['returncode'] == 0
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; failed or timed-out runs score as worst.
            return Result(time=float('inf'))
        values = [int(value.strip()) for value in run_result['stderr'].split()]
        memory = values[0]
        objects = values[1]
        # Tiered penalty: regressing memory is worst, then regressing object
        # count; otherwise the raw object count is the score.
        if memory >= self.default_memory:
            score = 2000000000000000000 + memory
        elif objects >= self.default_objects:
            score = 1000000000000000000 + objects
        else:
            score = objects
        return Result(time=score)

    def run_precompiled(self, desired_result, input, limit, compile_result, id):
        """Measurement already happened in compile(); just forward its result."""
        return compile_result

    def save_final_config(self, configuration):
        """called at the end of tuning"""
        cfg = configuration.data
        print('Optimal values written to optimal.json: {}'.format(cfg))
        self.manipulator().save_to_file(cfg, 'optimal.json')
        # Re-run the winning configuration and compare against the baseline.
        run_result = self.call_program(self._research_cmd(cfg))
        assert run_result['returncode'] == 0
        values = [value.strip() for value in run_result['stderr'].split()]
        optimal_memory = int(values[0])
        optimal_objects = int(values[1])
        print('Default strategy: {} {}'.format(self.default_memory, self.default_objects))
        print('Optimal strategy: {} {}'.format(optimal_memory, optimal_objects))
        if optimal_memory < self.default_memory and optimal_objects < self.default_objects:
            print('Pareto dominant solution found.')
        else:
            print('Failed to find pareto dominant solution.')
if __name__ == '__main__':
    # Parse the standard OpenTuner flags and start the tuning loop.
    arg_parser = opentuner.default_argparser()
    GCTuner.main(arg_parser.parse_args())
|
import FWCore.ParameterSet.Config as cms
# CMSSW producer configuration; consumes the rho maps from hiFJRhoProducer.
hiFJGridEmptyAreaCalculator = cms.EDProducer('HiFJGridEmptyAreaCalculator',
    gridWidth = cms.double(0.05),
    bandWidth = cms.double(0.2),
    mapEtaEdges = cms.InputTag('hiFJRhoProducer','mapEtaEdges'),
    mapToRho = cms.InputTag('hiFJRhoProducer','mapToRho'),
    mapToRhoM = cms.InputTag('hiFJRhoProducer','mapToRhoM'),
    pfCandSource = cms.InputTag('particleFlow'),
    jetSource = cms.InputTag('kt4PFJetsForRho'),
    doCentrality = cms.bool(True),
    hiBinCut = cms.int32(100),
    CentralityBinSrc = cms.InputTag("centralityBin","HFtowers"),
    keepGridInfo = cms.bool(False),
)

# For the 2016 pA era, disable the centrality-based handling.
from Configuration.Eras.Modifier_pA_2016_cff import pA_2016
pA_2016.toModify(hiFJGridEmptyAreaCalculator, doCentrality = False)
|
# Read from a file one line at a time.
# amwhaley@cisco.com
# twitter: @mandywhaley
# http://developer.cisco.com
# http://developer.cisco.com/learning
# Jan 15, 2015
# * THIS SAMPLE APPLICATION AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY
# * OF ANY KIND BY CISCO, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED
# * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR
# * PURPOSE, NONINFRINGEMENT, SATISFACTORY QUALITY OR ARISING FROM A COURSE OF
# * DEALING, LAW, USAGE, OR TRADE PRACTICE. CISCO TAKES NO RESPONSIBILITY
# * REGARDING ITS USAGE IN AN APPLICATION, AND IT IS PRESENTED ONLY AS AN
# * EXAMPLE. THE SAMPLE CODE HAS NOT BEEN THOROUGHLY TESTED AND IS PROVIDED AS AN
# * EXAMPLE ONLY, THEREFORE CISCO DOES NOT GUARANTEE OR MAKE ANY REPRESENTATIONS
# * REGARDING ITS RELIABILITY, SERVICEABILITY, OR FUNCTION. IN NO EVENT DOES
# * CISCO WARRANT THAT THE SOFTWARE IS ERROR FREE OR THAT CUSTOMER WILL BE ABLE
# * TO OPERATE THE SOFTWARE WITHOUT PROBLEMS OR INTERRUPTIONS. NOR DOES CISCO
# * WARRANT THAT THE SOFTWARE OR ANY EQUIPMENT ON WHICH THE SOFTWARE IS USED WILL
# * BE FREE OF VULNERABILITY TO INTRUSION OR ATTACK. THIS SAMPLE APPLICATION IS
# * NOT SUPPORTED BY CISCO IN ANY MANNER. CISCO DOES NOT ASSUME ANY LIABILITY
# * ARISING FROM THE USE OF THE APPLICATION. FURTHERMORE, IN NO EVENT SHALL CISCO
# * OR ITS SUPPLIERS BE LIABLE FOR ANY INCIDENTAL OR CONSEQUENTIAL DAMAGES, LOST
# * PROFITS, OR LOST DATA, OR ANY OTHER INDIRECT DAMAGES EVEN IF CISCO OR ITS
# * SUPPLIERS HAVE BEEN INFORMED OF THE POSSIBILITY THEREOF.-->
#.readline() reads in only 1 line of the file at a time.
# .readline() reads in only 1 line of the file at a time.
print ("Read only the first line of the file:")
# Use a context manager so the file is closed even if reading fails
# (the original opened and closed the handle manually).
with open("my-file.txt", "r") as my_file_object:
    print (my_file_object.readline())
print ("\n")
from veripy.parser import syntax
from veripy.typecheck.types import to_ast_type
# Short aliases for the operator enums defined by the parser syntax module.
ArithOps = syntax.ArithOps
CompOps = syntax.CompOps
BoolOps = syntax.BoolOps

# Maps source-level binary operator spellings to their AST operator tags.
BINOP_DICT = {
    '+' : ArithOps.Add,
    '-' : ArithOps.Minus,
    '*' : ArithOps.Mult,
    '//': ArithOps.IntDiv,
    '%' : ArithOps.Mod,
    '<=' : CompOps.Le,
    '<' : CompOps.Lt,
    '>=' : CompOps.Ge,
    '>' : CompOps.Gt,
    '==' : CompOps.Eq,
    '!=' : CompOps.Neq,
    'and' : BoolOps.And,
    'or' : BoolOps.Or,
    '==>' : BoolOps.Implies,
    '<==>': BoolOps.Iff
}

# Maps source-level unary operator spellings to their AST operator tags.
UNOP_DICT = {
    'not' : BoolOps.Not,
    '-' : ArithOps.Neg
}
def unpackTokens(tokenlist):
    """Yield consecutive (operator, operand) pairs from a flat token list.

    A trailing unpaired token is silently dropped, matching the original
    StopIteration-based implementation.
    """
    stream = iter(tokenlist)
    yield from zip(stream, stream)
class ASTBuilder:
    """Base class for parse-token handlers that build veripy AST nodes.

    Stores the first parsed token as ``value``; subclasses override
    ``makeAST`` to convert it into the corresponding AST node.
    """

    def __init__(self, tokens):
        self.value = tokens[0]

    def makeAST(self):
        raise NotImplementedError('Abstract Builder')
class ProcessInt(ASTBuilder):
    """Builds an integer-literal AST node from the parsed token."""

    def makeAST(self):
        literal_value = syntax.VInt(self.value)
        return syntax.Literal(literal_value)
class ProcessBool(ASTBuilder):
    """Builds a boolean-literal AST node from the parsed token."""

    def makeAST(self):
        literal_value = syntax.VBool(self.value)
        return syntax.Literal(literal_value)
class ProcessVar(ASTBuilder):
    """Builds a variable-reference AST node from the parsed identifier."""

    def makeAST(self):
        return syntax.Var(self.value)
class ProcessUnOp(ASTBuilder):
    """Builds a unary-operation AST node from an (op, operand) token group."""

    def __init__(self, tokens):
        self.op, self.value = tokens[0]

    def makeAST(self):
        operand = self.value.makeAST()
        return syntax.UnOp(UNOP_DICT[self.op], operand)
class ProcessBinOp(ASTBuilder):
    """Builds a left-associative chain of binary operations."""

    def makeAST(self):
        result = self.value[0].makeAST()
        # Fold the remaining (operator, operand) pairs left-to-right.
        for op, operand in unpackTokens(self.value[1:]):
            result = syntax.BinOp(result, BINOP_DICT[op], operand.makeAST())
        return result
class ProcessSubscript(ASTBuilder):
    """Builds (possibly chained) subscript AST nodes: ``a[i]``, ``a[i:j]``, ``a[i][j]``.

    Tokens arrive as the subscripted expression followed by flat bracketed
    runs, e.g. ``var '[' lo ':' hi ']' '[' idx ']'``.
    """

    def __init__(self, tokens):
        self.var, *self.subscripts = tokens

    def makeAST(self):
        var = self.var.makeAST()
        if not self.subscripts:
            raise Exception('No subscript found')
        result = None
        while self.subscripts:
            lbrk, *self.subscripts = self.subscripts
            assert lbrk == '['
            # Collect this bracket group's tokens up to the closing ']'.
            store = []
            while self.subscripts and self.subscripts[0] != ']':
                store.append(self.subscripts[0])
                self.subscripts = self.subscripts[1:]
            if len(store) == 3:
                # Full slice lo:hi. BUGFIX: the bounds were previously passed
                # as raw token builders; convert them to AST nodes like the
                # two-token branches do.
                subscript = syntax.Slice(store[0].makeAST(), store[2].makeAST())
            elif len(store) == 2:
                fst, snd = store
                if fst == ':':
                    # BUGFIX: was `syntax.Slilce` (typo), which raised
                    # AttributeError for any `[:hi]` slice at runtime.
                    subscript = syntax.Slice(None, snd.makeAST())
                else:
                    subscript = syntax.Slice(fst.makeAST(), None)
            else:
                subscript = store[0].makeAST()
            # Chain subscripts left-to-right: a[i][j] == (a[i])[j].
            if result is None:
                result = syntax.Subscript(var, subscript)
            else:
                result = syntax.Subscript(result, subscript)
            if self.subscripts:
                # Drop the closing ']'.
                self.subscripts = self.subscripts[1:]
        return result
class ProcessFnCall(ASTBuilder):
    """Builds a function-call AST node; unary-operator names are desugared."""

    def __init__(self, tokens):
        self.value = tokens

    def makeAST(self):
        func_name, *args = self.value
        func_name = func_name.makeAST()
        if func_name.name in UNOP_DICT:
            # Calls spelled like not(x) / -(x) become unary-operator nodes.
            return syntax.UnOp(UNOP_DICT[func_name.name], args[0].makeAST())
        arg_nodes = [arg.makeAST() for arg in args]
        return syntax.FunctionCall(func_name, arg_nodes, native=False)
class ProcessQuantification(ASTBuilder):
    """Builds a quantified AST node from `forall`/`exists` token groups."""

    def __init__(self, tokens):
        self.value = tokens

    def makeAST(self):
        import veripy.transformer as trans
        ty = None
        # Tokens are (quantifier, var, expr) or (quantifier, var, type, expr).
        # NOTE(review): any other arity leaves `quantifier` unbound -> NameError.
        if len(self.value) == 3:
            quantifier, var, expr = self.value
        elif len(self.value) == 4:
            quantifier, var, ty, expr = self.value
        if ty is not None:
            ty = ty.makeAST()
            ty = to_ast_type(ty.name)
        # Rename the bound variable (suffix '$$0') to avoid capture, then
        # substitute the fresh name through the quantified body.
        ori = var.makeAST()
        bounded = syntax.Var(ori.name + '$$0')
        e = trans.subst(ori.name, bounded, expr.makeAST())
        if quantifier == 'exists':
            # exists x. Q <==> not forall x. not Q
            return syntax.UnOp(BoolOps.Not,
                    syntax.Quantification(bounded,
                        syntax.UnOp(BoolOps.Not, e), ty=ty))
        else:
            return syntax.Quantification(bounded, e, ty=ty)
# Copy "hfs_superdrive" to "superdrive", dropping the first 0x400 bytes
# (presumably a header/driver prefix of the disk image — verify against
# the image layout).
with open("hfs_superdrive", "rb") as src:
    src.seek(0x400)
    payload = src.read()
with open("superdrive", "wb") as dst:
    dst.write(payload)
|
# -*- coding: utf-8 -*-
"""
Wraps a connection to koji, managing the session, and providing convenience
methods for interacting with the koji api.
"""
import os
import koji
from koji_wrapper.base import KojiWrapperBase
class KojiWrapper(KojiWrapperBase):
    """Convenience helpers over a koji session for builds and source rpms."""

    def __init__(self, **kwargs):
        # Lazily-created koji.PathInfo used to build download URLs.
        self._pathinfo = None
        super().__init__(**kwargs)

    def file_types(self, nvr, types=('image',)):
        """
        :param nvr: nvr of the desired build
        :param types: iterable of koji archive types. This is currently any of:
            'maven', 'win', or 'image'
        :returns: list of file types of given build (or None if the build
            does not exist)
        """
        # NOTE: the default was the mutable list ['image']; a tuple avoids the
        # shared-mutable-default pitfall with identical behavior for callers.
        build = self.build(nvr)
        if not build:
            return None
        file_types = set()
        for this_type in types:
            archives_list = self.archives(buildID=build['id'], type=this_type)
            if archives_list:
                for archive in archives_list:
                    file_types.add(archive['type_name'])
        if file_types:
            return list(file_types)
        # Default
        # TODO: make sure we actually need this default - can't see why we do.
        return ['rpm']

    def srpm_url(self, nvr=None):
        """
        :param nvr: reference to the rpm of the desired package.
            This may be any of:
            - int ID
            - string N-V-R.A
            - string N-V-R.A@location
            - map containing 'name', 'version', 'release', and 'arch' (and
              optionally 'location')
        :returns: srpm url for a given nvr
        """
        # The previous try/except only re-raised the caught exception
        # unchanged, so it was removed; add real logging/handling here if
        # error recovery is ever needed.
        build = self.build(nvr)
        rpm_list = self.rpms(buildID=build.get('build_id'), arches='src')
        # Find the source rpm among the returned rpms.
        src_rpm = None
        for rpm in rpm_list:
            if rpm.get('arch') == 'src':
                src_rpm = rpm
                break
        return self._build_srpm_url(rpm=src_rpm, build=build)

    def _build_srpm_url(self, rpm=None, build=None):
        """Join the build's base path with the rpm's relative path."""
        if self._pathinfo is None and self.topurl is not None:
            self._pathinfo = koji.PathInfo(topdir=self.topurl)
        # TODO: add error handling (self._pathinfo may still be None here).
        srpm_path = self._pathinfo.rpm(rpm)
        base_path = self._pathinfo.build(build)
        return os.path.join(base_path, srpm_path)
|
from .registry import DeviceRegistry, DeviceEntry, MediaDeviceType, MediaDeviceDiscoveryEvent
from .events import DiscoveryEventType

# Public API re-exported at package level.
__all__ = ['DeviceRegistry', 'DiscoveryEventType', 'DeviceEntry', 'MediaDeviceType', 'MediaDeviceDiscoveryEvent']
|
class JobResultExample(object):
    """Canned BigQuery copy-job API responses for use in tests.

    Each constant mirrors the JSON a jobs.get call would return at a
    different point in a copy job's lifecycle.
    """

    # Job still running: no statistics yet.
    IN_PROGRESS = {
        "status": {"state": "IN PROGRESS"},
        "configuration": {
            "copy": {
                "sourceTable": {
                    "projectId": "source_project_id",
                    "tableId": "source_table_id$123",
                    "datasetId": "source_dataset_id",
                },
                "destinationTable": {
                    "projectId": "target_project_id",
                    "tableId": "target_table_id",
                    "datasetId": "target_dataset_id",
                },
                "createDisposition": "CREATE_IF_NEEDED",
                "writeDisposition": "WRITE_TRUNCATE",
            }
        },
    }

    # Successful completion: statistics present, no errors.
    DONE = {
        "status": {"state": "DONE"},
        "statistics": {
            "startTime": "1511356638992",
            "endTime": "1511356648992",
        },
        "configuration": {
            "copy": {
                "sourceTable": {
                    "projectId": "source_project_id",
                    "tableId": "source_table_id$123",
                    "datasetId": "source_dataset_id",
                },
                "destinationTable": {
                    "projectId": "target_project_id",
                    "tableId": "target_table_id",
                    "datasetId": "target_dataset_id",
                },
                "createDisposition": "CREATE_IF_NEEDED",
                "writeDisposition": "WRITE_TRUNCATE",
            }
        },
    }

    # Finished with a retryable failure: errorResult is the backend error.
    DONE_WITH_RETRY_ERRORS = {
        "status": {
            "state": "DONE",
            "errors": [
                {
                    "reason": "invalid",
                    "message": "Cannot read a table without a schema",
                },
                {
                    "reason": "backendError",
                    "message": "Backend error",
                },
            ],
            "errorResult": {
                "reason": "backendError",
                "message": "Backend error",
            },
        },
        "statistics": {
            "startTime": "1511356638992",
            "endTime": "1511356648992",
        },
        "configuration": {
            "copy": {
                "sourceTable": {
                    "projectId": "source_project_id",
                    "tableId": "source_table_id$123",
                    "datasetId": "source_dataset_id",
                },
                "destinationTable": {
                    "projectId": "target_project_id",
                    "tableId": "target_table_id",
                    "datasetId": "target_dataset_id",
                },
                "createDisposition": "CREATE_NEVER",
                "writeDisposition": "WRITE_TRUNCATE",
            }
        },
    }

    # Finished with a non-retryable failure: errorResult is the invalid error.
    DONE_WITH_NOT_REPETITIVE_ERRORS = {
        "status": {
            "state": "DONE",
            "errors": [
                {
                    "reason": "invalid",
                    "message": "Cannot read a table without a schema",
                },
                {
                    "reason": "backendError",
                    "message": "Backend error",
                },
            ],
            "errorResult": {
                "reason": "invalid",
                "message": "Cannot read a table without a schema",
            },
        },
        "statistics": {
            "startTime": "1511356638992",
            "endTime": "1511356648992",
        },
        "configuration": {
            "copy": {
                "sourceTable": {
                    "projectId": "source_project_id",
                    "tableId": "source_table_id$123",
                    "datasetId": "source_dataset_id",
                },
                "destinationTable": {
                    "projectId": "target_project_id",
                    "tableId": "target_table_id",
                    "datasetId": "target_dataset_id",
                },
                "createDisposition": "CREATE_IF_NEEDED",
                "writeDisposition": "WRITE_TRUNCATE",
            }
        },
    }
|
from app import app, database
from util import random_string
from flask import request
from hashlib import sha1
import arrow
import json
@app.route('/api/v1/paste', methods=('POST',))
def paste():
    # Create a paste from a JSON body: {paste, [language], [api_key],
    # [expiration seconds], [domain]} -> JSON {'paste': url, 'expires': ts}.
    # (Python 2 code: uses dict.has_key.)
    paste = None
    language = None
    user = None
    expiration = None
    domain = 'https://zifb.in/'
    try:
        data = json.loads(request.data)
    except ValueError:
        return json.dumps({'error': 'invalid json'}), 400
    #Get Paste
    if not data.has_key('paste'):
        return json.dumps({'error': 'paste not found'}), 400
    paste = data.get('paste')
    #Get Language
    if data.has_key('language'):
        language = data.get('language')
    #Get API_KEY/User
    if data.has_key('api_key'):
        # NOTE(review): .first() may return None for an unknown key, which
        # would raise AttributeError here — confirm upstream validation.
        user = database.ApiKey.objects(key=data.get('api_key')).first().user
    #Get Expiration
    if data.has_key('expiration'):
        s = data.get('expiration')
        try:
            s = int(s)
        except ValueError:
            return json.dumps({'error': 'invalid expiration format, should be number of seconds'})
        if s is None or s == 0:
            expiration = None
        else:
            expiration = arrow.utcnow().replace(seconds=+s).datetime
    # Anonymous pastes without an explicit expiration default to one hour.
    if not user and not expiration:
        expiration = arrow.utcnow().replace(hours=+1).datetime
    #Get Domain
    if data.has_key('domain'):
        domain = 'https://{0}/'.format(data.get('domain'))
    paste = database.Paste(name='testing', paste=paste, digest=sha1(paste.encode('utf-8')).hexdigest(), time=arrow.utcnow().datetime,
                           expire=expiration, user=user, language=language)
    # Pick a random short name that is not already taken.
    paste.name = random_string()
    while database.Paste.objects(name=paste.name).first():
        paste.name = random_string()
    paste.save()
    return json.dumps({'paste': '{0}{1}'.format(domain, paste.name),
                       'expires': arrow.get(paste.expire).format('YYYY/MM/DD hh:mm')})
|
from django.shortcuts import render,redirect
from .models import Cart
# Create your views here.
from products.models import Product
def cart_home(request):
    """Render the cart page for the current session's cart."""
    cart_obj, _created = Cart.objects.new_or_get(request)
    context = {"cart": cart_obj}
    return render(request, "carts/cart_home.html", context)
def cart_update(request):
    """Toggle a product in the session cart, then return to the cart page."""
    product_id = request.POST.get('product_id')
    if product_id is None:
        return redirect("cart:home")
    try:
        product_obj = Product.objects.get(id=product_id)
    except Product.DoesNotExist:
        print("Show Message to user, Product in gone?")
        return redirect("cart:home")
    cart_obj, _created = Cart.objects.new_or_get(request)
    # A second click on the same product removes it from the cart.
    if product_obj in cart_obj.products.all():
        cart_obj.products.remove(product_obj)
    else:
        cart_obj.products.add(product_obj)
    # Keep the session's cart counter in sync for the navbar badge.
    request.session['cart_items'] = cart_obj.products.count()
    return redirect("cart:home")
# ov -> one variable
from .differentiable_function import DifferentiableFunction, T
from .constant import Constant
from .linear import Linear
from .trigonometry import Sin, Cos, sin, cos
from .exp import Exp, exp
from .log import Log, log
from .pow import Pow
# Public API of the one-variable differentiable-function package.
__all__ = [
    "cos",
    "exp",
    "log",
    "sin",
    "Constant",
    "Cos",
    "DifferentiableFunction",
    "Exp",
    "Linear",
    "Log",
    "Pow",
    # BUGFIX: Sin is imported above (alongside Cos) but was missing here, so
    # `from <package> import *` silently dropped it.
    "Sin",
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.