Dataset columns:

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
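Each record below is one row of this table: the repository metadata fields first, then the `content` cell (the file itself), then the three derived statistics. For orientation, here is a minimal, hedged sketch of how a dataset with exactly these columns could be streamed and filtered with the Hugging Face `datasets` library; the dataset name is a placeholder, not the actual source of this dump.

```python
# Minimal sketch (not tied to a specific dataset): stream rows with this schema
# and keep small Python files. "org/dataset-name" is a placeholder.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

for row in ds:
    if row["lang"] == "Python" and row["size"] < 2_000:
        print(row["hexsha"][:8], row["max_stars_repo_path"], row["max_stars_count"])
        break
```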
---
hexsha: 794eee7a39da3031070e91ec81d62d5dadd3bb1e | size: 471 | ext: py | lang: Python
max_stars: Lab_3/part_2.py @ spencerperley/CPE_101 (head 9ae3c5a0042780f824de5edee275b35cdb0bbaec) | licenses: ["MIT"] | stars: 1 | events: 2022-01-12T21:48:23.000Z to 2022-01-12T21:48:23.000Z
max_issues: Lab_3/part_2.py @ spencerperley/CPE_101 (head 9ae3c5a0042780f824de5edee275b35cdb0bbaec) | licenses: ["MIT"] | issues: null | events: null
max_forks: Lab_3/part_2.py @ spencerperley/CPE_101 (head 9ae3c5a0042780f824de5edee275b35cdb0bbaec) | licenses: ["MIT"] | forks: null | events: null
content:
list_den = [2, 3, 4, 0, 5, 6, 8]
list_num = []
results = []
entered = 0
i=0
for num in list_den:
while True:
entered = int(input("Enter a number greater than or equal to 10: "))
if entered >= 10:
break
else:
print("Number must be greater than 10")
list_num.append(entered)
results.append((list_num[i]/list_den[i]) if (list_den[i] != 0) else (-1))
i += 1
print(results)
avg_line_length: 21.409091 | max_line_length: 77 | alphanum_fraction: 0.524416
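The three trailing numbers on each record (avg_line_length, max_line_length, alphanum_fraction) are simple functions of `content`. The exact definitions used upstream are not given here, so the sketch below is an assumption about how they are most plausibly computed.

```python
# Plausible derivation of the per-record statistics from `content`
# (an assumption, not the dataset's documented definition).
def row_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }
```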
---
hexsha: 794ef040c3a7f25257f7612ec1f908c19e848ed0 | size: 2,424 | ext: py | lang: Python
max_stars: doc/sphinx/example-acoustics-1d/setplot_3.py @ geoflows/geoclaw-4.x (head c8879d25405017b38392aa3b1ea422ff3e3604ea) | licenses: ["BSD-3-Clause"] | stars: 7 | events: 2016-11-13T03:11:51.000Z to 2021-09-07T18:59:48.000Z
max_issues: doc/sphinx/example-acoustics-1d/setplot_3.py @ che-wenchao/D-Claw (head 8ab5d971c9a7a7130e03a447a4b8642e292f4e88) | licenses: ["BSD-3-Clause"] | issues: 11 | events: 2020-01-14T18:00:37.000Z to 2022-03-29T14:25:24.000Z
max_forks: doc/sphinx/example-acoustics-1d/setplot_3.py @ che-wenchao/D-Claw (head 8ab5d971c9a7a7130e03a447a4b8642e292f4e88) | licenses: ["BSD-3-Clause"] | forks: 6 | events: 2020-01-14T17:15:42.000Z to 2021-12-03T17:28:44.000Z
content:
"""
Single figure with two axes
=============================
The pressure q[0] and q[1] are plotted on two sets of axes in a single
figure.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
plotdata.clearfigures() # clear any old figures,axes,items data
plotfigure = plotdata.new_plotfigure(name='Solution', figno=1)
# Pressure:
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(211)'
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-.5,1.1]
plotaxes.title = 'Pressure'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-'
plotitem.color = 'b'
# Set up for second item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = 'o'
plotitem.color = '#ff00ff' # any color supported by matplotlib
# Velocity:
# Set up for second axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.axescmd = 'subplot(212)'
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-.5,.5]
plotaxes.title = 'Velocity'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='1d')
plotitem.plot_var = 1
plotitem.plotstyle = 'o-'
plotitem.color = 'b'
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html'# pointer for index page
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 1 # layout of plots
plotdata.latex_framesperline = 2 # layout of plots
plotdata.latex_makepdf = True # also run pdflatex?
return plotdata
avg_line_length: 31.076923 | max_line_length: 74 | alphanum_fraction: 0.616749
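The `setplot_3.py` record above builds a figure with pressure in `subplot(211)` and velocity in `subplot(212)` through Clawpack's plotdata objects. For readers unfamiliar with that API, this is roughly the layout it produces, sketched in plain matplotlib with synthetic data; it is not part of the Clawpack pipeline.

```python
# Plain-matplotlib sketch of the same two-stacked-axes layout
# (pressure on top, velocity below) using synthetic data.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 200)
pressure = np.exp(-100 * (x - 0.5) ** 2)
velocity = 0.3 * np.sin(8 * np.pi * x)

fig, (ax_p, ax_u) = plt.subplots(2, 1, sharex=True)
ax_p.plot(x, pressure, "b-")
ax_p.set_ylim(-0.5, 1.1)
ax_p.set_title("Pressure")
ax_u.plot(x, velocity, "bo-")
ax_u.set_ylim(-0.5, 0.5)
ax_u.set_title("Velocity")
plt.show()
```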
---
hexsha: 794ef0926bc7ea5930935578d8207f5da587518d | size: 1,374 | ext: py | lang: Python
max_stars: examples/linkedViews.py @ nodedge/pyqtgraph (head 5a08650853a339c383281fd531fe059b74e1bbcd) | licenses: ["MIT"] | stars: 1 | events: 2021-04-28T05:16:24.000Z to 2021-04-28T05:16:24.000Z
max_issues: examples/linkedViews.py @ nodedge/pyqtgraph (head 5a08650853a339c383281fd531fe059b74e1bbcd) | licenses: ["MIT"] | issues: null | events: null
max_forks: examples/linkedViews.py @ nodedge/pyqtgraph (head 5a08650853a339c383281fd531fe059b74e1bbcd) | licenses: ["MIT"] | forks: null | events: null
content:
# -*- coding: utf-8 -*-
"""
This example demonstrates the ability to link the axes of views together
Views can be linked manually using the context menu, but only if they are given
names.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
app = pg.mkQApp("Linked Views Example")
#mw = QtGui.QMainWindow()
#mw.resize(800,800)
x = np.linspace(-50, 50, 1000)
y = np.sin(x) / x
win = pg.GraphicsLayoutWidget(show=True, title="pyqtgraph example: Linked Views")
win.resize(800,600)
win.addLabel("Linked Views", colspan=2)
win.nextRow()
p1 = win.addPlot(x=x, y=y, name="Plot1", title="Plot1")
p2 = win.addPlot(x=x, y=y, name="Plot2", title="Plot2: Y linked with Plot1")
p2.setLabel('bottom', "Label to test offset")
p2.setYLink('Plot1') ## test linking by name
## create plots 3 and 4 out of order
p4 = win.addPlot(x=x, y=y, name="Plot4", title="Plot4: X -> Plot3 (deferred), Y -> Plot1", row=2, col=1)
p4.setXLink('Plot3') ## Plot3 has not been created yet, but this should still work anyway.
p4.setYLink(p1)
p3 = win.addPlot(x=x, y=y, name="Plot3", title="Plot3: X linked with Plot1", row=2, col=0)
p3.setXLink(p1)
p3.setLabel('left', "Label to test offset")
#QtGui.QApplication.processEvents()
if __name__ == '__main__':
pg.mkQApp().exec_()
avg_line_length: 29.869565 | max_line_length: 104 | alphanum_fraction: 0.699418
---
hexsha: 794ef1aab91c4c4546ee1703544ca5c10f7babb9 | size: 5,298 | ext: py | lang: Python
max_stars: onnx_tf/common/__init__.py @ tanp5364/onnx-tensorflow (head 08e41de7b127a53d072a54730e4784fe50f8c7c3) | licenses: ["Apache-2.0"] | stars: 56 | events: 2018-12-20T11:16:20.000Z to 2022-01-26T08:22:49.000Z
max_issues: onnx_tf/common/__init__.py @ tanp5364/onnx-tensorflow (head 08e41de7b127a53d072a54730e4784fe50f8c7c3) | licenses: ["Apache-2.0"] | issues: 16 | events: 2019-04-29T15:22:45.000Z to 2021-04-22T16:05:09.000Z
max_forks: onnx_tf/common/__init__.py @ tanp5364/onnx-tensorflow (head 08e41de7b127a53d072a54730e4784fe50f8c7c3) | licenses: ["Apache-2.0"] | forks: 59 | events: 2019-03-05T12:37:43.000Z to 2021-11-08T11:01:18.000Z
content:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import re
import sys
import uuid
import warnings
import logging
from onnx.backend.base import DeviceType
from tensorflow.python.client import device_lib
IS_PYTHON3 = sys.version_info > (3,)
logger = logging.getLogger('onnx-tf')
# create console handler and formatter for logger
console = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
class Deprecated:
"""Add deprecated message when function is called.
Usage:
from onnx_tf.common import deprecated
@deprecated
def func():
pass
UserWarning: func is deprecated. It will be removed in future release.
@deprecated("Message")
def func():
pass
UserWarning: Message
@deprecated({"arg": "Message",
"arg_1": deprecated.MSG_WILL_REMOVE,
"arg_2": "",})
def func(arg, arg_1, arg_2):
pass
UserWarning: Message
UserWarning: arg_1 of func is deprecated. It will be removed in future release.
UserWarning: arg_2 of func is deprecated.
"""
MSG_WILL_REMOVE = " It will be removed in future release."
def __call__(self, *args, **kwargs):
return self.deprecated_decorator(*args, **kwargs)
@staticmethod
def messages():
return {v for k, v in inspect.getmembers(Deprecated) if k.startswith("MSG")}
@staticmethod
def deprecated_decorator(arg=None):
# deprecate function with default message MSG_WILL_REMOVE
# @deprecated
if inspect.isfunction(arg):
def wrapper(*args, **kwargs):
warnings.warn("{} is deprecated.{}".format(
arg.__module__ + "." + arg.__name__, Deprecated.MSG_WILL_REMOVE))
return arg(*args, **kwargs)
return wrapper
deprecated_arg = arg if arg is not None else Deprecated.MSG_WILL_REMOVE
def deco(func):
# deprecate arg
# @deprecated({...})
if isinstance(deprecated_arg, dict):
for name, message in deprecated_arg.items():
if message in Deprecated.messages():
message = "{} of {} is deprecated.{}".format(
name, func.__module__ + "." + func.__name__, message or "")
warnings.warn(message)
# deprecate function with message
# @deprecated("message")
elif isinstance(deprecated_arg, str):
message = deprecated_arg
if message in Deprecated.messages():
message = "{} is deprecated.{}".format(
func.__module__ + "." + func.__name__, message)
warnings.warn(message)
return func
return deco
deprecated = Deprecated()
# This function inserts an underscore before every upper
# case letter and lowers that upper case letter except for
# the first letter.
def op_name_to_lower(name):
return re.sub('(?<!^)(?=[A-Z])', '_', name).lower()
def get_unique_suffix():
""" Get unique suffix by using first 8 chars from uuid.uuid4
to make unique identity name.
:return: Unique suffix string.
"""
return str(uuid.uuid4())[:8]
def get_perm_from_formats(from_, to_):
""" Get perm from data formats.
For example:
get_perm_from_formats('NHWC', 'NCHW') = [0, 3, 1, 2]
:param from_: From data format string.
:param to_: To data format string.
:return: Perm. Int list.
"""
return list(map(lambda x: from_.find(x), to_))
# TODO: allow more flexible placement
def get_device_option(device):
m = {DeviceType.CPU: '/cpu', DeviceType.CUDA: '/gpu'}
return m[device.type]
def get_data_format(x_rank):
""" Get data format by input rank.
  Channel first if CUDA is supported.
:param x_rank: Input rank.
:return: Data format.
"""
sp_dim_names = ["D", "H", "W"]
sp_dim_lst = []
for i in range(x_rank - 2):
sp_dim_lst.append(sp_dim_names[-i - 1])
sp_dim_string = "".join(reversed(sp_dim_lst))
storage_format = "NC" + sp_dim_string
if supports_device("CUDA"):
compute_format = "NC" + sp_dim_string
else:
compute_format = "N" + sp_dim_string + "C"
return storage_format, compute_format
def supports_device(device):
""" Check if support target device.
:param device: CUDA or CPU.
:return: If supports.
"""
if device == "CUDA":
local_device_protos = device_lib.list_local_devices()
return len([x.name for x in local_device_protos if x.device_type == 'GPU'
]) > 0
elif device == "CPU":
return True
return False
@deprecated("onnx_tf.common.get_outputs_names is deprecated.{} {}".format(
deprecated.MSG_WILL_REMOVE,
"Use TensorflowGraph.get_outputs_names instead."))
def get_output_node_names(graph_def):
"""Get output node names from GraphDef.
Args:
graph_def: GraphDef object.
Returns:
List of output node names.
"""
nodes, input_names = dict(), set()
for node in graph_def.node:
nodes[node.name] = node
input_names.update(set(node.input))
return list(set(nodes) - input_names)
CONST_MINUS_ONE_INT32 = "_onnx_tf_internal_minus_one_int32"
CONST_ZERO_INT32 = "_onnx_tf_internal_zero_int32"
CONST_ONE_INT32 = "_onnx_tf_internal_one_int32"
CONST_ONE_FP32 = "_onnx_tf_internal_one_fp32"
avg_line_length: 26.893401 | max_line_length: 83 | alphanum_fraction: 0.68271
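One small utility in the `onnx_tf/common/__init__.py` record above, `get_perm_from_formats`, computes the axis permutation between two data-format strings. A quick standalone check with NumPy; the helper is restated here so the snippet runs on its own.

```python
# The returned perm, fed to transpose, moves NHWC data into NCHW order.
import numpy as np

def get_perm_from_formats(from_, to_):
    return [from_.find(x) for x in to_]

x_nhwc = np.zeros((2, 32, 32, 3))
perm = get_perm_from_formats("NHWC", "NCHW")   # [0, 3, 1, 2]
x_nchw = np.transpose(x_nhwc, perm)
print(perm, x_nchw.shape)                      # [0, 3, 1, 2] (2, 3, 32, 32)
```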
---
hexsha: 794ef211c9bcdfbf5f165e36dd0f985e905d1f6e | size: 4,839 | ext: py | lang: Python
max_stars: prediction_utils/prediction_utils/extraction_utils/extract_features.py @ som-shahlab/sepsis_transfer_learning_public (head e41b3d1f43f0e59726e04215ea0da9c9919c0f68) | licenses: ["MIT"] | stars: 9 | events: 2020-07-22T09:01:40.000Z to 2022-02-03T18:44:15.000Z
max_issues: prediction_utils/extraction_utils/extract_features.py @ som-shahlab/fairness_benchmark (head 7d9e2619bf636acb2a9261ed7b4fdf59c105b8a1) | licenses: ["MIT"] | issues: null | events: null
max_forks: prediction_utils/extraction_utils/extract_features.py @ som-shahlab/fairness_benchmark (head 7d9e2619bf636acb2a9261ed7b4fdf59c105b8a1) | licenses: ["MIT"] | forks: 2 | events: 2020-07-26T01:28:05.000Z to 2021-12-07T13:16:52.000Z
content:
import configargparse as argparse
import os
from prediction_utils.extraction_utils.featurizer import BigQueryOMOPFeaturizer
parser = argparse.ArgumentParser(description="An extraction script")
parser.add_argument(
"--data_path",
type=str,
default="/share/pi/nigam/projects/spfohl/cohorts/admissions/starr_20200404",
)
parser.add_argument(
"--features_by_analysis_path", type=str, default="features_by_analysis"
)
parser.add_argument(
"--gcloud_storage_path",
type=str,
default="gs://feature_extraction_exports/cohorts/scratch/",
)
parser.add_argument("--gcloud_project", type=str, default="som-nero-phi-nigam-starr")
parser.add_argument("--dataset_project", type=str, default="")
parser.add_argument("--rs_dataset_project", type=str, default="")
parser.add_argument("--dataset", type=str, default="starr_omop_cdm5_deid_20200404")
parser.add_argument("--rs_dataset", type=str, default="plp_cohort_tables")
parser.add_argument("--features_dataset", type=str, default="temp_dataset")
parser.add_argument("--features_prefix", type=str, default="features")
parser.add_argument(
"--cohort_name", type=str, default="admission_rollup_20200404_with_labels_sampled"
)
parser.add_argument("--index_date_field", type=str, default="admit_date")
parser.add_argument("--limit", type=int, default=None)
parser.add_argument("--row_id_field", type=str, default="prediction_id")
parser.add_argument(
"--google_application_credentials",
type=str,
default=os.path.expanduser("~/.config/gcloud/application_default_credentials.json"),
)
parser.add_argument("--dask_temp_dir", type=str, default=None)
parser.add_argument("--time_bins", type=int, default=None, nargs="*")
parser.add_argument("--time_bins_hourly", type=int, default=None, nargs="*")
parser.add_argument("--analysis_ids", type=str, default=None, nargs="*")
parser.add_argument("--exclude_analysis_ids", type=str, default=None, nargs="*")
parser.add_argument("--merged_name", type=str, default="merged_features")
parser.add_argument("--binary", dest="binary", action="store_true")
parser.add_argument(
"--featurize",
dest="featurize",
action="store_true",
help="Whether to run the featurization",
)
parser.add_argument(
"--no_featurize",
dest="featurize",
action="store_false",
help="Whether to run the featurization",
)
parser.add_argument(
"--cloud_storage",
dest="cloud_storage",
action="store_true",
help="Whether to write the results to cloud storage",
)
parser.add_argument(
"--no_cloud_storage",
dest="cloud_storage",
action="store_false",
help="Whether to write the results to cloud storage",
)
parser.add_argument(
"--merge_features",
dest="merge_features",
action="store_true",
help="Whether to merge the features",
)
parser.add_argument(
"--no_merge_features",
dest="merge_features",
action="store_false",
help="Whether to merge the features",
)
parser.add_argument(
"--create_parquet",
dest="create_parquet",
action="store_true",
help="Whether to create parquet on merge",
)
parser.add_argument(
"--no_create_parquet",
dest="create_parquet",
action="store_false",
help="Whether to create parquet on merge",
)
parser.add_argument(
"--create_sparse",
dest="create_sparse",
action="store_true",
help="Whether to create sparse array on merge",
)
parser.add_argument(
"--no_create_sparse",
dest="create_sparse",
action="store_false",
help="Whether to create sparse array on merge",
)
parser.add_argument(
"--overwrite",
dest="overwrite",
action="store_true",
help="Whether to overwrite results",
)
parser.set_defaults(
merge_features=False,
featurize=True,
create_parquet=True,
create_sparse=True,
create_h5=False,
binary=False,
cloud_storage=False,
overwrite=False,
)
if __name__ == "__main__":
args = parser.parse_args()
featurizer = BigQueryOMOPFeaturizer(
include_all_history=True,
**args.__dict__
)
if args.featurize:
if args.cloud_storage:
featurizer.featurize_to_destination(
analysis_ids=args.analysis_ids,
exclude_analysis_ids=args.exclude_analysis_ids,
merge_features=args.merge_features,
)
else:
featurizer.featurize(
analysis_ids=args.analysis_ids,
exclude_analysis_ids=args.exclude_analysis_ids,
)
if args.merge_features:
featurizer.merge_features(
merged_name=args.merged_name,
create_sparse=args.create_sparse,
create_parquet=args.create_parquet,
binary=args.binary,
load_extension="parquet",
dask_temp_dir=args.dask_temp_dir,
)
avg_line_length: 27.651429 | max_line_length: 88 | alphanum_fraction: 0.699525
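The extraction script above relies on a paired `--flag` / `--no_flag` argparse idiom: both options write to the same `dest`, and `set_defaults` supplies the fallback. Distilled into a self-contained sketch:

```python
# Paired boolean flags sharing one dest; set_defaults provides the default.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--featurize", dest="featurize", action="store_true")
parser.add_argument("--no_featurize", dest="featurize", action="store_false")
parser.set_defaults(featurize=True)

print(parser.parse_args([]).featurize)                  # True (default)
print(parser.parse_args(["--no_featurize"]).featurize)  # False
```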
---
hexsha: 794ef2528edf9b5f5698b833e0b50158d4ed9271 | size: 10,851 | ext: py | lang: Python
max_stars: collections/nemo_asr/nemo_asr/parts/jasper.py @ petermartigny/NeMo (head b20821e637314940e36b63d32c601c43d1b74051) | licenses: ["Apache-2.0"] | stars: 1 | events: 2020-03-22T11:23:11.000Z to 2020-03-22T11:23:11.000Z
max_issues: collections/nemo_asr/nemo_asr/parts/jasper.py @ petermartigny/NeMo (head b20821e637314940e36b63d32c601c43d1b74051) | licenses: ["Apache-2.0"] | issues: null | events: null
max_forks: collections/nemo_asr/nemo_asr/parts/jasper.py @ petermartigny/NeMo (head b20821e637314940e36b63d32c601c43d1b74051) | licenses: ["Apache-2.0"] | forks: 1 | events: 2019-10-23T01:19:19.000Z to 2019-10-23T01:19:19.000Z
content:
# Taken straight from Patter https://github.com/ryanleary/patter
# TODO: review, and copyright and fix/add comments
import torch
import torch.nn as nn
jasper_activations = {
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
}
def init_weights(m, mode='xavier_uniform'):
if isinstance(m, nn.Conv1d) or isinstance(m, MaskedConv1d):
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif isinstance(m, nn.BatchNorm1d):
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def get_same_padding(kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
if dilation > 1:
return (dilation * kernel_size) // 2 - 1
return kernel_size // 2
class MaskedConv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, heads=-1, bias=False,
use_mask=True):
if not (heads == -1 or groups == in_channels):
raise ValueError("Only use heads for depthwise convolutions")
if heads != -1:
self.real_out_channels = out_channels
in_channels = heads
out_channels = heads
groups = heads
super(MaskedConv1d, self).__init__(in_channels, out_channels,
kernel_size,
stride=stride,
padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.use_mask = use_mask
self.heads = heads
def get_seq_len(self, lens):
return ((lens + 2 * self.padding[0] - self.dilation[0] * (
self.kernel_size[0] - 1) - 1) / self.stride[0] + 1)
def forward(self, x, lens):
if self.use_mask:
lens = lens.to(dtype=torch.long)
max_len = x.size(2)
mask = torch.arange(max_len).to(lens.device)\
.expand(len(lens), max_len) >= lens.unsqueeze(1)
x = x.masked_fill(
mask.unsqueeze(1).type(torch.bool).to(device=x.device), 0
)
del mask
lens = self.get_seq_len(lens)
if self.heads != -1:
sh = x.shape
x = x.view(-1, self.heads, sh[-1])
out, lens = super(MaskedConv1d, self).forward(x), lens
if self.heads != -1:
out = out.view(sh[0], self.real_out_channels, -1)
return out, lens
class GroupShuffle(nn.Module):
def __init__(self, groups, channels):
super(GroupShuffle, self).__init__()
self.groups = groups
self.channels_per_group = channels // groups
def forward(self, x):
sh = x.shape
x = x.view(-1, self.groups, self.channels_per_group, sh[-1])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.groups * self.channels_per_group, sh[-1])
return x
class JasperBlock(nn.Module):
def __init__(self, inplanes, planes, repeat=3, kernel_size=11, stride=1,
dilation=1, padding='same', dropout=0.2, activation=None,
residual=True, groups=1, separable=False,
heads=-1, tied=False, normalization="batch",
norm_groups=1, residual_mode='add',
residual_panes=[], conv_mask=False):
super(JasperBlock, self).__init__()
if padding != "same":
raise ValueError("currently only 'same' padding is supported")
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
self.conv_mask = conv_mask
self.separable = separable
self.residual_mode = residual_mode
self.conv = nn.ModuleList()
inplanes_loop = inplanes
if tied:
rep_layer = self._get_conv_bn_layer(
inplanes_loop,
planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val,
groups=groups,
heads=heads,
separable=separable,
normalization=normalization,
norm_groups=norm_groups)
for _ in range(repeat - 1):
if tied:
self.conv.extend(rep_layer)
else:
self.conv.extend(
self._get_conv_bn_layer(
inplanes_loop,
planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val,
groups=groups,
heads=heads,
separable=separable,
normalization=normalization,
norm_groups=norm_groups))
self.conv.extend(
self._get_act_dropout_layer(
drop_prob=dropout,
activation=activation))
inplanes_loop = planes
if tied:
self.conv.extend(rep_layer)
else:
self.conv.extend(
self._get_conv_bn_layer(
inplanes_loop,
planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val,
groups=groups,
heads=heads,
separable=separable,
normalization=normalization,
norm_groups=norm_groups))
self.res = nn.ModuleList() if residual else None
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
if len(residual_panes) == 0:
res_panes = [inplanes]
self.dense_residual = False
for ip in res_panes:
self.res.append(
nn.ModuleList(
modules=self._get_conv_bn_layer(
ip,
planes,
kernel_size=1,
normalization=normalization,
norm_groups=norm_groups)))
self.out = nn.Sequential(
*
self._get_act_dropout_layer(
drop_prob=dropout,
activation=activation))
def _get_conv_bn_layer(self, in_channels, out_channels, kernel_size=11,
stride=1, dilation=1, padding=0, bias=False,
groups=1, heads=-1, separable=False,
normalization="batch", norm_groups=1):
if norm_groups == -1:
norm_groups = out_channels
if separable:
layers = [
MaskedConv1d(in_channels, in_channels, kernel_size,
stride=stride,
dilation=dilation, padding=padding, bias=bias,
groups=in_channels, heads=heads,
use_mask=self.conv_mask),
MaskedConv1d(in_channels, out_channels, kernel_size=1,
stride=1,
dilation=1, padding=0, bias=bias, groups=groups,
use_mask=self.conv_mask)
]
else:
layers = [
MaskedConv1d(in_channels, out_channels, kernel_size,
stride=stride,
dilation=dilation, padding=padding, bias=bias,
groups=groups,
use_mask=self.conv_mask)
]
if normalization == "group":
layers.append(nn.GroupNorm(
num_groups=norm_groups, num_channels=out_channels))
elif normalization == "instance":
layers.append(nn.GroupNorm(
num_groups=out_channels, num_channels=out_channels))
elif normalization == "layer":
layers.append(nn.GroupNorm(
num_groups=1, num_channels=out_channels))
elif normalization == "batch":
layers.append(nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1))
else:
raise ValueError(
f"Normalization method ({normalization}) does not match"
f" one of [batch, layer, group, instance].")
if groups > 1:
layers.append(GroupShuffle(groups, out_channels))
return layers
def _get_act_dropout_layer(self, drop_prob=0.2, activation=None):
if activation is None:
activation = nn.Hardtanh(min_val=0.0, max_val=20.0)
layers = [
activation,
nn.Dropout(p=drop_prob)
]
return layers
def forward(self, input_):
xs, lens_orig = input_
# compute forward convolutions
out = xs[-1]
lens = lens_orig
for i, l in enumerate(self.conv):
# if we're doing masked convolutions, we need to pass in and
# possibly update the sequence lengths
# if (i % 4) == 0 and self.conv_mask:
if isinstance(l, MaskedConv1d):
out, lens = l(out, lens)
else:
out = l(out)
# compute the residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if isinstance(res_layer, MaskedConv1d):
res_out, _ = res_layer(res_out, lens_orig)
else:
res_out = res_layer(res_out)
if self.residual_mode == 'add':
out = out + res_out
else:
out = torch.max(out, res_out)
# compute the output
out = self.out(out)
if self.res is not None and self.dense_residual:
return xs + [out], lens
return [out], lens
avg_line_length: 35.230519 | max_line_length: 79 | alphanum_fraction: 0.518017
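The core trick in `MaskedConv1d` from the Jasper record above is zeroing out time steps beyond each sequence length before the convolution runs. Isolated into a few lines, assuming PyTorch is available:

```python
# Build a (batch, 1, time) mask from per-sequence lengths and zero the padding.
import torch

x = torch.ones(2, 1, 5)               # (batch, channels, time)
lens = torch.tensor([3, 5])
mask = torch.arange(x.size(2)).expand(len(lens), x.size(2)) >= lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0.0)
print(x[0, 0])                        # tensor([1., 1., 1., 0., 0.])
```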
---
hexsha: 794ef309cf2c12c6de54d781a2575bfe23e58b82 | size: 920 | ext: py | lang: Python
max_stars: examples/Python/jupyter_strip_output.py @ yxlao/Open3D (head 59691600bc0dc07a0aa4a7355b0dc5cee32d3531) | licenses: ["MIT"] | stars: 3 | events: 2019-06-13T21:51:14.000Z to 2022-03-07T11:05:23.000Z
max_issues: examples/Python/jupyter_strip_output.py @ yxlao/Open3D (head 59691600bc0dc07a0aa4a7355b0dc5cee32d3531) | licenses: ["MIT"] | issues: 1 | events: 2019-01-24T00:33:28.000Z to 2019-06-11T00:44:32.000Z
max_forks: examples/Python/jupyter_strip_output.py @ yxlao/Open3D (head 59691600bc0dc07a0aa4a7355b0dc5cee32d3531) | licenses: ["MIT"] | forks: 3 | events: 2018-11-19T16:13:06.000Z to 2019-06-13T10:17:55.000Z
content:
import nbformat
import nbconvert
from pathlib import Path
import os
if __name__ == "__main__":
# Setting os.environ["CI"] will disable interactive (blocking) mode in
# Jupyter notebooks
os.environ["CI"] = "true"
file_dir = Path(__file__).absolute().parent
nb_paths = sorted((file_dir / "Basic").glob("*.ipynb"))
nb_paths += sorted((file_dir / "Advanced").glob("*.ipynb"))
for nb_path in nb_paths:
print(f"Clean {nb_path.name}")
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
ep = nbconvert.preprocessors.ClearOutputPreprocessor(timeout=6000)
try:
ep.preprocess(nb, {"metadata": {"path": nb_path.parent}})
except nbconvert.preprocessors.execute.CellExecutionError:
print(f"Cleaning of {nb_path.name} failed")
with open(nb_path, "w", encoding="utf-8") as f:
nbformat.write(nb, f)
avg_line_length: 34.074074 | max_line_length: 74 | alphanum_fraction: 0.640217
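The Open3D helper above walks over example notebooks and clears their outputs via `ClearOutputPreprocessor`. The same core operation on a single notebook, reduced to a minimal sketch (`notebook.ipynb` is a placeholder path):

```python
# Clear all cell outputs of one notebook in place.
import nbformat
from nbconvert.preprocessors import ClearOutputPreprocessor

nb = nbformat.read("notebook.ipynb", as_version=4)
ClearOutputPreprocessor().preprocess(nb, {})
nbformat.write(nb, "notebook.ipynb")
```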
---
hexsha: 794ef459aece9ffb5dabf04b8994f620291ecd65 | size: 2,180 | ext: py | lang: Python
max_stars: base/nn/pt/model.py @ yd8534976/Personae (head 5da37a51c446275a5d2e2c9d745fbf24b736c340) | licenses: ["MIT"] | stars: null | events: null
max_issues: base/nn/pt/model.py @ yd8534976/Personae (head 5da37a51c446275a5d2e2c9d745fbf24b736c340) | licenses: ["MIT"] | issues: null | events: null
max_forks: base/nn/pt/model.py @ yd8534976/Personae (head 5da37a51c446275a5d2e2c9d745fbf24b736c340) | licenses: ["MIT"] | forks: null | events: null
content:
# coding=utf-8
import numpy as np
from abc import abstractmethod
class BasePTModel(object):
def __init__(self, env, **options):
self.env = env
try:
self.learning_rate = options['learning_rate']
except KeyError:
self.learning_rate = 0.001
try:
self.batch_size = options['batch_size']
except KeyError:
self.batch_size = 32
try:
self.save_path = options["save_path"]
except KeyError:
self.save_path = None
try:
self.mode = options['mode']
except KeyError:
self.mode = 'train'
@abstractmethod
def train(self):
pass
@abstractmethod
def predict(self, a):
pass
@abstractmethod
def restore(self):
pass
@abstractmethod
def run(self):
pass
class BaseRLPTModel(BasePTModel):
def __init__(self, env, a_space, s_space, **options):
super(BaseRLPTModel, self).__init__(env, **options)
self.env = env
self.a_space, self.s_space = a_space, s_space
try:
self.episodes = options['episodes']
except KeyError:
self.episodes = 30
try:
self.gamma = options['gamma']
except KeyError:
self.gamma = 0.9
try:
self.tau = options['tau']
except KeyError:
self.tau = 0.01
try:
self.buffer_size = options['buffer_size']
except KeyError:
self.buffer_size = 2000
try:
self.mode = options['mode']
except KeyError:
self.mode = 'train'
@abstractmethod
def _init_input(self, *args):
pass
@abstractmethod
def _init_nn(self, *args):
pass
@abstractmethod
def _init_op(self):
pass
@abstractmethod
def save_transition(self, s, a, r, s_n):
pass
@abstractmethod
def log_loss(self, episode):
pass
@staticmethod
def get_a_indices(a):
a = np.where(a > 1 / 3, 2, np.where(a < - 1 / 3, 1, 0)).astype(np.int32)[0].tolist()
return a
avg_line_length: 19.818182 | max_line_length: 92 | alphanum_fraction: 0.542202
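The option handling in `BasePTModel` above repeats a try/except KeyError block per option. A more compact equivalent uses `dict.get` with defaults; this is an alternative sketch, not the repository's code:

```python
# Same defaults as the class above, expressed with dict.get.
class BasePTModelCompact:
    def __init__(self, env, **options):
        self.env = env
        self.learning_rate = options.get("learning_rate", 0.001)
        self.batch_size = options.get("batch_size", 32)
        self.save_path = options.get("save_path")
        self.mode = options.get("mode", "train")
```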
---
hexsha: 794ef61c1c9e24c6c6a75527e09ffb1fca30eb50 | size: 303 | ext: py | lang: Python
max_stars: wafw00f/plugins/tencent.py @ aqyoung/scan-wafw00f (head a95a94253f138d5ef791232ef4d8371de41622b6) | licenses: ["BSD-3-Clause"] | stars: 1 | events: 2019-08-01T11:19:55.000Z to 2019-08-01T11:19:55.000Z
max_issues: wafw00f/plugins/tencent.py @ aqyoung/scan-wafw00f (head a95a94253f138d5ef791232ef4d8371de41622b6) | licenses: ["BSD-3-Clause"] | issues: null | events: null
max_forks: wafw00f/plugins/tencent.py @ aqyoung/scan-wafw00f (head a95a94253f138d5ef791232ef4d8371de41622b6) | licenses: ["BSD-3-Clause"] | forks: 2 | events: 2017-12-27T15:56:15.000Z to 2017-12-27T20:03:09.000Z
content:
#!/usr/bin/env python
NAME = 'Tencent Cloud Firewall (Tencent Technologies)'
def is_waf(self):
for attack in self.attacks:
r = attack(self)
if r is None:
return
_, page = r
if b'waf.tencent-cloud.com/' in page:
return True
return False
avg_line_length: 20.2 | max_line_length: 54 | alphanum_fraction: 0.570957
---
hexsha: 794efc1b8c69f15770db216885afe17bd00c0a87 | size: 13,876 | ext: py | lang: Python
max_stars: tensor2tensor/models/mtf_resnet.py @ levskaya/tensor2tensor (head 4643800137f802693f880a1fab9e10de7ba32e66) | licenses: ["Apache-2.0"] | stars: 1 | events: 2021-04-05T09:55:38.000Z to 2021-04-05T09:55:38.000Z
max_issues: tensor2tensor/models/mtf_resnet.py @ levskaya/tensor2tensor (head 4643800137f802693f880a1fab9e10de7ba32e66) | licenses: ["Apache-2.0"] | issues: null | events: null
max_forks: tensor2tensor/models/mtf_resnet.py @ levskaya/tensor2tensor (head 4643800137f802693f880a1fab9e10de7ba32e66) | licenses: ["Apache-2.0"] | forks: 1 | events: 2020-07-13T03:15:32.000Z to 2020-07-13T03:15:32.000Z
content:
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet model with model and data parallelism using MTF.
Integration of Mesh tensorflow with ResNet to do model parallelism.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import mesh_tensorflow as mtf
from tensor2tensor.layers import common_hparams
from tensor2tensor.utils import mtf_model
from tensor2tensor.utils import registry
import tensorflow as tf
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def batch_norm_relu(inputs, is_training, relu=True):
"""Block of batch norm and relu."""
inputs = mtf.layers.batch_norm(
inputs,
is_training,
BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
init_zero=(not relu))
if relu:
inputs = mtf.relu(inputs)
return inputs
def bottleneck_block(inputs,
filters,
is_training,
strides,
projection_shortcut=None,
row_blocks_dim=None,
col_blocks_dim=None):
"""Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: a `mtf.Tensor` of shape
`[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`.
filters: `int` number of filters for the first two convolutions. Note
that the third and final convolution will use 4 times as many filters.
is_training: `bool` for whether the model is in training mode.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
projection_shortcut: `function` to use for projection shortcuts (typically
a 1x1 convolution to match the filter dimensions). If None, no
projection is used and the input is passed as unchanged through the
shortcut connection.
row_blocks_dim: a mtf.Dimension, row dimension which is
spatially partitioned along mesh axis
col_blocks_dim: a mtf.Dimension, row dimension which is
spatially partitioned along mesh axis
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if projection_shortcut is not None:
filters_dim = mtf.Dimension("filtersp", filters)
shortcut = projection_shortcut(inputs, filters_dim)
# First conv block
inputs = mtf.layers.conv2d_with_blocks(
inputs,
mtf.Dimension("filters1", filters),
filter_size=[1, 1],
strides=[1, 1],
padding="SAME",
h_blocks_dim=None, w_blocks_dim=col_blocks_dim,
name="conv0")
# TODO(nikip): Add Dropout?
inputs = batch_norm_relu(inputs, is_training)
# Second conv block
inputs = mtf.layers.conv2d_with_blocks(
inputs,
mtf.Dimension("filters2", 4 * filters),
filter_size=[3, 3],
strides=[1, 1],
padding="SAME",
h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim,
name="conv1")
inputs = batch_norm_relu(inputs, is_training)
# Third wide conv filter block
inputs = mtf.layers.conv2d_with_blocks(
inputs,
mtf.Dimension("filters3", filters),
filter_size=[1, 1],
strides=strides,
padding="SAME",
h_blocks_dim=None, w_blocks_dim=col_blocks_dim,
name="conv2")
  # TODO(nikip): Although the original resnet code has this batch norm, in our
# setup this is causing no gradients to be passed. Investigate further.
# inputs = batch_norm_relu(inputs, is_training, relu=True)
# TODO(nikip): Maybe add residual with a projection?
return mtf.relu(
shortcut + mtf.rename_dimension(
inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name))
def block_layer(inputs,
filters,
blocks,
strides,
is_training,
name,
row_blocks_dim=None,
col_blocks_dim=None):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
is_training: `bool` for whether the model is training.
name: `str`name for the Tensor output of the block layer.
row_blocks_dim: a mtf.Dimension, row dimension which is
spatially partitioned along mesh axis
col_blocks_dim: a mtf.Dimension, row dimension which is
spatially partitioned along mesh axis
Returns:
The output `Tensor` of the block layer.
"""
with tf.variable_scope(name, default_name="block_layer"):
# Only the first block per block_layer uses projection_shortcut and strides
def projection_shortcut(inputs, output_dim):
"""Project identity branch."""
inputs = mtf.layers.conv2d_with_blocks(
inputs,
output_dim,
filter_size=[1, 1],
strides=strides,
padding="SAME",
h_blocks_dim=None, w_blocks_dim=col_blocks_dim,
name="shortcut0")
return batch_norm_relu(
inputs, is_training, relu=False)
inputs = bottleneck_block(
inputs,
filters,
is_training,
strides=strides,
projection_shortcut=projection_shortcut,
row_blocks_dim=row_blocks_dim,
col_blocks_dim=col_blocks_dim)
for i in range(1, blocks):
with tf.variable_scope("bottleneck_%d" % i):
inputs = bottleneck_block(
inputs,
filters,
is_training,
strides=[1, 1, 1, 1],
projection_shortcut=None,
row_blocks_dim=row_blocks_dim,
col_blocks_dim=col_blocks_dim)
return inputs
@registry.register_model
class MtfResNet(mtf_model.MtfModel):
"""ResNet in mesh_tensorflow."""
def set_activation_type(self):
hparams = self._hparams
if hparams.activation_dtype == "float32":
activation_dtype = tf.float32
elif hparams.activation_dtype == "float16":
activation_dtype = tf.float16
elif hparams.activation_dtype == "bfloat16":
activation_dtype = tf.bfloat16
else:
raise ValueError(
"unknown hparams.activation_dtype %s" % hparams.activation_dtype)
return activation_dtype
def mtf_model_fn(self, features, mesh):
features = copy.copy(features)
tf.logging.info("features = %s" % features)
hparams = self._hparams
activation_dtype = self.set_activation_type()
is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
# Declare all the dimensions
batch_dim = mtf.Dimension("batch", hparams.batch_size)
hidden_dim = mtf.Dimension("hidden", hparams.hidden_size)
filter_dim = mtf.Dimension("filters", hparams.filter_sizes[0])
rows_dim = mtf.Dimension("rows_size", hparams.rows_size)
cols_dim = mtf.Dimension("cols_size", hparams.cols_size)
row_blocks_dim = mtf.Dimension("row_blocks", hparams.row_blocks)
col_blocks_dim = mtf.Dimension("col_blocks", hparams.col_blocks)
classes_dim = mtf.Dimension("classes", 10)
channels_dim = mtf.Dimension("channels", 3)
one_channel_dim = mtf.Dimension("one_channel", 1)
inputs = features["inputs"]
x = mtf.import_tf_tensor(
mesh, tf.reshape(inputs, [
hparams.batch_size,
hparams.row_blocks,
hparams.rows_size // hparams.row_blocks,
hparams.col_blocks,
hparams.num_channels*hparams.cols_size // hparams.col_blocks,
hparams.num_channels]),
mtf.Shape(
[batch_dim, row_blocks_dim, rows_dim,
col_blocks_dim, cols_dim, channels_dim]))
x = mtf.transpose(x, [batch_dim, row_blocks_dim, col_blocks_dim,
rows_dim, cols_dim, channels_dim])
x = mtf.to_float(x)
x = mtf.layers.conv2d_with_blocks(
x,
filter_dim,
filter_size=[3, 3],
strides=[1, 1],
padding="SAME",
h_blocks_dim=None, w_blocks_dim=col_blocks_dim,
name="initial_filter")
x = batch_norm_relu(x, is_training)
# Conv blocks
# [block - strided block layer - strided block layer] x n
for layer in range(hparams.num_layers):
layer_name = "block_layer_%d" % layer
with tf.variable_scope(layer_name):
# Residual block layer
x = block_layer(
inputs=x,
filters=hparams.filter_sizes[0],
blocks=hparams.layer_sizes[0],
strides=[1, 1],
is_training=is_training,
name="block_layer1",
row_blocks_dim=None,
col_blocks_dim=None)
x = block_layer(
inputs=x,
filters=hparams.filter_sizes[1],
blocks=hparams.layer_sizes[1],
strides=[1, 1],
is_training=is_training,
name="block_layer2",
row_blocks_dim=None,
col_blocks_dim=None)
x = block_layer(
inputs=x,
filters=hparams.filter_sizes[2],
blocks=hparams.layer_sizes[2],
strides=[1, 1],
is_training=is_training,
name="block_layer3",
row_blocks_dim=None,
col_blocks_dim=None)
# Calculate the logits and loss.
out = x
outputs = mtf.layers.dense(
out, hidden_dim,
reduced_dims=out.shape.dims[-5:],
activation=mtf.relu, name="dense")
# We assume fixed vocab size for targets
labels = tf.squeeze(tf.to_int32(features["targets"]), [2, 3])
labels = mtf.import_tf_tensor(
mesh, tf.reshape(labels, [hparams.batch_size]), mtf.Shape([batch_dim]))
logits = mtf.layers.dense(outputs, classes_dim, name="logits")
soft_targets = mtf.one_hot(labels, classes_dim, dtype=activation_dtype)
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, soft_targets, classes_dim)
# Reshape logits so it doesn't break inside t2t.
logits = mtf.reshape(
logits,
mtf.Shape([batch_dim, one_channel_dim, classes_dim]))
loss = mtf.reduce_mean(loss)
return logits, loss
@registry.register_hparams
def mtf_resnet_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.batch_size = 32
hparams.max_length = 3072
hparams.hidden_size = 256
hparams.label_smoothing = 0.0
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "batch:8")
hparams.add_hparam("layout", "batch:batch")
hparams.add_hparam("filter_size", 1024)
hparams.add_hparam("num_layers", 6)
# Share weights between input and target embeddings
hparams.shared_embedding = True
hparams.shared_embedding_and_softmax_weights = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("d_kv", 32)
# Image related hparams
hparams.add_hparam("img_len", 32)
hparams.add_hparam("num_channels", 3)
hparams.add_hparam("row_blocks", 1)
hparams.add_hparam("col_blocks", 1)
hparams.add_hparam("rows_size", 32)
hparams.add_hparam("cols_size", 32)
# Model-specific parameters
hparams.add_hparam("layer_sizes", [3, 4, 6, 3])
hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512])
hparams.add_hparam("is_cifar", False)
# Variable init
hparams.initializer = "normal_unit_scaling"
hparams.initializer_gain = 2.
# TODO(nikip): Change optimization scheme?
hparams.learning_rate = 0.1
return hparams
@registry.register_hparams
def mtf_resnet_tiny():
"""Catch bugs locally..."""
hparams = mtf_resnet_base()
hparams.num_layers = 2
hparams.hidden_size = 64
hparams.filter_size = 64
hparams.batch_size = 16
# data parallelism and model-parallelism
hparams.col_blocks = 1
hparams.mesh_shape = "batch:2"
hparams.layout = "batch:batch"
hparams.layer_sizes = [1, 2, 3]
hparams.filter_sizes = [64, 64, 64]
return hparams
@registry.register_hparams
def mtf_resnet_single():
"""Small single parameters."""
hparams = mtf_resnet_tiny()
hparams.mesh_shape = ""
hparams.layout = ""
hparams.hidden_size = 32
hparams.filter_size = 32
hparams.batch_size = 1
hparams.num_encoder_layers = 1
hparams.num_layers = 1
hparams.block_length = 16
return hparams
@registry.register_hparams
def mtf_resnet_base_single():
"""Small single parameters."""
hparams = mtf_resnet_base()
hparams.num_layers = 6
hparams.filter_size = 256
hparams.block_length = 128
hparams.mesh_shape = ""
hparams.layout = ""
return hparams
@registry.register_hparams
def mtf_resnet_base_cifar():
"""Data parallel CIFAR parameters."""
hparams = mtf_resnet_base()
hparams.mesh_shape = "batch:32"
  hparams.layout = "batch:batch"
hparams.batch_size = 8
hparams.num_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams
avg_line_length: 32.57277 | max_line_length: 79 | alphanum_fraction: 0.679374
---
hexsha: 794efd079558e810e6448e15461f656d62951170 | size: 1,872 | ext: py | lang: Python
max_stars: connector/mysql.py @ BranKein/Practice-for-Transporting-Image-by-Flask (head 22b4185ef3329a1c3408bd634dd85cc5e31edd0f) | licenses: ["MIT"] | stars: null | events: null
max_issues: connector/mysql.py @ BranKein/Practice-for-Transporting-Image-by-Flask (head 22b4185ef3329a1c3408bd634dd85cc5e31edd0f) | licenses: ["MIT"] | issues: null | events: null
max_forks: connector/mysql.py @ BranKein/Practice-for-Transporting-Image-by-Flask (head 22b4185ef3329a1c3408bd634dd85cc5e31edd0f) | licenses: ["MIT"] | forks: null | events: null
content:
from config.mysql import host, port, username, password, database
import pymysql
class Transaction(object):
def __init__(self, sql):
self.sql = sql
def start(self, level='REPEATABLE READ'):
self.sql.query("SET AUTOCOMMIT=FALSE")
self.sql.query(f"SET SESSION TRANSACTION ISOLATION LEVEL {level}")
self.sql.query("SET innodb_lock_wait_timeout = 10")
self.sql.query("START TRANSACTION")
def commit(self):
self.sql.query("COMMIT")
self.sql.query("SET AUTOCOMMIT=TRUE")
def rollback(self):
self.sql.query("ROLLBACK")
self.sql.query("SET AUTOCOMMIT=TRUE")
class MySQL(object):
def __init__(self, dict_cursor=False):
try:
self.__connInfo = pymysql.connect(host=host, port=port, user=username,
password=password, db=database, charset='utf8')
except Exception as e:
self.__cursor = None
raise e
else:
if dict_cursor:
self.__cursor = self.__connInfo.cursor(pymysql.cursors.DictCursor)
else:
self.__cursor = self.__connInfo.cursor()
self.query("SET AUTOCOMMIT=TRUE")
self.query("SET TIME_ZONE='+09:00'")
return
def __del__(self):
try:
self.__connInfo.close()
except:
return False
else:
return True
def query(self, query_string, *args):
if query_string is None:
return None
if self.__cursor is None:
return None
self.__cursor.execute(query_string, *args)
return self.__cursor.fetchall()
def escape(self, string):
return self.__connInfo.escape(string)
@property
def transaction(self):
return Transaction(self)
__all__ = ['MySQL']
avg_line_length: 28.363636 | max_line_length: 93 | alphanum_fraction: 0.586538
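The `Transaction` class in the record above exposes start/commit/rollback as separate calls. That trio maps naturally onto a context manager; the sketch below is not part of the original file, and it assumes the module is importable as `connector.mysql` (the import path is taken from the record's repo path and may differ in a real checkout).

```python
# Hypothetical context-manager wrapper around the Transaction class above.
from contextlib import contextmanager

from connector.mysql import MySQL
from connector.mysql import Transaction  # not in __all__, but importable directly

@contextmanager
def transaction(sql, level="REPEATABLE READ"):
    tx = Transaction(sql)
    tx.start(level)
    try:
        yield sql
        tx.commit()
    except Exception:
        tx.rollback()
        raise

# usage (requires a reachable MySQL server configured in config.mysql):
# db = MySQL()
# with transaction(db):
#     db.query("SELECT 1")
```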
---
hexsha: 794efd88c4c591016cc9be45d1d45757d02b47ff | size: 4,887 | ext: py | lang: Python
max_stars: third_party/Paste/paste/translogger.py @ tingshao/catapult (head a8fe19e0c492472a8ed5710be9077e24cc517c5c) | licenses: ["BSD-3-Clause"] | stars: 5,079 | events: 2015-01-01T03:39:46.000Z to 2022-03-31T07:38:22.000Z
max_issues: third_party/Paste/paste/translogger.py @ tingshao/catapult (head a8fe19e0c492472a8ed5710be9077e24cc517c5c) | licenses: ["BSD-3-Clause"] | issues: 4,640 | events: 2015-07-08T16:19:08.000Z to 2019-12-02T15:01:27.000Z
max_forks: third_party/Paste/paste/translogger.py @ tingshao/catapult (head a8fe19e0c492472a8ed5710be9077e24cc517c5c) | licenses: ["BSD-3-Clause"] | forks: 2,033 | events: 2015-01-04T07:18:02.000Z to 2022-03-28T19:55:47.000Z
content:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Middleware for logging requests, using Apache combined log format
"""
import logging
import six
import time
from six.moves.urllib.parse import quote
class TransLogger(object):
"""
This logging middleware will log all requests as they go through.
They are, by default, sent to a logger named ``'wsgi'`` at the
INFO level.
If ``setup_console_handler`` is true, then messages for the named
logger will be sent to the console.
"""
format = ('%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(time)s] '
'"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
'%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"')
def __init__(self, application,
logger=None,
format=None,
logging_level=logging.INFO,
logger_name='wsgi',
setup_console_handler=True,
set_logger_level=logging.DEBUG):
if format is not None:
self.format = format
self.application = application
self.logging_level = logging_level
self.logger_name = logger_name
if logger is None:
self.logger = logging.getLogger(self.logger_name)
if setup_console_handler:
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# We need to control the exact format:
console.setFormatter(logging.Formatter('%(message)s'))
self.logger.addHandler(console)
self.logger.propagate = False
if set_logger_level is not None:
self.logger.setLevel(set_logger_level)
else:
self.logger = logger
def __call__(self, environ, start_response):
start = time.localtime()
req_uri = quote(environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?'+environ['QUERY_STRING']
method = environ['REQUEST_METHOD']
def replacement_start_response(status, headers, exc_info=None):
# @@: Ideally we would count the bytes going by if no
# content-length header was provided; but that does add
# some overhead, so at least for now we'll be lazy.
bytes = None
for name, value in headers:
if name.lower() == 'content-length':
bytes = value
self.write_log(environ, method, req_uri, start, status, bytes)
return start_response(status, headers)
return self.application(environ, replacement_start_response)
def write_log(self, environ, method, req_uri, start, status, bytes):
if bytes is None:
bytes = '-'
if time.daylight:
offset = time.altzone / 60 / 60 * -100
else:
offset = time.timezone / 60 / 60 * -100
if offset >= 0:
offset = "+%0.4d" % (offset)
elif offset < 0:
offset = "%0.4d" % (offset)
remote_addr = '-'
if environ.get('HTTP_X_FORWARDED_FOR'):
remote_addr = environ['HTTP_X_FORWARDED_FOR']
elif environ.get('REMOTE_ADDR'):
remote_addr = environ['REMOTE_ADDR']
d = {
'REMOTE_ADDR': remote_addr,
'REMOTE_USER': environ.get('REMOTE_USER') or '-',
'REQUEST_METHOD': method,
'REQUEST_URI': req_uri,
'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
'time': time.strftime('%d/%b/%Y:%H:%M:%S ', start) + offset,
'status': status.split(None, 1)[0],
'bytes': bytes,
'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
}
message = self.format % d
self.logger.log(self.logging_level, message)
def make_filter(
app, global_conf,
logger_name='wsgi',
format=None,
logging_level=logging.INFO,
setup_console_handler=True,
set_logger_level=logging.DEBUG):
from paste.util.converters import asbool
if isinstance(logging_level, (six.binary_type, six.text_type)):
logging_level = logging._levelNames[logging_level]
if isinstance(set_logger_level, (six.binary_type, six.text_type)):
set_logger_level = logging._levelNames[set_logger_level]
return TransLogger(
app,
format=format or None,
logging_level=logging_level,
logger_name=logger_name,
setup_console_handler=asbool(setup_console_handler),
set_logger_level=set_logger_level)
make_filter.__doc__ = TransLogger.__doc__
avg_line_length: 39.731707 | max_line_length: 84 | alphanum_fraction: 0.600982
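Using the middleware in the Paste record above is a one-liner around any WSGI app. A minimal, hedged example, assuming the `paste` package is installed; the server line is left commented so the snippet does not block:

```python
# Wrap a trivial WSGI app with TransLogger so each request is logged in
# Apache combined log format.
from paste.translogger import TransLogger
from wsgiref.simple_server import make_server

def app(environ, start_response):
    body = b"ok"
    start_response("200 OK", [("Content-Type", "text/plain"),
                              ("Content-Length", str(len(body)))])
    return [body]

logged_app = TransLogger(app, setup_console_handler=True)
# make_server("localhost", 8080, logged_app).serve_forever()
```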
---
hexsha: 794efd8d9f55c7a682c977d933372fde3793b48a | size: 1,755 | ext: bzl | lang: Python
max_stars: build/rust_toolchains.bzl @ jmillikin/rust-fuse (head 17ea55d66d171b44877fa1610365ea9e459be2a2) | licenses: ["Apache-2.0"] | stars: 17 | events: 2020-08-07T10:36:45.000Z to 2022-02-07T07:18:39.000Z
max_issues: build/rust_toolchains.bzl @ jmillikin/rust-fuse (head 17ea55d66d171b44877fa1610365ea9e459be2a2) | licenses: ["Apache-2.0"] | issues: 11 | events: 2020-08-02T04:47:38.000Z to 2020-08-19T06:40:02.000Z
max_forks: build/rust_toolchains.bzl @ jmillikin/rust-fuse (head 17ea55d66d171b44877fa1610365ea9e459be2a2) | licenses: ["Apache-2.0"] | forks: 3 | events: 2021-02-28T03:58:44.000Z to 2022-02-07T07:18:42.000Z
content:
load("@io_bazel_rules_rust//rust:repositories.bzl", "rust_repository_set")
_NIGHTLY_DATE = "2020-12-30"
_SHA256S = {
"2020-12-30/rust-nightly-x86_64-apple-darwin": "2b5b885694d0d1a9bdd0473d9e2df1f2c6eac88986e3135e6573e1d71e7824dc",
"2020-12-30/llvm-tools-nightly-x86_64-apple-darwin": "8aca7ddf73983bf2db4846721787547fed16c2ad4dc5c260f7f05f6b93cea8e7",
"2020-12-30/rust-std-nightly-x86_64-apple-darwin": "17912a6a5aa56daeb0aed5fca8698bacc54950351d9f91989a524588e37e41ca",
"2020-12-30/rust-std-nightly-armv7-unknown-linux-musleabihf": "c7176fe7fccd6ab71535ce1abf81ab71c8cfdffbaa0f51f71d1d13b7f4526f22",
"2020-12-30/rust-std-nightly-x86_64-unknown-linux-musl": "3802d2c7271cdd3fc35921b0d9f999b9b34ac9d888b62085b976453a8b113700",
}
def _rust_repository_set(**kwargs):
rust_repository_set(
edition = "2018",
iso_date = _NIGHTLY_DATE,
rustfmt_version = "1.4.20",
sha256s = _SHA256S,
version = "nightly",
**kwargs
)
def rust_toolchains():
_rust_repository_set(
name = "rustc_armv7-unknown-linux-musleabihf_{}".format(_NIGHTLY_DATE),
exec_triple = "armv7-unknown-linux-musleabihf",
extra_target_triples = [
"x86_64-unknown-linux-musl",
],
)
_rust_repository_set(
name = "rustc_x86_64-unknown-linux-musl_{}".format(_NIGHTLY_DATE),
exec_triple = "x86_64-unknown-linux-musl",
extra_target_triples = [
"armv7-unknown-linux-musleabihf",
],
)
_rust_repository_set(
name = "rustc_x86_64-apple-darwin_{}".format(_NIGHTLY_DATE),
exec_triple = "x86_64-apple-darwin",
extra_target_triples = [
"armv7-unknown-linux-musleabihf",
"x86_64-unknown-linux-musl",
"x86_64-unknown-freebsd",
],
)
avg_line_length: 37.340426 | max_line_length: 133 | alphanum_fraction: 0.720228
---
hexsha: 794eff0da7b004c1e15458d9bef4c8ce05f54654 | size: 4,169 | ext: py | lang: Python
max_stars: app/service/auth_svc.py @ IGchra/caldera (head 75f5a9c3f63139f8f3c6ee6e7cb4ce094e82b1b9) | licenses: ["Apache-2.0"] | stars: 1 | events: 2020-01-27T00:48:03.000Z to 2020-01-27T00:48:03.000Z
max_issues: app/service/auth_svc.py @ EmilioPanti/caldera (head e743703b11883c08d415650e84bbc43bf6d9e9b7) | licenses: ["Apache-2.0"] | issues: null | events: null
max_forks: app/service/auth_svc.py @ EmilioPanti/caldera (head e743703b11883c08d415650e84bbc43bf6d9e9b7) | licenses: ["Apache-2.0"] | forks: null | events: null
content:
import base64
from collections import namedtuple
from aiohttp import web
from aiohttp.web_exceptions import HTTPUnauthorized, HTTPForbidden
from aiohttp_security import SessionIdentityPolicy, check_permission, remember, forget
from aiohttp_security import setup as setup_security
from aiohttp_security.abc import AbstractAuthorizationPolicy
from aiohttp_session import setup as setup_session
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from cryptography import fernet
from app.utility.base_service import BaseService
def check_authorization(func):
async def process(func, *args, **params):
return await func(*args, **params)
async def helper(*args, **params):
await args[0].auth_svc.check_permissions(args[1])
result = await process(func, *args, **params)
return result
return helper
class AuthService(BaseService):
User = namedtuple('User', ['username', 'password', 'permissions'])
def __init__(self, api_key):
self.api_key = api_key
self.user_map = dict()
self.log = self.add_service('auth_svc', self)
async def apply(self, app, users):
"""
Set up security on server boot
:param app:
:param users:
:return: None
"""
for k, v in users.items():
self.user_map[k] = self.User(k, v, ('admin', 'user'),)
app.user_map = self.user_map
fernet_key = fernet.Fernet.generate_key()
secret_key = base64.urlsafe_b64decode(fernet_key)
storage = EncryptedCookieStorage(secret_key, cookie_name='API_SESSION')
setup_session(app, storage)
policy = SessionIdentityPolicy()
setup_security(app, policy, DictionaryAuthorizationPolicy(self.user_map))
@staticmethod
async def logout_user(request):
"""
Log the user out
:param request:
:return: None
"""
await forget(request, web.Response())
raise web.HTTPFound('/login')
async def login_user(self, request):
"""
Log a user in and save the session
:param request:
:return: the response/location of where the user is trying to navigate
"""
data = await request.post()
response = web.HTTPFound('/')
verified = await self._check_credentials(
request.app.user_map, data.get('username'), data.get('password'))
if verified:
await remember(request, response, data.get('username'))
return response
raise web.HTTPFound('/login')
async def check_permissions(self, request):
"""
Check if a request is allowed based on the user permissions
:param request:
:return: None
"""
try:
if request.headers.get('API_KEY') == self.api_key:
return True
elif 'localhost:' in request.host:
return True
await check_permission(request, 'admin')
except (HTTPUnauthorized, HTTPForbidden):
raise web.HTTPFound('/login')
""" PRIVATE """
async def _check_credentials(self, user_map, username, password):
self.log.debug('%s logging in' % username)
user = user_map.get(username)
if not user:
return False
return user.password == password
class DictionaryAuthorizationPolicy(AbstractAuthorizationPolicy):
def __init__(self, user_map):
super().__init__()
self.user_map = user_map
async def authorized_userid(self, identity):
"""Retrieve authorized user id.
Return the user_id of the user identified by the identity
or 'None' if no user exists related to the identity.
"""
if identity in self.user_map:
return identity
async def permits(self, identity, permission, context=None):
"""Check user permissions.
Return True if the identity is allowed the permission in the
current context, else return False.
"""
user = self.user_map.get(identity)
if not user:
return False
return permission in user.permissions
avg_line_length: 32.069231 | max_line_length: 86 | alphanum_fraction: 0.6426
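The `check_authorization` decorator in the Caldera record above assumes the wrapped coroutine's owner exposes an `auth_svc` and that the request is the second positional argument. Here is the pattern reduced to a self-contained sketch with a dummy service; it is not the project's code.

```python
# Generic form of the pattern: run an async permission check bound to the
# handler's object before invoking the handler itself.
import asyncio

def check_authorization(func):
    async def helper(*args, **kwargs):
        await args[0].auth_svc.check_permissions(args[1])
        return await func(*args, **kwargs)
    return helper

class DummyAuthService:
    async def check_permissions(self, request):
        print("checking permissions for", request)

class Api:
    auth_svc = DummyAuthService()

    @check_authorization
    async def handler(self, request):
        return "ok"

print(asyncio.run(Api().handler({"path": "/"})))
```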
---
hexsha: 794f001d000687700b3233986b0bdb66d772c252 | size: 1,919 | ext: py | lang: Python
max_stars: tareas/2/ArvizuLuis/Asesorias_Luis.py @ Miguelp-rez/sistop-2019-2 (head 428444217ba0cc98030a9d84d8b415dcddad9b65) | licenses: ["CC-BY-4.0"] | stars: null | events: null
max_issues: tareas/2/ArvizuLuis/Asesorias_Luis.py @ Miguelp-rez/sistop-2019-2 (head 428444217ba0cc98030a9d84d8b415dcddad9b65) | licenses: ["CC-BY-4.0"] | issues: null | events: null
max_forks: tareas/2/ArvizuLuis/Asesorias_Luis.py @ Miguelp-rez/sistop-2019-2 (head 428444217ba0cc98030a9d84d8b415dcddad9b65) | licenses: ["CC-BY-4.0"] | forks: null | events: null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import threading
global alumno
alumno = 0  # counter for the number of students in the office
preguntas = 0  # counter for the questions asked
max_preguntas = 3  # maximum number of questions asked per student
max_alumnos = 5  # maximum number of students admitted into the office
mutex = threading.Semaphore (1)
profesor = threading.Semaphore(0)
def Asesoria():  # the professor answers the students' questions
    time.sleep (1)
    print ' ---- Profesor resolviendo dudas a Alumnos ---- '
def Pregunta():  # each student asks the professor their 3 questions
    global preguntas
    global alumno
    print ' \nAlumno preguntando '
    print 'pregunta = 1'
    preguntas += 1
    print 'pregunta = 2'
    preguntas += 1
    print 'pregunta = 3'
    preguntas += 1
    if preguntas == max_preguntas:  # once the student has asked their 3 questions
        print '\nSale el Alumno'  # the student leaves
        alumno -= 1  # frees up a spot in the office
        print ' \nQuedan %d' %(alumno) + ' alumnos'
        if alumno >= 5:
            alumno ==-5
            print 'Quedo vacio el cubiculo \n'
            print 'El profesor duerme... \n'
def Profesor():
    global alumno
    global preguntas
    while True:
        profesor.acquire()  # the professor wakes up to give the advising session
        print '\n Profesor Despierto!'  # message indicating the professor woke up
        mutex.acquire()
        Pregunta()  # the students ask their questions
        mutex.release()
def Alumno():
    global alumno
    global preguntas
    mutex.acquire()
    alumno += 1  # counts the students arriving at the office
    print 'Llega un Alumno \n Total de Alumnos = %d' %(alumno)
    if alumno == max_alumnos:  # once there are 5 students, the professor wakes up
        profesor.release()
        print '\t\n 5 Alumnos... \n\n' + 'Despertando al profesor...'
        Asesoria()  # the advising session starts
        Pregunta()  # the students ask their questions
    mutex.release()
threading.Thread(target = Profesor, args = []).start()
while True:
    threading.Thread(target = Alumno, args = []).start()
    time.sleep(1)
avg_line_length: 24.602564 | max_line_length: 77 | alphanum_fraction: 0.702449
---
hexsha: 794f010f3ab22c60fe35b9e00963f2bc4945c0d8 | size: 392 | ext: py | lang: Python
max_stars: database/migrations/0009_auto_20191227_1558.py @ r0somak/inzynierka (head 9306f19fcce9ff1dbb3c7e47874d963769af7508) | licenses: ["MIT"] | stars: 2 | events: 2020-01-08T11:42:08.000Z to 2020-01-08T12:37:50.000Z
max_issues: database/migrations/0009_auto_20191227_1558.py @ r0somak/inzynierka (head 9306f19fcce9ff1dbb3c7e47874d963769af7508) | licenses: ["MIT"] | issues: 10 | events: 2021-03-09T23:21:46.000Z to 2022-02-26T20:12:59.000Z
max_forks: database/migrations/0009_auto_20191227_1558.py @ r0somak/inzynierka (head 9306f19fcce9ff1dbb3c7e47874d963769af7508) | licenses: ["MIT"] | forks: 1 | events: 2020-02-17T08:27:09.000Z to 2020-02-17T08:27:09.000Z
content:
# Generated by Django 3.0 on 2019-12-27 15:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('database', '0008_auto_20191227_1302'),
]
operations = [
migrations.AlterModelOptions(
name='daneepidemiologiczne',
options={'ordering': ('data', 'dane_statystyczne__wojewodztwo')},
),
]
| 21.777778
| 77
| 0.630102
|
794f01f0e714722648f3cf9d4e2f18fa4b309235
| 496
|
py
|
Python
|
plotly/validators/scatterpolar/selected/marker/_color.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scatterpolar/selected/marker/_color.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scatterpolar/selected/marker/_color.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='scatterpolar.selected.marker',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 26.105263
| 66
| 0.608871
|
794f0316b5c80f41d7e861646129285de950a733
| 1,052
|
py
|
Python
|
tests/setup_data.py
|
dmlerner/ynabassistant
|
8482a0cffb38dcb59c7d1ce00a524e07707e3ec1
|
[
"MIT"
] | 19
|
2019-12-23T14:35:37.000Z
|
2021-04-07T03:37:40.000Z
|
tests/setup_data.py
|
jhult/assistantforynab
|
96a213c19f08190a2d352a8759f75f761a9a6089
|
[
"MIT"
] | 1
|
2019-12-20T21:27:51.000Z
|
2019-12-20T21:27:51.000Z
|
tests/setup_data.py
|
jhult/assistantforynab
|
96a213c19f08190a2d352a8759f75f761a9a6089
|
[
"MIT"
] | 4
|
2020-01-10T14:38:08.000Z
|
2021-05-09T00:15:50.000Z
|
from assistantforynab.ynab import ynab
from assistantforynab import settings, utils
from assistantforynab.assistant import Assistant
'''
Restore state for start of tests:
Test data has unlabeled/unsplit
Annotated has labeled/split
Chase Amazon is empty
No other accounts
'''
def delete_extra_accounts():
whitelist = list(map(Assistant.accounts.by_name, ('Test Data', 'Annotated')))
utils.log_info('WARNING: THIS WILL DELETE ALL ACCOUNTS EXCEPT %s!!!' % whitelist)
confirm = input('Type "confirm" to confirm') == 'confirm'
if not confirm:
return
to_delete = filter(lambda a: a not in whitelist, Assistant.accounts)
ynab.queue_delete_accounts(to_delete)
return whitelist
def main():
ynab.api_client.init()
Assistant.download_ynab(accounts=True, transactions=True)
test_data, annotated = delete_extra_accounts()
ynab.do()
Assistant.download_ynab(accounts=True)
ynab.queue_clone_account(test_data, settings.account_name)
ynab.do()
if __name__ == '__main__':
main()
| 28.432432
| 85
| 0.726236
|
794f0353dbbc851cc1a50b03c6cf6a59d763e18b
| 9,733
|
py
|
Python
|
prody/trajectory/trajectory.py
|
RuJinlong/ProDy
|
cf23b96f147321e365b2c5fd2a527018c1babb67
|
[
"MIT"
] | 1
|
2021-06-20T11:53:09.000Z
|
2021-06-20T11:53:09.000Z
|
prody/trajectory/trajectory.py
|
rujinlong/ProDy
|
cf23b96f147321e365b2c5fd2a527018c1babb67
|
[
"MIT"
] | null | null | null |
prody/trajectory/trajectory.py
|
rujinlong/ProDy
|
cf23b96f147321e365b2c5fd2a527018c1babb67
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
# -*- coding: utf-8 -*-
"""This module defines a class for handling multiple trajectories."""
import os.path
import numpy as np
from .trajbase import TrajBase
from .frame import Frame
from prody.trajectory import openTrajFile
__all__ = ['Trajectory']
class Trajectory(TrajBase):
"""A class for handling trajectories in multiple files."""
def __init__(self, name, **kwargs):
"""Trajectory can be instantiated with a *name* or a filename. When
name is a valid path to a trajectory file it will be opened for
reading."""
TrajBase.__init__(self, name)
self._trajectory = None
self._trajectories = []
self._filenames = set()
self._n_files = 0
self._cfi = 0 # current file index
assert 'mode' not in kwargs, 'mode is an invalid keyword argument'
self._kwargs = kwargs
if os.path.isfile(name):
self.addFile(name)
def __repr__(self):
if self._closed:
return '<Trajectory: {0} (closed)>'.format(self._title)
link = ''
if self._ag is not None:
link = 'linked to ' + str(self._ag) + '; '
files = ''
if self._n_files > 1:
files = '{0} files; '.format(self._n_files)
next = 'next {0} of {1} frames; '.format(self._nfi, self._n_csets)
if self._indices is None:
atoms = '{0} atoms'.format(self._n_atoms)
else:
atoms = 'selected {0} of {1} atoms'.format(
self.numSelected(), self._n_atoms)
return '<Trajectory: {0} ({1}{2}{3}{4})>'.format(
self._title, link, files, next, atoms)
def _nextFile(self):
self._cfi += 1
if self._cfi < self._n_files:
self._trajectory = self._trajectories[self._cfi]
if self._trajectory.nextIndex() > 0:
self._trajectory.reset()
def _gotoFile(self, i):
if i < self._n_files:
self._cfi = i
self._trajectory = self._trajectories[i]
if self._trajectory.nextIndex() > 0:
self._trajectory.reset()
def setAtoms(self, atoms):
for traj in self._trajectories:
traj.setAtoms(atoms)
TrajBase.setAtoms(self, atoms)
setAtoms.__doc__ = TrajBase.setAtoms.__doc__
def link(self, *ag):
if ag:
TrajBase.link(self, *ag)
for traj in self._trajectories:
traj.link(*ag)
else:
return self._ag
link.__doc__ = TrajBase.link.__doc__
def addFile(self, filename, **kwargs):
"""Add a file to the trajectory instance. Currently only DCD files
are supported."""
if not isinstance(filename, str):
raise ValueError('filename must be a string')
if os.path.abspath(filename) in self._filenames:
raise IOError('{0} is already added to the trajectory'
.format(filename))
assert 'mode' not in kwargs, 'mode is an invalid keyword argument'
traj = openTrajFile(filename, **(kwargs or self._kwargs))
n_atoms = self._n_atoms
if n_atoms != 0 and n_atoms != traj.numAtoms():
raise IOError('{0} must have same number of atoms as '
'previously loaded files'.format(traj.getTitle()))
if self._n_files == 0:
self._trajectory = traj
self._title = traj.getTitle()
if n_atoms == 0:
self._n_atoms = traj.numAtoms()
self._coords = traj._coords
self._trajectories.append(traj)
self._n_csets += traj.numFrames()
self._n_files += 1
if self._ag is not None:
traj.setAtoms(self._ag)
def numFiles(self):
"""Return number of open trajectory files."""
return self._n_files
def getFilenames(self, absolute=False):
"""Return list of filenames opened for reading."""
return [traj.getFilename(absolute) for traj in self._trajectories]
def getFrame(self, index):
if self._closed:
raise ValueError('I/O operation on closed file')
self.goto(index)
return next(self)
def getCoordsets(self, indices=None):
if self._closed:
raise ValueError('I/O operation on closed file')
if indices is None:
indices = np.arange(self._n_csets)
elif isinstance(indices, int):
indices = np.array([indices])
elif isinstance(indices, slice):
indices = np.arange(*indices.indices(self._n_csets))
indices.sort()
elif isinstance(indices, (list, np.ndarray)):
indices = np.unique(indices)
else:
raise TypeError('indices must be an integer or a list of '
'integers')
nfi = self._nfi
self.reset()
coords = np.zeros((len(indices), self.numSelected(), 3),
self._trajectories[0]._dtype)
prev = 0
next = self.nextCoordset
for i, index in enumerate(indices):
diff = index - prev
if diff > 1:
self.skip(diff)
coords[i] = next()
prev = index
self.goto(nfi)
return coords
getCoordsets.__doc__ = TrajBase.getCoordsets.__doc__
def __next__(self):
if self._closed:
raise ValueError('I/O operation on closed file')
nfi = self._nfi
if nfi < self._n_csets:
traj = self._trajectory
while traj._nfi == traj._n_csets:
self._nextFile()
traj = self._trajectory
unitcell = traj._nextUnitcell()
coords = traj._nextCoordset()
if self._ag is None:
frame = Frame(self, nfi, coords, unitcell)
else:
frame = self._frame
Frame.__init__(frame, self, nfi, None, unitcell)
self._ag.setACSLabel(self._title + ' frame ' + str(self._nfi))
self._nfi += 1
return frame
__next__.__doc__ = TrajBase.__next__.__doc__
next = __next__
def nextCoordset(self):
if self._closed:
raise ValueError('I/O operation on closed file')
if self._nfi < self._n_csets:
traj = self._trajectory
while traj._nfi == traj._n_csets:
self._nextFile()
traj = self._trajectory
if self._ag is not None:
self._ag.setACSLabel(self._title + ' frame ' + str(self._nfi))
traj = self._trajectory
self._nfi += 1
#if self._indices is None:
return traj.nextCoordset()
#else:
# return traj.nextCoordset()[self._indices]
nextCoordset.__doc__ = TrajBase.nextCoordset.__doc__
def goto(self, n):
if self._closed:
raise ValueError('I/O operation on closed file')
if not isinstance(n, int):
raise ValueError('n must be an integer')
n_csets = self._n_csets
if n == 0:
self.reset()
else:
if n < 0:
n = n_csets + n
if n < 0:
n = 0
elif n > n_csets:
n = n_csets
nfi = n
for which, traj in enumerate(self._trajectories):
if traj._n_csets >= nfi:
break
else:
nfi -= traj._n_csets
self._gotoFile(which)
self._trajectory.goto(nfi)
self._nfi = n
goto.__doc__ = TrajBase.goto.__doc__
def skip(self, n):
if self._closed:
raise ValueError('I/O operation on closed file')
if not isinstance(n, int):
raise ValueError('n must be an integer')
left = self._n_csets - self._nfi
if n > left:
n = left
while self._nfi < self._n_csets and n > 0:
traj = self._trajectory
skip = min(n, traj.numFrames() - traj.nextIndex())
traj.skip(skip)
if n > skip:
self._nextFile()
self._nfi += skip
n -= skip
skip.__doc__ = TrajBase.skip.__doc__
def reset(self):
if self._closed:
raise ValueError('I/O operation on closed file')
if self._trajectories:
for traj in self._trajectories:
traj.reset()
self._trajectory = self._trajectories[0]
self._cfi = 0
self._nfi = 0
reset.__doc__ = TrajBase.reset.__doc__
def close(self):
for traj in self._trajectories:
traj.close()
self._closed = True
close.__doc__ = TrajBase.close.__doc__
def hasUnitcell(self):
return np.all([traj.hasUnitcell() for traj in self._trajectories])
hasUnitcell.__doc__ = TrajBase.hasUnitcell.__doc__
def getTimestep(self):
"""Return list of timestep sizes, one number from each file."""
return [traj.getTimestep() for traj in self._trajectories]
def getFirstTimestep(self):
"""Return list of first timestep values, one number from each file."""
return [traj.getFirstTimestep() for traj in self._trajectories]
def getFrameFreq(self):
"""Return list of timesteps between frames, one number from each file.
"""
return [traj.getFrameFreq() for traj in self._trajectories]
def numFixed(self):
"""Return a list of fixed atom numbers, one from each file."""
return [traj.numFixed() for traj in self._trajectories]
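# Usage sketch: a minimal, assumed way to stitch several trajectory files together
# and iterate over frames; the PDB/DCD filenames are placeholders.
if __name__ == '__main__':
    from prody import parsePDB
    traj = Trajectory('sim_part1.dcd')    # open the first trajectory file
    traj.addFile('sim_part2.dcd')         # append a second file to the same trajectory
    traj.link(parsePDB('structure.pdb'))  # link an AtomGroup so frames update its coordinates
    for frame in traj:
        frame.superpose()                 # e.g. superpose each frame onto the reference
    traj.close()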
| 31.095847
| 78
| 0.556355
|
794f0455ba734ec36e78e4619db2691e89c803fd
| 148
|
py
|
Python
|
umlawuli/apps.py
|
mrdvince/dgenie
|
0a901409a9c547d27212e330ad4fce31dc24de83
|
[
"Apache-2.0"
] | null | null | null |
umlawuli/apps.py
|
mrdvince/dgenie
|
0a901409a9c547d27212e330ad4fce31dc24de83
|
[
"Apache-2.0"
] | null | null | null |
umlawuli/apps.py
|
mrdvince/dgenie
|
0a901409a9c547d27212e330ad4fce31dc24de83
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class UmlawuliConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "umlawuli"
| 21.142857
| 56
| 0.763514
|
794f04e9fc3125e7cd6258b206f06f92be085308
| 596
|
py
|
Python
|
tests/test_kelly_criterion.py
|
leki75/kelly-criterion
|
15762531a241e8c6be876b3a72fad49d760b0b22
|
[
"BSD-3-Clause"
] | 58
|
2015-08-13T05:10:19.000Z
|
2022-01-18T15:15:09.000Z
|
tests/test_kelly_criterion.py
|
leki75/kelly-criterion
|
15762531a241e8c6be876b3a72fad49d760b0b22
|
[
"BSD-3-Clause"
] | 2
|
2019-06-13T20:23:37.000Z
|
2021-06-01T23:23:41.000Z
|
tests/test_kelly_criterion.py
|
leki75/kelly-criterion
|
15762531a241e8c6be876b3a72fad49d760b0b22
|
[
"BSD-3-Clause"
] | 25
|
2015-08-13T05:10:23.000Z
|
2022-01-27T11:32:52.000Z
|
from datetime import date
from pytest import approx
from kelly_criterion import calc_kelly_leverages
def test_kelly_criterion():
# Given a time period and multiple securities
start_date = date(2018, 1, 1)
end_date = date(2018, 12, 31)
securities = {'AAPL', 'IBM'}
# When we calculate kelly leverages
actual_leverages = calc_kelly_leverages(securities, start_date, end_date)
# Then the calculated leverages should match the actual ones
expected_leverages = {'AAPL': 1.2944, 'IBM': -5.2150}
assert expected_leverages == approx(actual_leverages, rel=1e-3)
| 28.380952
| 77
| 0.731544
|
794f054d621cdb1e5f002a2a601922c62b74f4cb
| 154
|
py
|
Python
|
Lib/site-packages/nclib/errors.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 57
|
2016-09-15T15:21:38.000Z
|
2022-02-23T03:48:05.000Z
|
nclib/errors.py
|
rhelmot/nclib
|
f18bedac43c2741b06a4e7c3c992b56ea4a2e5d9
|
[
"MIT"
] | 20
|
2016-12-15T21:50:25.000Z
|
2022-01-27T22:27:50.000Z
|
Lib/site-packages/nclib/errors.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 11
|
2016-09-16T06:23:23.000Z
|
2020-07-03T19:24:31.000Z
|
import socket
class NetcatError(Exception):
pass
class NetcatTimeout(NetcatError, socket.timeout):
pass
class NetcatEOF(NetcatError):
pass
| 14
| 49
| 0.753247
|
794f05da8887761cc44bcc6552e37991eb737ce6
| 2,105
|
py
|
Python
|
tests/rw_all.py
|
killvxk/retrowrite
|
879bc668125f096914129e991fe5855c8f548516
|
[
"MIT"
] | 1
|
2021-08-07T15:14:04.000Z
|
2021-08-07T15:14:04.000Z
|
tests/rw_all.py
|
killvxk/retrowrite
|
879bc668125f096914129e991fe5855c8f548516
|
[
"MIT"
] | null | null | null |
tests/rw_all.py
|
killvxk/retrowrite
|
879bc668125f096914129e991fe5855c8f548516
|
[
"MIT"
] | null | null | null |
import argparse
import json
import subprocess
import os
from multiprocessing import Pool
def do_test(cmd):
print("[!] Running on {}".format(cmd))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError:
print("[x] Failed {}".format(cmd))
def do_tests(tests, filter, args, outdir):
assert not (args.ddbg and args.parallel)
pool = Pool()
for test in tests:
if not filter(test):
continue
path = test["path"]
binp = os.path.join(path, test["name"])
outp = os.path.join(outdir, test["name"] + ".s")
if args.ddbg:
outp = os.path.join(outdir, test["name"] + "_asan")
cmd = "python -m debug.ddbg {} {}".format(binp, outp)
elif args.asan:
outp = os.path.join(outdir, test["name"] + "_asan")
cmd = "python -m rwtools.asan.asantool {} {}".format(binp, outp)
else:
cmd = "python -m librw.rw {} {}".format(binp, outp)
if args.parallel:
pool.apply_async(do_test, args=(cmd, ))
else:
do_test(cmd)
pool.close()
pool.join()
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument("test_file", type=str, help="JSON file containing tests")
argp.add_argument(
"--targets",
type=str,
help="Only test build target, comma separated string of names")
argp.add_argument(
"--asan",
action='store_true',
help="Instrument with asan")
argp.add_argument(
"--ddbg",
action='store_true',
help="Do delta debugging")
argp.add_argument(
"--parallel",
action='store_true',
help="Do multiple tests in parallel")
args = argp.parse_args()
filter = lambda x: True
if args.targets:
filter = lambda x: x["name"] in args.targets.split(",")
args.testfile = os.path.abspath(args.test_file)
outdir = os.path.dirname(args.test_file)
with open(args.test_file) as tfd:
do_tests(json.load(tfd), filter, args, outdir)
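# Usage sketch: example invocations (the JSON filename is a placeholder for the
# test list this script expects as its first argument):
#   python rw_all.py tests.json
#   python rw_all.py tests.json --targets foo,bar --parallel
#   python rw_all.py tests.json --asan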
| 27.337662
| 79
| 0.585748
|
794f06b00b5acea7f3b88571c0a2062e631bd492
| 2,312
|
py
|
Python
|
reactor_master/decorators.py
|
AdamTheAnalyst/openc2-reactor-master
|
a2688c7e7ea659c100f0093fa3d8403a95b2dc42
|
[
"MIT"
] | null | null | null |
reactor_master/decorators.py
|
AdamTheAnalyst/openc2-reactor-master
|
a2688c7e7ea659c100f0093fa3d8403a95b2dc42
|
[
"MIT"
] | null | null | null |
reactor_master/decorators.py
|
AdamTheAnalyst/openc2-reactor-master
|
a2688c7e7ea659c100f0093fa3d8403a95b2dc42
|
[
"MIT"
] | 1
|
2020-04-16T12:35:58.000Z
|
2020-04-16T12:35:58.000Z
|
#####################################################
#__________ __ #
#\______ \ ____ _____ _____/ |_ ___________ #
# | _// __ \\__ \ _/ ___\ __\/ _ \_ __ \ #
# | | \ ___/ / __ \\ \___| | ( <_> ) | \/ #
# |____|_ /\___ >____ /\___ >__| \____/|__| #
# \/ \/ \/ \/ #
#####################################################
import base64
from reactor_master.models import OpenC2Action
from functools import wraps
from django.contrib.auth import authenticate, login
from django.http import HttpResponseForbidden
# Logging
import logging
logger = logging.getLogger("console")
def openc2_action(target_list, actuator_list=None):
"""
Decorator for OpenC2 target and actuator types.
"""
def register(function):
name = function.__name__
current_def = function.__globals__.get(name)
if current_def is None:
current_def = OpenC2Action(name)
# Generate all signatures
for target in target_list:
if actuator_list:
for actuator in actuator_list:
sig = {"action":name,"target":target,"actuator":actuator}
logger.info("Registered %s name with signature %s" % (name,sig))
current_def.register(sig, function)
else:
sig = {"action":name,"target":target}
logger.info("Registered %s name with signature %s" % (name,sig))
current_def.register(sig, function)
return current_def
return register
def http_basic_auth(func):
"""
Use as a decorator for views that need to perform HTTP basic
authorisation.
"""
@wraps(func)
def _decorator(request, *args, **kwargs):
if request.META.has_key('HTTP_AUTHORIZATION'):
try:
authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
if authmeth.lower() == 'basic':
auth = auth.strip().decode('base64')
username, password = auth.split(':', 1)
user = authenticate(username=username, password=password)
if user:
login(request, user)
else:
return HttpResponseForbidden()
except ValueError:
# Bad HTTP_AUTHORIZATION header
return HttpResponseForbidden()
return func(request, *args, **kwargs)
return _decorator
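# Usage sketch: hypothetical handlers showing how the decorators above are meant to
# be applied; the action/target/actuator names and the view body are illustrative only.
#
#     @openc2_action(target_list=['ip_addr', 'domain_name'],
#                    actuator_list=['network_firewall'])
#     def deny(command):
#         ...
#
#     @http_basic_auth
#     def command_endpoint(request):
#         ...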
| 25.977528
| 71
| 0.58045
|
794f06f797fba46cda7b16075d419cae37bbcc2c
| 9,687
|
py
|
Python
|
docs/conf.py
|
Nawarrr/wagtail
|
4db71de5a2af19086026605be8fcb92c4be623aa
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
Nawarrr/wagtail
|
4db71de5a2af19086026605be8fcb92c4be623aa
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
Nawarrr/wagtail
|
4db71de5a2af19086026605be8fcb92c4be623aa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Wagtail documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 14 17:38:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
import django
import sphinx_wagtail_theme
from wagtail import VERSION, __version__
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
html_theme = "sphinx_wagtail_theme"
html_theme_path = [sphinx_wagtail_theme.get_html_theme_path()]
html_theme_options = {
"project_name": "Wagtail Documentation",
"github_url": "https://github.com/wagtail/wagtail/blob/main/docs/",
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# Autodoc may need to import some models modules which require django settings
# be configured
os.environ["DJANGO_SETTINGS_MODULE"] = "wagtail.tests.settings"
django.setup()
# Use SQLite3 database engine so it doesn't attempt to use psycopg2 on RTD
os.environ["DATABASE_ENGINE"] = "django.db.backends.sqlite3"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"myst_parser",
"sphinx_wagtail_theme",
]
if not on_rtd:
extensions.append("sphinxcontrib.spelling")
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Wagtail Documentation"
copyright = f"{datetime.now().year}, Torchbox and contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = "{}.{}".format(VERSION[0], VERSION[1])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "README.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# sphinxcontrib.spelling settings
spelling_lang = "en_GB"
spelling_word_list_filename = "spelling_wordlist.txt"
# sphinx.ext.intersphinx settings
intersphinx_mapping = {
"django": (
"https://docs.djangoproject.com/en/stable/",
"https://docs.djangoproject.com/en/stable/_objects/",
)
}
# -- Options for HTML output ----------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ["robots.txt"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# Since we are implementing search with Algolia DocSearch, we do not need Sphinx to
# generate its own index. It might not hurt to keep the Sphinx index, but it
# could potentially speed up the build process.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Wagtaildoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "Wagtail.tex", "Wagtail Documentation", "Torchbox", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "wagtail", "Wagtail Documentation", ["Torchbox"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Wagtail",
"Wagtail Documentation",
"Torchbox",
"Wagtail",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def setup(app):
app.add_js_file("js/banner.js")
| 32.182724
| 98
| 0.714876
|
794f07f1c1aaf581774906dc1c2214a3ca1637db
| 443
|
py
|
Python
|
src/algorithms/answers/binary_search_algorithm.py
|
Soumithri/coding_problems
|
b10820d81677ef0edc9a5f2b310720d8e1df6c76
|
[
"MIT"
] | null | null | null |
src/algorithms/answers/binary_search_algorithm.py
|
Soumithri/coding_problems
|
b10820d81677ef0edc9a5f2b310720d8e1df6c76
|
[
"MIT"
] | 8
|
2020-08-05T02:27:45.000Z
|
2021-04-20T20:48:50.000Z
|
src/algorithms/answers/binary_search_algorithm.py
|
Soumithri/coding_problems
|
b10820d81677ef0edc9a5f2b310720d8e1df6c76
|
[
"MIT"
] | null | null | null |
def binary_search(target, num_list):
    if target is None:
        raise Exception
    if not num_list:
        raise Exception
    left, right = 0, len(num_list)-1
    while left <= right:
        mid = left + (right - left)//2
        if num_list[mid] == target:
            return True
        elif num_list[mid] < target:
            left = mid + 1
        else:
            # shrink the right bound past mid, otherwise the loop can spin forever
            right = mid - 1
    return False
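# A few illustrative checks (example values only):
if __name__ == '__main__':
    assert binary_search(7, [1, 3, 5, 7, 9])
    assert not binary_search(4, [1, 3, 5, 7, 9])
    assert binary_search(1, [1])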
| 22.15
| 38
| 0.525959
|
794f095941a7245fb29b9aece94ac7a04df80a90
| 13,191
|
py
|
Python
|
sql/views.py
|
linqining/Archery
|
334d9e003b1825f7ab9cad4911156f85480cf3a1
|
[
"Apache-2.0"
] | null | null | null |
sql/views.py
|
linqining/Archery
|
334d9e003b1825f7ab9cad4911156f85480cf3a1
|
[
"Apache-2.0"
] | null | null | null |
sql/views.py
|
linqining/Archery
|
334d9e003b1825f7ab9cad4911156f85480cf3a1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import traceback
import simplejson as json
import re
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import Group
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from common.config import SysConfig
from sql.engines import get_engine
from common.utils.permission import superuser_required
from sql.engines.models import ReviewResult, ReviewSet
from sql.utils.tasks import task_info
from .models import Users, SqlWorkflow, QueryPrivileges, ResourceGroup, \
QueryPrivilegesApply, Config, SQL_WORKFLOW_CHOICES, InstanceTag
from sql.utils.workflow_audit import Audit
from sql.utils.sql_review import can_execute, can_timingtask, can_cancel
from common.utils.const import Const, WorkflowDict
from sql.utils.resource_group import user_groups, user_instances
import logging
logger = logging.getLogger('default')
def index(request):
index_path_url = SysConfig().get('index_path_url', 'sqlworkflow')
return HttpResponseRedirect(f"/{index_path_url.strip('/')}/")
def login(request):
"""登录页面"""
if request.user and request.user.is_authenticated:
return HttpResponseRedirect('/')
return render(request, 'login.html')
def sqlworkflow(request):
"""SQL上线工单列表页面"""
return render(request, 'sqlworkflow.html', {'status_list': SQL_WORKFLOW_CHOICES})
# 提交SQL的页面
@permission_required('sql.sql_submit', raise_exception=True)
def submit_sql(request):
user = request.user
    # Get the user's resource groups
    group_list = user_groups(user)
    # Get all active users (notification recipients)
    active_user = Users.objects.filter(is_active=1)
    # Get the system configuration
    archer_config = SysConfig()
    # Create the tag if it does not exist yet
InstanceTag.objects.get_or_create(tag_code='can_write', defaults={'tag_name': '支持上线', 'active': True})
context = {'active_user': active_user, 'group_list': group_list,
'enable_backup_switch': archer_config.get('enable_backup_switch')}
return render(request, 'sqlsubmit.html', context)
# SQL workflow detail page
def detail(request, workflow_id):
workflow_detail = get_object_or_404(SqlWorkflow, pk=workflow_id)
if workflow_detail.status in ['workflow_finish', 'workflow_exception']:
rows = workflow_detail.sqlworkflowcontent.execute_result
else:
rows = workflow_detail.sqlworkflowcontent.review_content
    # Workflows rejected by auto-review do not need the information below
    if workflow_detail.status != 'workflow_autoreviewwrong':
        # Get the current approver and the full approval flow
        audit_auth_group, current_audit_auth_group = Audit.review_info(workflow_id, 2)
        # Whether the current user can review it
        is_can_review = Audit.can_review(request.user, workflow_id, 2)
        # Whether it can be executed
        is_can_execute = can_execute(request.user, workflow_id)
        # Whether it can be scheduled for execution
        is_can_timingtask = can_timingtask(request.user, workflow_id)
        # Whether it can be cancelled
        is_can_cancel = can_cancel(request.user, workflow_id)
        # Get the audit log
try:
audit_id = Audit.detail_by_workflow_id(workflow_id=workflow_id,
workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
last_operation_info = Audit.logs(audit_id=audit_id).latest('id').operation_info
except Exception as e:
logger.debug(f'无审核日志记录,错误信息{e}')
last_operation_info = ''
else:
audit_auth_group = '系统自动驳回'
current_audit_auth_group = '系统自动驳回'
is_can_review = False
is_can_execute = False
is_can_timingtask = False
is_can_cancel = False
last_operation_info = None
    # Get the scheduled execution task info
if workflow_detail.status == 'workflow_timingtask':
job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflow_id)
job = task_info(job_id)
if job:
run_date = job.next_run
else:
run_date = ''
else:
run_date = ''
    # Check whether manual execution confirmation is enabled
manual = SysConfig().get('manual')
review_result = ReviewSet()
if rows:
try:
            # Check that rows can be parsed as JSON
            loaded_rows = json.loads(rows)
            # Backwards compatibility: convert the old '[[]]' format to the new [{}] format
if isinstance(loaded_rows[-1], list):
for r in loaded_rows:
review_result.rows += [ReviewResult(inception_result=r)]
rows = review_result.json()
except json.decoder.JSONDecodeError:
review_result.rows += [ReviewResult(
                # English error message added here because this branch cannot be unit tested
errormessage="Json decode failed."
"执行结果Json解析失败, 请联系管理员"
)]
rows = review_result.json()
else:
rows = workflow_detail.sqlworkflowcontent.review_content
if re.match(r"^select", workflow_detail.sqlworkflowcontent.sql_content.lower()):
query_engine = get_engine(instance=workflow_detail.instance)
select_result = query_engine.query(db_name=workflow_detail.db_name, sql=workflow_detail.sqlworkflowcontent.sql_content, limit_num=1000)
column_list = select_result.column_list
select_rows = select_result.rows
else:
column_list = []
select_rows = []
context = {'workflow_detail': workflow_detail, 'rows': rows, 'last_operation_info': last_operation_info,
'is_can_review': is_can_review, 'is_can_execute': is_can_execute, 'is_can_timingtask': is_can_timingtask,
'is_can_cancel': is_can_cancel, 'audit_auth_group': audit_auth_group, 'manual': manual,
'current_audit_auth_group': current_audit_auth_group, 'run_date': run_date, 'column_list': column_list, 'select_rows': select_rows}
return render(request, 'detail.html', context)
# Rollback SQL page
def rollback(request):
workflow_id = request.GET['workflow_id']
if workflow_id == '' or workflow_id is None:
context = {'errMsg': 'workflow_id参数为空.'}
return render(request, 'error.html', context)
workflow_id = int(workflow_id)
workflow = SqlWorkflow.objects.get(id=workflow_id)
try:
query_engine = get_engine(instance=workflow.instance)
list_backup_sql = query_engine.get_rollback(workflow=workflow)
except Exception as msg:
logger.error(traceback.format_exc())
context = {'errMsg': msg}
return render(request, 'error.html', context)
workflow_detail = SqlWorkflow.objects.get(id=workflow_id)
workflow_title = workflow_detail.workflow_name
rollback_workflow_name = "【回滚工单】原工单Id:%s ,%s" % (workflow_id, workflow_title)
context = {'list_backup_sql': list_backup_sql, 'workflow_detail': workflow_detail,
'rollback_workflow_name': rollback_workflow_name}
return render(request, 'rollback.html', context)
@permission_required('sql.menu_sqlanalyze', raise_exception=True)
def sqlanalyze(request):
"""
    SQL analysis page
    :param request:
    :return:
    """
    # Get the instance list
    instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
    return render(request, 'sqlanalyze.html', {'instances': instances})
# SQL documentation page
@permission_required('sql.menu_document', raise_exception=True)
def dbaprinciples(request):
return render(request, 'dbaprinciples.html')
# Dashboard page
@permission_required('sql.menu_dashboard', raise_exception=True)
def dashboard(request):
return render(request, 'dashboard.html')
# Online SQL query page
@permission_required('sql.menu_query', raise_exception=True)
def sqlquery(request):
    # Get the id of the tag that marks instances as queryable
    tag_id = InstanceTag.objects.get_or_create(
        tag_code='can_read', defaults={'tag_name': '支持查询', 'active': True})[0].id
    # Get the list of instances associated with the user
instances = [slave for slave in user_instances(request.user, type='all', db_type='all', tags=[tag_id])]
context = {'instances': instances}
return render(request, 'sqlquery.html', context)
# SQL slow query log page
@permission_required('sql.menu_slowquery', raise_exception=True)
def slowquery(request):
    # Get the list of instances associated with the user
instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
context = {'tab': 'slowquery', 'instances': instances}
return render(request, 'slowquery.html', context)
# SQL advisor (optimization tool) page
@permission_required('sql.menu_sqladvisor', raise_exception=True)
def sqladvisor(request):
    # Get the list of instances associated with the user
instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
context = {'instances': instances}
return render(request, 'sqladvisor.html', context)
# Query privilege application list page
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def queryapplylist(request):
user = request.user
    # Get the user's resource groups
group_list = user_groups(user)
context = {'group_list': group_list}
return render(request, 'queryapplylist.html', context)
# Query privilege application detail page
def queryapplydetail(request, apply_id):
workflow_detail = QueryPrivilegesApply.objects.get(apply_id=apply_id)
    # Get the current approver and the full approval flow
    audit_auth_group, current_audit_auth_group = Audit.review_info(apply_id, 1)
    # Whether the current user can review it
    is_can_review = Audit.can_review(request.user, apply_id, 1)
    # Get the audit log
if workflow_detail.status == 2:
try:
audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id, workflow_type=1).audit_id
last_operation_info = Audit.logs(audit_id=audit_id).latest('id').operation_info
except Exception as e:
logger.debug(f'无审核日志记录,错误信息{e}')
last_operation_info = ''
else:
last_operation_info = ''
context = {'workflow_detail': workflow_detail, 'audit_auth_group': audit_auth_group,
'last_operation_info': last_operation_info, 'current_audit_auth_group': current_audit_auth_group,
'is_can_review': is_can_review}
return render(request, 'queryapplydetail.html', context)
# User query privilege management page
def queryuserprivileges(request):
    # Get all users
user_list = QueryPrivileges.objects.filter(is_deleted=0).values('user_display').distinct()
context = {'user_list': user_list}
return render(request, 'queryuserprivileges.html', context)
# Session management page
@permission_required('sql.menu_dbdiagnostic', raise_exception=True)
def dbdiagnostic(request):
    # Get the list of instances associated with the user
instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
context = {'tab': 'process', 'instances': instances}
return render(request, 'dbdiagnostic.html', context)
# Workflow audit list page
def workflows(request):
return render(request, "workflow.html")
# Workflow audit detail page
def workflowsdetail(request, audit_id):
    # Return a different detail page depending on workflow_type
audit_detail = Audit.detail(audit_id)
if audit_detail.workflow_type == WorkflowDict.workflow_type['query']:
return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(audit_detail.workflow_id,)))
elif audit_detail.workflow_type == WorkflowDict.workflow_type['sqlreview']:
return HttpResponseRedirect(reverse('sql:detail', args=(audit_detail.workflow_id,)))
# Configuration management page
@superuser_required
def config(request):
    # Get all resource groups
    group_list = ResourceGroup.objects.all()
    # Get all permission groups
    auth_group_list = Group.objects.all()
    # Get all configuration items
all_config = Config.objects.all().values('item', 'value')
sys_config = {}
for items in all_config:
sys_config[items['item']] = items['value']
context = {'group_list': group_list, 'auth_group_list': auth_group_list,
'config': sys_config, 'WorkflowDict': WorkflowDict}
return render(request, 'config.html', context)
# Resource group management page
@superuser_required
def group(request):
return render(request, 'group.html')
# Resource group relationship management page
@superuser_required
def groupmgmt(request, group_id):
group = ResourceGroup.objects.get(group_id=group_id)
return render(request, 'groupmgmt.html', {'group': group})
# Instance management page
@permission_required('sql.menu_instance', raise_exception=True)
def instance(request):
    # Get instance tags
tags = InstanceTag.objects.filter(active=True)
return render(request, 'instance.html', {'tags': tags})
# Instance user management page
@permission_required('sql.menu_instance', raise_exception=True)
def instanceuser(request, instance_id):
return render(request, 'instanceuser.html', {'instance_id': instance_id})
# Instance parameter management page
@permission_required('sql.menu_param', raise_exception=True)
def instance_param(request):
    # Get the list of instances associated with the user
instances = user_instances(request.user, type='all', db_type=['mysql', 'inception', 'goinception'])
context = {'tab': 'param_tab', 'instances': instances}
return render(request, 'param.html', context)
# binlog2sql page
@permission_required('sql.menu_binlog2sql', raise_exception=True)
def binlog2sql(request):
    # Get the instance list
instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
return render(request, 'binlog2sql.html', {'instances': instances})
# Database schema comparison page
@permission_required('sql.menu_schemasync', raise_exception=True)
def schemasync(request):
    # Get the instance list
instances = [instance.instance_name for instance in user_instances(request.user, type='all', db_type='mysql')]
return render(request, 'schemasync.html', {'instances': instances})
| 34.896825
| 146
| 0.70677
|
794f099a8fa0f21b1c87eeae82713a7cafbd7f42
| 2,100
|
py
|
Python
|
proxy/environment.py
|
gigimon/proxy-model.py
|
cf9112d299cfcd46f7d3ad716d34cc0300e2e494
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/environment.py
|
gigimon/proxy-model.py
|
cf9112d299cfcd46f7d3ad716d34cc0300e2e494
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/environment.py
|
gigimon/proxy-model.py
|
cf9112d299cfcd46f7d3ad716d34cc0300e2e494
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import subprocess
import logging
from solana.publickey import PublicKey
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
solana_url = os.environ.get("SOLANA_URL", "http://localhost:8899")
evm_loader_id = os.environ.get("EVM_LOADER")
neon_cli_timeout = float(os.environ.get("NEON_CLI_TIMEOUT", "0.1"))
class solana_cli:
def call(self, *args):
try:
cmd = ["solana",
"--url", solana_url,
] + list(args)
logger.debug("Calling: " + " ".join(cmd))
return subprocess.check_output(cmd, universal_newlines=True)
except subprocess.CalledProcessError as err:
logger.debug("ERR: solana error {}".format(err))
raise
class neon_cli:
def call(self, *args):
try:
cmd = ["neon-cli",
"--commitment=recent",
"--url", solana_url,
"--evm_loader={}".format(evm_loader_id),
] + list(args)
logger.debug("Calling: " + " ".join(cmd))
return subprocess.check_output(cmd, timeout=neon_cli_timeout, universal_newlines=True)
except subprocess.CalledProcessError as err:
logger.debug("ERR: neon-cli error {}".format(err))
raise
def version(self):
try:
cmd = ["neon-cli",
"--version"]
logger.debug("Calling: " + " ".join(cmd))
return subprocess.check_output(cmd, timeout=neon_cli_timeout, universal_newlines=True).split()[1]
except subprocess.CalledProcessError as err:
logger.debug("ERR: neon-cli error {}".format(err))
raise
def read_elf_params(out_dict):
for param in neon_cli().call("neon-elf-params").splitlines():
if param.startswith('NEON_') and '=' in param:
v = param.split('=')
out_dict[v[0]] = v[1]
ELF_PARAMS = {}
read_elf_params(ELF_PARAMS)
COLLATERAL_POOL_BASE = ELF_PARAMS.get("NEON_POOL_BASE")
ETH_TOKEN_MINT_ID: PublicKey = PublicKey(ELF_PARAMS.get("NEON_TOKEN_MINT"))
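# Usage sketch: importing this module already loads ELF_PARAMS; a caller might then
# do something like the following (the public key argument is a placeholder):
#
#     from proxy.environment import neon_cli, solana_cli, ELF_PARAMS
#     print(neon_cli().version())
#     print(solana_cli().call('balance', '<SOME_PUBKEY>'))
#     print(ELF_PARAMS.get('NEON_POOL_BASE'))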
| 35
| 109
| 0.599524
|
794f0a0f858debb3d2f803d62bf84ee8c3e448ef
| 6,365
|
py
|
Python
|
publish/publish.py
|
chnuessli/wc-guide-data
|
4daca57c9183fa2677ad6d21fe5922f660727293
|
[
"MIT"
] | null | null | null |
publish/publish.py
|
chnuessli/wc-guide-data
|
4daca57c9183fa2677ad6d21fe5922f660727293
|
[
"MIT"
] | null | null | null |
publish/publish.py
|
chnuessli/wc-guide-data
|
4daca57c9183fa2677ad6d21fe5922f660727293
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import requests
def get_features(data):
return data.get('features', [])
def get_properties(feature):
return feature.get('properties', {})
def get_type(properties):
if is_type_iv(properties):
return 'iv'
if is_type_pissoir(properties):
return 'pissoir'
return 'normal'
def is_type_pissoir(properties):
is_urinal = has_urinal(properties)
is_male = supports_male(properties)
is_female = supports_female(properties)
return is_urinal and is_male and not is_female
def supports_male(properties):
return properties.get("male", "no") == "yes"
def supports_female(properties):
return properties.get("female", "no") == "yes"
def has_urinal(properties):
return 'urinal' in properties.get('toilets:position', '')
def is_type_iv(properties):
return properties.get("wheelchair", "no") == "yes"
def has_access(properties):
access = properties.get("access", "no")
return access == "yes" or access == "public"
def has_changing_table(properties):
return properties.get("changing_table", "no") == "yes"
def requires_fee(properties):
return properties.get("fee", "no") == "yes"
def is_nette_toilette(properties):
name = str(get_name(properties)).lower()
description = str(get_description(properties)).lower()
nette_toilette = 'nette toilette'
return name == nette_toilette or description == nette_toilette
def get_name(properties):
return properties.get("name", None)
def get_operator(properties):
return properties.get("operator", None)
def get_description(properties):
return properties.get("description", None)
def get_osm_id(feature):
return feature.get("id", None)
def get_geometry(feature):
return feature.get("geometry", None)
def get_line_string_center(coordinates):
lon, lat = 0, 0
for coordinate in coordinates:
lon += coordinate[0]
lat += coordinate[1]
return [lon / len(coordinates), lat / len(coordinates)]
def get_polygon_center(coordinates):
polygons = [get_line_string_center(polygon) for polygon in coordinates]
return get_line_string_center(polygons)
def get_multipolygon_center(coordinates):
polygons = [get_polygon_center(polygon) for polygon in coordinates]
return get_line_string_center(polygons)
def geometry_to_point(geometry):
geometry_type = str(geometry.get("type", None)).lower()
coordinates = geometry.get("coordinates", None)
if len(coordinates) == 0:
return None
if geometry_type == "point":
return geometry
if geometry_type == "linestring":
return {
"type": "Point",
"coordinates": get_line_string_center(coordinates)
}
if geometry_type == "polygon":
return {
"type": "Point",
"coordinates": get_polygon_center(coordinates)
}
if geometry_type == "multipolygon":
return {
"type": "Point",
"coordinates": get_multipolygon_center(coordinates)
}
return None
def transform_feature(feature):
properties = get_properties(feature)
geometry = get_geometry(feature=feature)
return {
"type": "Feature",
"geometry": geometry_to_point(geometry),
"properties": {
"type": get_type(properties=properties),
"id": get_osm_id(feature=feature),
"description": get_description(properties=properties),
"name": get_name(properties=properties),
"operator": get_operator(properties=properties),
"access": has_access(properties=properties),
"features":
{
"wickeltisch": has_changing_table(properties=properties),
"kostenpflichtig": requires_fee(properties=properties),
"nettetoilette": is_nette_toilette(properties=properties)
},
}
}
def transform_geojson(data):
features = get_features(data=data)
transformed_features = [transform_feature(feature) for feature in features]
return {
"type": "FeatureCollection",
"features": transformed_features
}
def get_all_geojson_file_paths(folder_path):
return [os.path.join(folder_path, file) for file in os.listdir(folder_path) if file.endswith(".geojson")]
def get_raw_data(file_path):
with open(file_path) as json_file:
return json.load(json_file)
def get_publish_name(file_path):
file_name = os.path.basename(file_path)
file_name_without_extension = os.path.splitext(file_name)[0]
return file_name_without_extension.split('_')[1]
def prepare(folder_path):
file_paths = get_all_geojson_file_paths(folder_path)
path_and_data = [(file_path, get_raw_data(file_path)) for file_path in file_paths]
result = []
for path, raw_data in path_and_data:
name = get_publish_name(path)
data = transform_geojson(raw_data)
result.append((name, data))
return result
def put_to_data(url, data, username, password):
print("Put", url)
return requests.put(url=url, auth=(username, password), json=data)
def publish(backend, data, username, password):
for name, entry in data:
url = f"{backend}/area/{name}/"
response = put_to_data(url=url, data=entry, username=username, password=password)
print("Response code", response.status_code)
def run():
parser = argparse.ArgumentParser(description='Send GeoJSON to wc-guide-backend')
parser.add_argument('-u', '--user', dest='user', type=str, required=True)
parser.add_argument('-p', '--password', dest='password', type=str, required=True)
parser.add_argument('-b', '--backend', dest='backend', type=str, required=True)
parser.add_argument('-f', '--relative_folder_path', dest='relative_folder_path', type=str, required=True,
help='../data/')
parser.add_argument('-n', '--name', dest='name', type=str, required=False, help='Name used as primary key')
args = parser.parse_args()
dir_path = os.path.dirname(os.path.realpath(__file__))
folder_path = os.path.join(dir_path, args.relative_folder_path)
data = prepare(folder_path)
publish(backend=args.backend, data=data, username=args.user, password=args.password)
if __name__ == "__main__":
run()
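# Usage sketch: an example invocation (URL and credentials are placeholders):
#   python publish.py -u admin -p secret -b https://backend.example.org -f ../data/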
| 28.931818
| 111
| 0.673841
|
794f0b0b5317180a3a680f16f3632b42cb1754cc
| 3,057
|
py
|
Python
|
modela/data/DataPipeline.py
|
metaprov/modela-python-sdk
|
45ed040a241d07351886f785e224ffb3615a8c94
|
[
"Apache-2.0"
] | 1
|
2022-03-05T23:21:20.000Z
|
2022-03-05T23:21:20.000Z
|
modela/data/DataPipeline.py
|
metaprov/modela-python-sdk
|
45ed040a241d07351886f785e224ffb3615a8c94
|
[
"Apache-2.0"
] | null | null | null |
modela/data/DataPipeline.py
|
metaprov/modela-python-sdk
|
45ed040a241d07351886f785e224ffb3615a8c94
|
[
"Apache-2.0"
] | null | null | null |
import grpc
from github.com.metaprov.modelaapi.pkg.apis.data.v1alpha1.generated_pb2 import DataPipeline as MDDataPipeline
from github.com.metaprov.modelaapi.services.datapipeline.v1.datapipeline_pb2_grpc import DataPipelineServiceStub
from github.com.metaprov.modelaapi.services.datapipeline.v1.datapipeline_pb2 import CreateDataPipelineRequest, \
UpdateDataPipelineRequest, \
DeleteDataPipelineRequest, GetDataPipelineRequest, ListDataPipelinesRequest
from modela.Resource import Resource
from modela.ModelaException import ModelaException
from typing import List, Union
class DataPipeline(Resource):
def __init__(self, item: MDDataPipeline = MDDataPipeline(), client=None, namespace="", name="", version=Resource.DefaultVersion):
super().__init__(item, client, namespace=namespace, name=name, version=version)
class DataPipelineClient:
def __init__(self, stub, modela):
self.modela = modela
self.__stub: DataPipelineServiceStub = stub
def create(self, datapipeline: DataPipeline) -> bool:
request = CreateDataPipelineRequest()
request.datapipeline.CopyFrom(datapipeline.raw_message)
try:
response = self.__stub.CreateDataPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def update(self, datapipeline: DataPipeline) -> bool:
request = UpdateDataPipelineRequest()
request.datapipeline.CopyFrom(datapipeline.raw_message)
try:
self.__stub.UpdateDataPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def get(self, namespace: str, name: str) -> Union[DataPipeline, bool]:
request = GetDataPipelineRequest()
request.namespace = namespace
request.name = name
try:
response = self.__stub.GetDataPipeline(request)
return DataPipeline(response.datapipeline, self)
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def delete(self, namespace: str, name: str) -> bool:
request = DeleteDataPipelineRequest()
request.namespace = namespace
request.name = name
try:
response = self.__stub.DeleteDataPipeline(request)
return True
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
def list(self, namespace: str) -> Union[List[DataPipeline], bool]:
request = ListDataPipelinesRequest()
request.namespace = namespace
try:
response = self.__stub.ListDataPipelines(request)
return [DataPipeline(item, self) for item in response.datapipelines.items]
except grpc.RpcError as err:
error = err
ModelaException.process_error(error)
return False
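# Usage sketch: a minimal, assumed way to drive DataPipelineClient directly over an
# insecure gRPC channel; the endpoint and namespace are placeholders, and the
# higher-level `modela` object is omitted (passed as None) for brevity.
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:3000')
    client = DataPipelineClient(DataPipelineServiceStub(channel), modela=None)
    pipelines = client.list('default-tenant')
    for pipeline in (pipelines or []):
        print(pipeline)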
| 35.546512
| 133
| 0.685967
|
794f0b15410e6b359b4cb2703d53eacfaaa88169
| 3,046
|
py
|
Python
|
bokeh/sampledata/periodic_table.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | 1
|
2021-04-09T02:57:29.000Z
|
2021-04-09T02:57:29.000Z
|
bokeh/sampledata/periodic_table.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | 5
|
2021-05-07T10:31:27.000Z
|
2021-05-07T10:33:37.000Z
|
bokeh/sampledata/periodic_table.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a periodic table data set. It exposes an attribute ``elements``
which is a pandas Dataframe with the following fields:
.. code-block:: python
    elements['atomic Number']
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
elements['ionization enerygy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
where element types are:
actinoid
alkali metal
alkaline earth metal
halogen,
lanthanoid
metal
metalloid
noble gas
nonmetal
transition metalloid
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'elements',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
elements = package_csv('periodic_table', 'elements.csv')
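# Usage sketch: a small illustration of filtering the DataFrame exposed above;
# the column names follow the docstring at the top of this module.
if __name__ == '__main__':
    noble_gases = elements[elements['type'] == 'noble gas']
    print(noble_gases[['symbol', 'name', 'atomic mass']].head())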
| 35.011494
| 78
| 0.374918
|
794f0c24751f640429984d6fb34460bc2f4a226b
| 1,828
|
py
|
Python
|
src/python/dart/tool/migration/migrate_trigger_workflow_ids.py
|
RetailMeNotSandbox/dart
|
58a05f56c04fadd6741501262d92aeb143cd2f2e
|
[
"MIT"
] | 18
|
2016-03-03T19:10:21.000Z
|
2021-07-14T22:37:35.000Z
|
src/python/dart/tool/migration/migrate_trigger_workflow_ids.py
|
RetailMeNotSandbox/dart
|
58a05f56c04fadd6741501262d92aeb143cd2f2e
|
[
"MIT"
] | 62
|
2016-04-11T15:17:23.000Z
|
2017-09-08T17:18:53.000Z
|
src/python/dart/tool/migration/migrate_trigger_workflow_ids.py
|
RetailMeNotSandbox/dart
|
58a05f56c04fadd6741501262d92aeb143cd2f2e
|
[
"MIT"
] | 15
|
2016-03-03T15:38:34.000Z
|
2019-03-27T19:33:08.000Z
|
import logging
from dart.context.database import db
from dart.model.orm import TriggerDao
from dart.model.trigger import Trigger
from dart.service.patcher import patch_difference
from dart.tool.tool_runner import Tool
_logger = logging.getLogger(__name__)
class MigrateTriggerWorkflowIds(Tool):
def __init__(self):
super(MigrateTriggerWorkflowIds, self).__init__(_logger)
def run(self):
db.session.execute('ALTER TABLE trigger DISABLE TRIGGER trigger_update_timestamp')
try:
limit = 100
offset = 0
while True:
_logger.info('starting batch with limit=%s offset=%s' % (limit, offset))
trigger_daos = TriggerDao.query.order_by(TriggerDao.created).limit(limit).offset(offset).all()
if len(trigger_daos) == 0:
_logger.info('done - no more entities left')
break
for trigger_dao in trigger_daos:
workflow_id = trigger_dao.data.get('workflow_id')
if workflow_id:
trigger = trigger_dao.to_model()
assert isinstance(trigger, Trigger)
source_trigger = trigger.copy()
trigger.data.workflow_ids = [workflow_id]
patch_difference(TriggerDao, source_trigger, trigger, commit=False)
db.session.commit()
_logger.info('completed batch with limit=%s offset=%s' % (limit, offset))
offset += limit
except Exception as e:
db.session.rollback()
raise e
finally:
db.session.execute('ALTER TABLE trigger ENABLE TRIGGER trigger_update_timestamp')
if __name__ == '__main__':
MigrateTriggerWorkflowIds().run()
| 35.843137
| 110
| 0.604486
|
794f0cfd6bdab69ad952230748b3de93ee1fc656
| 18,140
|
py
|
Python
|
oslo_service/loopingcall.py
|
openstack/oslo.service
|
7a62271de38d0b113cded110a40158689748e210
|
[
"Apache-2.0"
] | 38
|
2015-05-28T11:44:28.000Z
|
2022-01-20T09:28:06.000Z
|
oslo_service/loopingcall.py
|
openstack/oslo.service
|
7a62271de38d0b113cded110a40158689748e210
|
[
"Apache-2.0"
] | 1
|
2020-04-08T14:18:50.000Z
|
2020-04-08T14:18:50.000Z
|
oslo_service/loopingcall.py
|
openstack/oslo.service
|
7a62271de38d0b113cded110a40158689748e210
|
[
"Apache-2.0"
] | 41
|
2015-07-01T08:32:00.000Z
|
2021-11-16T09:07:31.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import sys
import time
from eventlet import event
from eventlet import greenthread
from oslo_log import log as logging
from oslo_utils import eventletutils
from oslo_utils import excutils
from oslo_utils import reflection
from oslo_utils import timeutils
from oslo_service._i18n import _
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCallBase.
The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCallBase.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue
class LoopingCallTimeOut(Exception):
"""Exception for a timed out LoopingCall.
The LoopingCall will raise this exception when a timeout is provided
and it is exceeded.
"""
pass
def _safe_wrapper(f, kind, func_name):
"""Wrapper that calls into wrapped function and logs errors as needed."""
def func(*args, **kwargs):
try:
return f(*args, **kwargs)
except LoopingCallDone:
raise # let the outer handler process this
except Exception:
LOG.error('%(kind)s %(func_name)r failed',
{'kind': kind, 'func_name': func_name},
exc_info=True)
return 0
return func
class LoopingCallBase(object):
_KIND = _("Unknown looping call")
_RUN_ONLY_ONE_MESSAGE = _("A looping call can only run one function"
" at a time")
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._thread = None
self.done = None
self._abort = eventletutils.EventletEvent()
@property
def _running(self):
return not self._abort.is_set()
def stop(self):
if self._running:
self._abort.set()
def wait(self):
return self.done.wait()
def _on_done(self, gt, *args, **kwargs):
self._thread = None
def _sleep(self, timeout):
self._abort.wait(timeout)
def _start(self, idle_for, initial_delay=None, stop_on_exception=True):
"""Start the looping
:param idle_for: Callable that takes two positional arguments, returns
how long to idle for. The first positional argument is
the last result from the function being looped and the
second positional argument is the time it took to
calculate that result.
:param initial_delay: How long to delay before starting the looping.
Value is in seconds.
:param stop_on_exception: Whether to stop if an exception occurs.
:returns: eventlet event instance
"""
if self._thread is not None:
raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)
self.done = event.Event()
self._abort.clear()
self._thread = greenthread.spawn(
self._run_loop, idle_for,
initial_delay=initial_delay, stop_on_exception=stop_on_exception)
self._thread.link(self._on_done)
return self.done
# NOTE(bnemec): This is just a wrapper function we can mock so we aren't
# affected by other users of the StopWatch class.
def _elapsed(self, watch):
return watch.elapsed()
def _run_loop(self, idle_for_func,
initial_delay=None, stop_on_exception=True):
kind = self._KIND
func_name = reflection.get_callable_name(self.f)
func = self.f if stop_on_exception else _safe_wrapper(self.f, kind,
func_name)
if initial_delay:
self._sleep(initial_delay)
try:
watch = timeutils.StopWatch()
while self._running:
watch.restart()
result = func(*self.args, **self.kw)
watch.stop()
if not self._running:
break
idle = idle_for_func(result, self._elapsed(watch))
LOG.trace('%(kind)s %(func_name)r sleeping '
'for %(idle).02f seconds',
{'func_name': func_name, 'idle': idle,
'kind': kind})
self._sleep(idle)
except LoopingCallDone as e:
self.done.send(e.retvalue)
except Exception:
exc_info = sys.exc_info()
try:
LOG.error('%(kind)s %(func_name)r failed',
{'kind': kind, 'func_name': func_name},
exc_info=exc_info)
self.done.send_exception(*exc_info)
finally:
del exc_info
return
else:
self.done.send(True)
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
_RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call can only run"
" one function at a time")
_KIND = _('Fixed interval looping call')
def start(self, interval, initial_delay=None, stop_on_exception=True):
def _idle_for(result, elapsed):
delay = round(elapsed - interval, 2)
if delay > 0:
func_name = reflection.get_callable_name(self.f)
LOG.warning('Function %(func_name)r run outlasted '
'interval by %(delay).2f sec',
{'func_name': func_name, 'delay': delay})
return -delay if delay < 0 else 0
return self._start(_idle_for, initial_delay=initial_delay,
stop_on_exception=stop_on_exception)
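# Illustrative usage of FixedIntervalLoopingCall (a sketch, not part of the
# original module): poll every 2 seconds and stop by raising LoopingCallDone
# from the polled function; wait() then returns the value carried by the
# exception. ``_server_is_ready`` and the interval are made up for the example.
#
#     def _poll():
#         if _server_is_ready():
#             raise LoopingCallDone(retvalue=True)
#
#     timer = FixedIntervalLoopingCall(_poll)
#     ready = timer.start(interval=2.0).wait()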
class FixedIntervalWithTimeoutLoopingCall(LoopingCallBase):
"""A fixed interval looping call with timeout checking mechanism."""
_RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call with timeout"
" checking and can only run one function at"
" at a time")
_KIND = _('Fixed interval looping call with timeout checking.')
def start(self, interval, initial_delay=None,
stop_on_exception=True, timeout=0):
start_time = time.time()
def _idle_for(result, elapsed):
delay = round(elapsed - interval, 2)
if delay > 0:
func_name = reflection.get_callable_name(self.f)
LOG.warning('Function %(func_name)r run outlasted '
'interval by %(delay).2f sec',
{'func_name': func_name, 'delay': delay})
elapsed_time = time.time() - start_time
if timeout > 0 and elapsed_time > timeout:
raise LoopingCallTimeOut(
_('Looping call timed out after %.02f seconds')
% elapsed_time)
return -delay if delay < 0 else 0
return self._start(_idle_for, initial_delay=initial_delay,
stop_on_exception=stop_on_exception)
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
_RUN_ONLY_ONE_MESSAGE = _("A dynamic interval looping call can only run"
" one function at a time")
_TASK_MISSING_SLEEP_VALUE_MESSAGE = _(
"A dynamic interval looping call should supply either an"
" interval or periodic_interval_max"
)
_KIND = _('Dynamic interval looping call')
def start(self, initial_delay=None, periodic_interval_max=None,
stop_on_exception=True):
def _idle_for(suggested_delay, elapsed):
delay = suggested_delay
if delay is None:
if periodic_interval_max is not None:
delay = periodic_interval_max
else:
# Note(suro-patz): An application used to receive a
# TypeError thrown from eventlet layer, before
# this RuntimeError was introduced.
raise RuntimeError(
self._TASK_MISSING_SLEEP_VALUE_MESSAGE)
else:
if periodic_interval_max is not None:
delay = min(delay, periodic_interval_max)
return delay
return self._start(_idle_for, initial_delay=initial_delay,
stop_on_exception=stop_on_exception)
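# Illustrative usage of DynamicLoopingCall (a sketch): the looped function
# returns how many seconds to sleep before its next invocation, and
# periodic_interval_max caps that value; raise LoopingCallDone to stop.
# ``_seconds_until_next_task`` is a made-up helper.
#
#     def _next_run():
#         return _seconds_until_next_task()
#
#     DynamicLoopingCall(_next_run).start(periodic_interval_max=60)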
class BackOffLoopingCall(LoopingCallBase):
"""Run a method in a loop with backoff on error.
The passed in function should return True (no error, return to
initial_interval),
False (error, start backing off), or raise LoopingCallDone(retvalue=None)
(quit looping, return retvalue if set).
When there is an error, the call will backoff on each failure. The
backoff will be equal to double the previous base interval times some
jitter. If a backoff would put it over the timeout, it halts immediately,
so the call will never take more than timeout, but may and likely will
take less time.
When the function return value is True or False, the interval will be
multiplied by a random jitter. If min_jitter or max_jitter is None,
there will be no jitter (jitter=1). If min_jitter is below 0.5, the code
may not backoff and may increase its retry rate.
If func constantly returns True, this function will not return.
To run a func and wait for a call to finish (by raising a LoopingCallDone):
timer = BackOffLoopingCall(func)
response = timer.start().wait()
:param initial_delay: delay before first running of function
:param starting_interval: initial interval in seconds between calls to
function. When an error occurs and then a
success, the interval is returned to
starting_interval
:param timeout: time in seconds before a LoopingCallTimeout is raised.
The call will never take longer than timeout, but may quit
before timeout.
:param max_interval: The maximum interval between calls during errors
:param jitter: Used to vary when calls are actually run to avoid group of
calls all coming at the exact same time. Uses
random.gauss(jitter, 0.1), with jitter as the mean for the
distribution. If set below .5, it can cause the calls to
come more rapidly after each failure.
:param min_interval: The minimum interval in seconds between calls to
function.
:raises: LoopingCallTimeout if time spent doing error retries would exceed
timeout.
"""
_RNG = random.SystemRandom()
_KIND = _('Dynamic backoff interval looping call')
_RUN_ONLY_ONE_MESSAGE = _("A dynamic backoff interval looping call can"
" only run one function at a time")
def __init__(self, f=None, *args, **kw):
super(BackOffLoopingCall, self).__init__(f=f, *args, **kw)
self._error_time = 0
self._interval = 1
def start(self, initial_delay=None, starting_interval=1, timeout=300,
max_interval=300, jitter=0.75, min_interval=0.001):
if self._thread is not None:
raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)
# Reset any prior state.
self._error_time = 0
self._interval = starting_interval
def _idle_for(success, _elapsed):
random_jitter = abs(self._RNG.gauss(jitter, 0.1))
if success:
# Reset error state now that it didn't error...
self._interval = starting_interval
self._error_time = 0
return self._interval * random_jitter
else:
# Perform backoff, random jitter around the next interval
# bounded by min_interval and max_interval.
idle = max(self._interval * 2 * random_jitter, min_interval)
idle = min(idle, max_interval)
# Calculate the next interval based on the mean, so that the
# backoff grows at the desired rate.
self._interval = max(self._interval * 2 * jitter, min_interval)
# Don't go over timeout, end early if necessary. If
# timeout is 0, keep going.
if timeout > 0 and self._error_time + idle > timeout:
raise LoopingCallTimeOut(
_('Looping call timed out after %.02f seconds')
% (self._error_time + idle))
self._error_time += idle
return idle
return self._start(_idle_for, initial_delay=initial_delay)
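# Illustrative usage of BackOffLoopingCall (a sketch): the looped function
# returns True on success (interval resets to starting_interval), False on
# failure (interval backs off), and raises LoopingCallDone to stop looping.
# ``_try_connect`` is a made-up helper.
#
#     def _check():
#         if _try_connect():
#             raise LoopingCallDone(retvalue="connected")
#         return False
#
#     result = BackOffLoopingCall(_check).start(starting_interval=1,
#                                               timeout=120).wait()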
class RetryDecorator(object):
"""Decorator for retrying a function upon suggested exceptions.
The decorated function is retried for the given number of times, and the
sleep time between the retries is incremented until max sleep time is
reached. If the max retry count is set to -1, then the decorated function
is invoked indefinitely until an exception is thrown, and the caught
exception is not in the list of suggested exceptions.
"""
def __init__(self, max_retry_count=-1, inc_sleep_time=10,
max_sleep_time=60, exceptions=()):
"""Configure the retry object using the input params.
:param max_retry_count: maximum number of times the given function must
be retried when one of the input 'exceptions'
is caught. When set to -1, it will be retried
indefinitely until an exception is thrown
and the caught exception is not in param
exceptions.
:param inc_sleep_time: incremental time in seconds for sleep time
between retries
:param max_sleep_time: max sleep time in seconds beyond which the sleep
time will not be incremented using param
inc_sleep_time. On reaching this threshold,
max_sleep_time will be used as the sleep time.
:param exceptions: suggested exceptions for which the function must be
retried, if no exceptions are provided (the default)
then all exceptions will be reraised, and no
retrying will be triggered.
"""
self._max_retry_count = max_retry_count
self._inc_sleep_time = inc_sleep_time
self._max_sleep_time = max_sleep_time
self._exceptions = exceptions
self._retry_count = 0
self._sleep_time = 0
def __call__(self, f):
func_name = reflection.get_callable_name(f)
def _func(*args, **kwargs):
result = None
try:
if self._retry_count:
LOG.debug("Invoking %(func_name)s; retry count is "
"%(retry_count)d.",
{'func_name': func_name,
'retry_count': self._retry_count})
result = f(*args, **kwargs)
except self._exceptions:
with excutils.save_and_reraise_exception() as ctxt:
LOG.debug("Exception which is in the suggested list of "
"exceptions occurred while invoking function:"
" %s.",
func_name)
if (self._max_retry_count != -1 and
self._retry_count >= self._max_retry_count):
LOG.debug("Cannot retry %(func_name)s upon "
"suggested exception "
"since retry count (%(retry_count)d) "
"reached max retry count "
"(%(max_retry_count)d).",
{'retry_count': self._retry_count,
'max_retry_count': self._max_retry_count,
'func_name': func_name})
else:
ctxt.reraise = False
self._retry_count += 1
self._sleep_time += self._inc_sleep_time
return self._sleep_time
raise LoopingCallDone(result)
@functools.wraps(f)
def func(*args, **kwargs):
loop = DynamicLoopingCall(_func, *args, **kwargs)
evt = loop.start(periodic_interval_max=self._max_sleep_time)
LOG.debug("Waiting for function %s to return.", func_name)
return evt.wait()
return func
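# Illustrative usage of RetryDecorator (a sketch): retry a flaky call up to
# three times, sleeping 2, 4, 6 seconds between attempts (capped by
# max_sleep_time), but only when one of the listed exception types is raised.
# ``_do_request`` is a made-up helper.
#
#     @RetryDecorator(max_retry_count=3, inc_sleep_time=2,
#                     max_sleep_time=10, exceptions=(IOError,))
#     def _fetch_status(host):
#         return _do_request(host)   # may raise IOError and be retried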
| 41.321185
| 79
| 0.59151
|
794f0d30ddea85d733f4fa94de6749eb5a7ed8e3
| 9,191
|
py
|
Python
|
tests/snc/agents/activity_rate_to_mpc_actions/test_feedback_mip_feasible_mpc_policy.py
|
dmcnamee/snc
|
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
|
[
"Apache-2.0"
] | 5
|
2021-03-24T16:23:10.000Z
|
2021-11-17T12:44:51.000Z
|
tests/snc/agents/activity_rate_to_mpc_actions/test_feedback_mip_feasible_mpc_policy.py
|
dmcnamee/snc
|
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
|
[
"Apache-2.0"
] | 3
|
2021-03-26T01:16:08.000Z
|
2021-05-08T22:06:47.000Z
|
tests/snc/agents/activity_rate_to_mpc_actions/test_feedback_mip_feasible_mpc_policy.py
|
dmcnamee/snc
|
c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786
|
[
"Apache-2.0"
] | 2
|
2021-03-24T17:20:06.000Z
|
2021-04-19T09:01:12.000Z
|
import numpy as np
from snc.agents.activity_rate_to_mpc_actions.feedback_mip_feasible_mpc_policy \
import FeedbackMipFeasibleMpcPolicy
def get_mpc_policy_sirl():
# Simple reentrant line like environment.
constituency_matrix = np.array([[1, 0, 1], [0, 1, 0]])
buffer_processing_matrix = np.array([[-1, 0, 0], [1, -1, 0], [0, 1, -1]])
return FeedbackMipFeasibleMpcPolicy(constituency_matrix, buffer_processing_matrix)
def get_mpc_policy_routing():
constituency_matrix = np.eye(3)
buffer_processing_matrix = np.array([[-1, -1, -1]])
return FeedbackMipFeasibleMpcPolicy(constituency_matrix, buffer_processing_matrix)
def get_mpc_policy_simple_link_routing_from_book():
mu12 = 1
mu13 = 1
mu25 = 1
mu32 = 1
mu34 = 1
mu35 = 1
mu45 = 1
mu5 = 1
buffer_processing_matrix = np.array([[-mu12, -mu13, 0, 0, 0, 0, 0, 0],
[mu12, 0, -mu25, mu32, 0, 0, 0, 0],
[0, mu13, 0, -mu32, -mu34, -mu35, 0, 0],
[0, 0, 0, 0, mu34, 0, -mu45, 0],
[0, 0, mu25, 0, 0, mu35, mu45, -mu5]])
constituency_matrix = np.eye(8)
return FeedbackMipFeasibleMpcPolicy(constituency_matrix, buffer_processing_matrix)
def test_get_nonidling_resources_zero_action():
sum_actions = np.array([[0], [0], [0]])
mpc_policy = get_mpc_policy_sirl()
nonidling_constituency_mat, nonidling_ones = mpc_policy.get_nonidling_resources(sum_actions)
assert np.all(nonidling_constituency_mat == np.zeros((2, 3)))
assert np.all(nonidling_ones == np.zeros((2, 1)))
def test_get_nonidling_resources_zero_action_res_1():
sum_actions = np.array([[0], [1], [0]])
mpc_policy = get_mpc_policy_sirl()
nonidling_constituency_mat, nonidling_ones = mpc_policy.get_nonidling_resources(sum_actions)
assert np.all(nonidling_constituency_mat == np.array([[0, 0, 0], [0, 1, 0]]))
assert np.all(nonidling_ones == np.array([[0], [1]]))
def test_get_nonidling_resources_zero_action_res_2():
sum_actions = np.array([[1], [0], [0]])
mpc_policy = get_mpc_policy_sirl()
nonidling_constituency_mat, nonidling_ones = mpc_policy.get_nonidling_resources(sum_actions)
assert np.all(nonidling_constituency_mat == np.array([[1, 0, 1], [0, 0, 0]]))
assert np.all(nonidling_ones == np.array([[1], [0]]))
def test_get_nonidling_resources_both_active():
sum_actions = np.array([[0], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
nonidling_constituency_mat, nonidling_ones = mpc_policy.get_nonidling_resources(sum_actions)
assert np.all(nonidling_constituency_mat == np.array([[1, 0, 1], [0, 1, 0]]))
assert np.all(nonidling_ones == np.ones((2, 1)))
def test_generate_actions_with_feedback_empty_buffers():
sum_actions = np.ones((3, 1))
state = np.zeros((3, 1))
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.zeros((3, 1)))
def test_generate_actions_with_feedback_empty_buffer_1():
sum_actions = np.ones((3, 1))
state = np.array([[0], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[0], [1], [1]]))
def test_generate_actions_with_feedback_empty_buffer_1_no_action_buffer_2():
sum_actions = np.array([[1], [1], [0]])
state = np.array([[0], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[0], [1], [1]]))
def test_generate_actions_with_feedback_empty_buffers_1_and_3():
sum_actions = np.array([[0], [1], [0]])
state = np.array([[0], [1], [0]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[0], [1], [0]]))
def test_generate_actions_with_feedback_priority_buffer_1():
sum_actions = np.array([[1001], [1000], [1000]])
state = np.array([[1], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[1], [1], [0]]))
def test_generate_actions_with_feedback_priority_buffer_3():
sum_actions = np.array([[1000], [1000], [1001]])
state = np.array([[1], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[0], [1], [1]]))
def test_generate_actions_with_feedback_no_priority():
sum_actions = np.array([[1000], [1000], [1000]])
state = np.array([[1], [1], [1]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert action[1] == 1
assert action[0] == 1 or action[2] == 1
def test_generate_actions_with_feedback_priority_buffer_3_but_empty():
sum_actions = np.array([[1000], [1000], [1001]])
state = np.array([[1], [1], [0]])
mpc_policy = get_mpc_policy_sirl()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.array([[1], [1], [0]]))
def test_generate_actions_with_feedback_routing_enough_items():
sum_actions = np.array([[1], [1], [1]])
state = np.array([[3]])
mpc_policy = get_mpc_policy_routing()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.all(action == np.ones((3, 1)))
def test_generate_actions_with_feedback_routing_only_one_item():
sum_actions = np.array([[1], [1], [1]])
state = np.array([[1]])
mpc_policy = get_mpc_policy_routing()
action = mpc_policy.generate_actions_with_feedback(sum_actions, state)
assert np.sum(action) == 1
def test_get_actions_drain_each_buffer_routing():
mpc_policy = get_mpc_policy_routing()
actions_drain_each_buffer = mpc_policy.get_actions_drain_each_buffer()
assert np.all(actions_drain_each_buffer[0] == [np.array([0, 1, 2])])
def test_get_action_drain_each_buffer_simple_link_routing():
mpc_policy = get_mpc_policy_simple_link_routing_from_book()
actions_drain_each_buffer = mpc_policy.get_actions_drain_each_buffer()
assert np.all(actions_drain_each_buffer[0] == [np.array([0, 1])])
assert np.all(actions_drain_each_buffer[1] == [np.array([2])])
assert np.all(actions_drain_each_buffer[2] == [np.array([3, 4, 5])])
assert np.all(actions_drain_each_buffer[3] == [np.array([6])])
assert np.all(actions_drain_each_buffer[4] == [np.array([7])])
def test_update_bias_counter_routing_enough_items():
mpc_policy = get_mpc_policy_routing()
state = np.array([[3]])
action = np.array([[1], [1], [1]])
sum_actions = np.ones((3, 1))
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.zeros((3, 1)))
def test_update_bias_counter_routing_enough_items_not_required():
mpc_policy = get_mpc_policy_routing()
state = np.array([[3]])
action = np.array([[1], [1], [1]])
sum_actions = np.zeros((3, 1))
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.zeros((3, 1)))
def test_update_bias_counter_routing_not_enough_items_1():
mpc_policy = get_mpc_policy_routing()
state = np.array([[2]])
action = np.array([[1], [1], [0]])
sum_actions = np.ones((3, 1))
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.array([[0], [0], [1]]))
def test_update_bias_counter_routing_not_enough_items_1_not_required():
mpc_policy = get_mpc_policy_routing()
state = np.array([[2]])
action = np.array([[1], [1], [0]])
sum_actions = np.array([[1], [1], [0]])
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.zeros((3, 1)))
def test_update_bias_counter_routing_not_enough_items_1_other_action():
mpc_policy = get_mpc_policy_routing()
state = np.array([[2]])
action = np.array([[0], [1], [1]])
sum_actions = np.ones((3, 1))
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.array([[1], [0], [0]]))
def test_update_bias_counter_routing_not_enough_items_2():
mpc_policy = get_mpc_policy_routing()
state = np.array([[1]])
action = np.array([[0], [1], [0]])
sum_actions = np.ones((3, 1))
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.array([[1], [0], [1]]))
def test_update_bias_counter_simple_link_routing():
mpc_policy = get_mpc_policy_simple_link_routing_from_book()
state = np.ones((5, 1))
action = np.array([1, 0, 1, 1, 0, 0, 1, 1])[:, None]
sum_actions = np.ones_like(action)
mpc_policy.update_bias_counter(state, action, sum_actions)
assert np.all(mpc_policy._bias_counter.value == np.array([0, 1, 0, 0, 1, 1, 0, 0])[:, None])
| 40.311404
| 96
| 0.684909
|
794f0dca949134f9430a7fe401920522143f995f
| 8,552
|
py
|
Python
|
desktop/core/ext-py/Django-1.2.3/tests/runtests.py
|
digideskio/hortonworks-sandbox
|
dd8e95c91faee3daa094707baeb94c3953b41efa
|
[
"Apache-2.0"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
desktop/core/ext-py/Django-1.2.3/tests/runtests.py
|
abayer/hue
|
27213bb8fe89cdf0547109081e9f29c03bcc8ca5
|
[
"Apache-2.0"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
desktop/core/ext-py/Django-1.2.3/tests/runtests.py
|
abayer/hue
|
27213bb8fe89cdf0547109081e9f29c03bcc8ca5
|
[
"Apache-2.0"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
#!/usr/bin/env python
import os, sys, traceback
import unittest
import django.contrib as contrib
CONTRIB_DIR_NAME = 'django.contrib'
MODEL_TESTS_DIR_NAME = 'modeltests'
REGRESSION_TESTS_DIR_NAME = 'regressiontests'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(contrib.__file__)
MODEL_TEST_DIR = os.path.join(os.path.dirname(__file__), MODEL_TESTS_DIR_NAME)
REGRESSION_TEST_DIR = os.path.join(os.path.dirname(__file__), REGRESSION_TESTS_DIR_NAME)
REGRESSION_SUBDIRS_TO_SKIP = ['locale']
ALWAYS_INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.flatpages',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.comments',
'django.contrib.admin',
'django.contrib.admindocs',
]
def get_test_models():
models = []
for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR):
for f in os.listdir(dirpath):
if f.startswith('__init__') or f.startswith('.') or \
f.startswith('sql') or f.startswith('invalid') or \
os.path.basename(f) in REGRESSION_SUBDIRS_TO_SKIP:
continue
models.append((loc, f))
return models
def get_invalid_models():
models = []
for loc, dirpath in (MODEL_TESTS_DIR_NAME, MODEL_TEST_DIR), (REGRESSION_TESTS_DIR_NAME, REGRESSION_TEST_DIR), (CONTRIB_DIR_NAME, CONTRIB_DIR):
for f in os.listdir(dirpath):
if f.startswith('__init__') or f.startswith('.') or f.startswith('sql'):
continue
if f.startswith('invalid'):
models.append((loc, f))
return models
class InvalidModelTestCase(unittest.TestCase):
def __init__(self, model_label):
unittest.TestCase.__init__(self)
self.model_label = model_label
def runTest(self):
from django.core.management.validation import get_validation_errors
from django.db.models.loading import load_app
from cStringIO import StringIO
try:
module = load_app(self.model_label)
except Exception, e:
self.fail('Unable to load invalid model module')
        # Make sure sys.stdout is not a tty so that we get errors without
        # coloring attached (makes matching the results easier). We restore
        # sys.stdout afterwards.
orig_stdout = sys.stdout
s = StringIO()
sys.stdout = s
count = get_validation_errors(s, module)
sys.stdout = orig_stdout
s.seek(0)
error_log = s.read()
actual = error_log.split('\n')
expected = module.model_errors.split('\n')
unexpected = [err for err in actual if err not in expected]
missing = [err for err in expected if err not in actual]
self.assert_(not unexpected, "Unexpected Errors: " + '\n'.join(unexpected))
self.assert_(not missing, "Missing Errors: " + '\n'.join(missing))
def django_tests(verbosity, interactive, failfast, test_labels):
from django.conf import settings
old_installed_apps = settings.INSTALLED_APPS
old_root_urlconf = getattr(settings, "ROOT_URLCONF", "")
old_template_dirs = settings.TEMPLATE_DIRS
old_use_i18n = settings.USE_I18N
old_login_url = settings.LOGIN_URL
old_language_code = settings.LANGUAGE_CODE
old_middleware_classes = settings.MIDDLEWARE_CLASSES
# Redirect some settings for the duration of these tests.
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
settings.ROOT_URLCONF = 'urls'
settings.TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), TEST_TEMPLATE_DIR),)
settings.USE_I18N = True
settings.LANGUAGE_CODE = 'en'
settings.LOGIN_URL = '/accounts/login/'
settings.MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware',
)
settings.SITE_ID = 1
# For testing comment-utils, we require the MANAGERS attribute
# to be set, so that a test email is sent out which we catch
# in our tests.
settings.MANAGERS = ("admin@djangoproject.com",)
# Load all the ALWAYS_INSTALLED_APPS.
# (This import statement is intentionally delayed until after we
# access settings because of the USE_I18N dependency.)
from django.db.models.loading import get_apps, load_app
get_apps()
# Load all the test model apps.
test_labels_set = set([label.split('.')[0] for label in test_labels])
for model_dir, model_name in get_test_models():
model_label = '.'.join([model_dir, model_name])
# if the model was named on the command line, or
# no models were named (i.e., run all), import
# this model and add it to the list to test.
if not test_labels or model_name in test_labels_set:
if verbosity >= 1:
print "Importing model %s" % model_name
mod = load_app(model_label)
if mod:
if model_label not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(model_label)
# Add tests for invalid models.
extra_tests = []
for model_dir, model_name in get_invalid_models():
model_label = '.'.join([model_dir, model_name])
if not test_labels or model_name in test_labels:
extra_tests.append(InvalidModelTestCase(model_label))
try:
# Invalid models are not working apps, so we cannot pass them into
# the test runner with the other test_labels
test_labels.remove(model_name)
except ValueError:
pass
# Run the test suite, including the extra validation tests.
from django.test.utils import get_runner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
TestRunner = get_runner(settings)
if hasattr(TestRunner, 'func_name'):
# Pre 1.2 test runners were just functions,
# and did not support the 'failfast' option.
import warnings
warnings.warn(
'Function-based test runners are deprecated. Test runners should be classes with a run_tests() method.',
PendingDeprecationWarning
)
failures = TestRunner(test_labels, verbosity=verbosity, interactive=interactive,
extra_tests=extra_tests)
else:
test_runner = TestRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
failures = test_runner.run_tests(test_labels, extra_tests=extra_tests)
if failures:
sys.exit(bool(failures))
# Restore the old settings.
settings.INSTALLED_APPS = old_installed_apps
settings.ROOT_URLCONF = old_root_urlconf
settings.TEMPLATE_DIRS = old_template_dirs
settings.USE_I18N = old_use_i18n
settings.LANGUAGE_CODE = old_language_code
settings.LOGIN_URL = old_login_url
settings.MIDDLEWARE_CLASSES = old_middleware_classes
if __name__ == "__main__":
from optparse import OptionParser
usage = "%prog [options] [model model model ...]"
parser = OptionParser(usage=usage)
parser.add_option('-v','--verbosity', action='store', dest='verbosity', default='0',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.add_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option('--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed test.')
parser.add_option('--settings',
help='Python path to settings module, e.g. "myproject.settings". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
elif "DJANGO_SETTINGS_MODULE" not in os.environ:
parser.error("DJANGO_SETTINGS_MODULE is not set in the environment. "
"Set it or use --settings.")
django_tests(int(options.verbosity), options.interactive, options.failfast, args)
| 41.921569
| 161
| 0.684167
|
794f0dfe5192e41a9135adc31c6165732eb6fad3
| 15,427
|
py
|
Python
|
src/zvt/contract/api.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 2,032
|
2019-04-16T14:10:32.000Z
|
2022-03-31T12:40:13.000Z
|
src/zvt/contract/api.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 162
|
2019-05-07T09:57:46.000Z
|
2022-03-25T16:23:08.000Z
|
src/zvt/contract/api.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 755
|
2019-04-30T10:25:16.000Z
|
2022-03-29T17:50:49.000Z
|
# -*- coding: utf-8 -*-
import logging
import os
import platform
from typing import List, Union, Type
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy import func, exists, and_
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm import Query
from sqlalchemy.orm import sessionmaker, Session
from zvt import zvt_env
from zvt.contract import IntervalLevel
from zvt.contract import zvt_context
from zvt.contract.schema import Mixin, TradableEntity
from zvt.utils.pd_utils import pd_is_not_null, index_df
from zvt.utils.time_utils import to_pd_timestamp
logger = logging.getLogger(__name__)
def get_db_name(data_schema: DeclarativeMeta) -> str:
"""
get db name of the domain schema
:param data_schema:
:type data_schema:
:return:
:rtype:
"""
for db_name, base in zvt_context.dbname_map_base.items():
if issubclass(data_schema, base):
return db_name
def get_db_engine(
provider: str, db_name: str = None, data_schema: object = None, data_path: str = zvt_env["data_path"]
) -> Engine:
"""
get db engine of the (provider,db_name) or (provider,data_schema)
:param data_path:
:param provider:
:type provider:
:param db_name:
:type db_name:
:param data_schema:
:type data_schema:
:return:
:rtype:
"""
if data_schema:
db_name = get_db_name(data_schema=data_schema)
db_path = os.path.join(data_path, "{}_{}.db?check_same_thread=False".format(provider, db_name))
engine_key = "{}_{}".format(provider, db_name)
db_engine = zvt_context.db_engine_map.get(engine_key)
if not db_engine:
db_engine = create_engine("sqlite:///" + db_path, echo=False)
zvt_context.db_engine_map[engine_key] = db_engine
return db_engine
def get_schemas(provider: str) -> List[DeclarativeMeta]:
"""
get domain schemas supported by the provider
:param provider:
:type provider:
:return:
:rtype:
"""
schemas = []
for provider1, dbs in zvt_context.provider_map_dbnames.items():
if provider == provider1:
for dbname in dbs:
schemas1 = zvt_context.dbname_map_schemas.get(dbname)
if schemas1:
schemas += schemas1
return schemas
def get_db_session(provider: str, db_name: str = None, data_schema: object = None, force_new: bool = False) -> Session:
"""
get db session of the (provider,db_name) or (provider,data_schema)
:param provider:
:type provider:
:param db_name:
:type db_name:
:param data_schema:
:type data_schema:
:param force_new:
:type force_new:
:return:
:rtype:
"""
if data_schema:
db_name = get_db_name(data_schema=data_schema)
session_key = "{}_{}".format(provider, db_name)
if force_new:
return get_db_session_factory(provider, db_name, data_schema)()
session = zvt_context.sessions.get(session_key)
if not session:
session = get_db_session_factory(provider, db_name, data_schema)()
zvt_context.sessions[session_key] = session
return session
def get_db_session_factory(provider: str, db_name: str = None, data_schema: object = None):
"""
get db session factory of the (provider,db_name) or (provider,data_schema)
:param provider:
:type provider:
:param db_name:
:type db_name:
:param data_schema:
:type data_schema:
:return:
:rtype:
"""
if data_schema:
db_name = get_db_name(data_schema=data_schema)
session_key = "{}_{}".format(provider, db_name)
session = zvt_context.db_session_map.get(session_key)
if not session:
session = sessionmaker()
zvt_context.db_session_map[session_key] = session
return session
def get_entity_schema(entity_type: str) -> Type[TradableEntity]:
"""
get entity schema from name
:param entity_type:
:type entity_type:
:return:
:rtype:
"""
return zvt_context.tradable_schema_map[entity_type]
def get_schema_by_name(name: str) -> DeclarativeMeta:
"""
get domain schema by the name
:param name:
:type name:
:return:
:rtype:
"""
for schema in zvt_context.schemas:
if schema.__name__ == name:
return schema
def get_schema_columns(schema: DeclarativeMeta) -> List[str]:
"""
get all columns of the domain schema
:param schema:
:type schema:
:return:
:rtype:
"""
return schema.__table__.columns.keys()
def common_filter(
query: Query,
data_schema,
start_timestamp=None,
end_timestamp=None,
filters=None,
order=None,
limit=None,
time_field="timestamp",
):
assert data_schema is not None
time_col = eval("data_schema.{}".format(time_field))
if start_timestamp:
query = query.filter(time_col >= to_pd_timestamp(start_timestamp))
if end_timestamp:
query = query.filter(time_col <= to_pd_timestamp(end_timestamp))
if filters:
for filter in filters:
query = query.filter(filter)
if order is not None:
query = query.order_by(order)
else:
query = query.order_by(time_col.asc())
if limit:
query = query.limit(limit)
return query
def del_data(data_schema: Type[Mixin], filters: List = None, provider=None):
if not provider:
provider = data_schema.providers[0]
session = get_db_session(provider=provider, data_schema=data_schema)
query = session.query(data_schema)
if filters:
for f in filters:
query = query.filter(f)
query.delete()
session.commit()
def get_one(data_schema, id: str, provider: str = None, session: Session = None):
if "providers" not in data_schema.__dict__:
logger.error("no provider registered for: {}", data_schema)
if not provider:
provider = data_schema.providers[0]
if not session:
session = get_db_session(provider=provider, data_schema=data_schema)
return session.query(data_schema).get(id)
def get_data(
data_schema: Type[Mixin],
ids: List[str] = None,
entity_ids: List[str] = None,
entity_id: str = None,
codes: List[str] = None,
code: str = None,
level: Union[IntervalLevel, str] = None,
provider: str = None,
columns: List = None,
col_label: dict = None,
return_type: str = "df",
start_timestamp: Union[pd.Timestamp, str] = None,
end_timestamp: Union[pd.Timestamp, str] = None,
filters: List = None,
session: Session = None,
order=None,
limit: int = None,
index: Union[str, list] = None,
drop_index_col=False,
time_field: str = "timestamp",
):
if "providers" not in data_schema.__dict__:
logger.error("no provider registered for: {}", data_schema)
if not provider:
provider = data_schema.providers[0]
if not session:
session = get_db_session(provider=provider, data_schema=data_schema)
time_col = eval("data_schema.{}".format(time_field))
if columns:
# support str
if type(columns[0]) == str:
columns_ = []
for col in columns:
if isinstance(col, str):
columns_.append(eval("data_schema.{}".format(col)))
else:
columns_.append(col)
columns = columns_
# make sure get timestamp
if time_col not in columns:
columns.append(time_col)
if col_label:
columns_ = []
for col in columns:
if col.name in col_label:
columns_.append(col.label(col_label.get(col.name)))
else:
columns_.append(col)
columns = columns_
query = session.query(*columns)
else:
query = session.query(data_schema)
if entity_id:
query = query.filter(data_schema.entity_id == entity_id)
if entity_ids:
query = query.filter(data_schema.entity_id.in_(entity_ids))
if code:
query = query.filter(data_schema.code == code)
if codes:
query = query.filter(data_schema.code.in_(codes))
if ids:
query = query.filter(data_schema.id.in_(ids))
    # we always store different levels in different schemas; the level param is not useful now
if level:
try:
            # some schemas have no level, just ignore it
data_schema.level
if type(level) == IntervalLevel:
level = level.value
query = query.filter(data_schema.level == level)
except Exception as e:
pass
query = common_filter(
query,
data_schema=data_schema,
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
filters=filters,
order=order,
limit=limit,
time_field=time_field,
)
if return_type == "df":
df = pd.read_sql(query.statement, query.session.bind)
if pd_is_not_null(df):
if index:
df = index_df(df, index=index, drop=drop_index_col, time_field=time_field)
return df
elif return_type == "domain":
return query.all()
elif return_type == "dict":
return [item.__dict__ for item in query.all()]
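# Illustrative call (a sketch, not part of the original module): fetch a few
# columns of a registered schema as a DataFrame indexed by timestamp.
# ``Stock1dKdata`` (e.g. from zvt.domain) and the column/entity values are
# assumptions used only for this example; any registered Mixin subclass works
# the same way.
#
#     df = get_data(
#         data_schema=Stock1dKdata,
#         entity_id="stock_sz_000338",
#         columns=["close", "volume"],
#         start_timestamp="2020-01-01",
#         index="timestamp",
#     )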
def data_exist(session, schema, id):
return session.query(exists().where(and_(schema.id == id))).scalar()
def get_data_count(data_schema, filters=None, session=None):
query = session.query(data_schema)
if filters:
for filter in filters:
query = query.filter(filter)
count_q = query.statement.with_only_columns([func.count(data_schema.id)]).order_by(None)
count = session.execute(count_q).scalar()
return count
def get_group(provider, data_schema, column, group_func=func.count, session=None):
if not session:
session = get_db_session(provider=provider, data_schema=data_schema)
if group_func:
query = session.query(column, group_func(column)).group_by(column)
else:
query = session.query(column).group_by(column)
df = pd.read_sql(query.statement, query.session.bind)
return df
def decode_entity_id(entity_id: str):
result = entity_id.split("_")
entity_type = result[0]
exchange = result[1]
code = "".join(result[2:])
return entity_type, exchange, code
def get_entity_type(entity_id: str):
entity_type, _, _ = decode_entity_id(entity_id)
return entity_type
def get_entity_exchange(entity_id: str):
_, exchange, _ = decode_entity_id(entity_id)
return exchange
def get_entity_code(entity_id: str):
_, _, code = decode_entity_id(entity_id)
return code
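# Example (illustrative): an entity id has the form
# "<entity_type>_<exchange>_<code>", so the helpers above simply split on "_".
# The id below is a made-up sample value.
#
#     >>> decode_entity_id("stock_sz_000338")
#     ('stock', 'sz', '000338')
#     >>> get_entity_exchange("stock_sz_000338")
#     'sz'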
def df_to_db(
df: pd.DataFrame,
data_schema: DeclarativeMeta,
provider: str,
force_update: bool = False,
sub_size: int = 5000,
drop_duplicates: bool = True,
) -> object:
"""
FIXME:improve
store the df to db
:param df:
:param data_schema:
:param provider:
:param force_update:
:param sub_size:
:param drop_duplicates:
:return:
"""
if not pd_is_not_null(df):
return 0
if drop_duplicates and df.duplicated(subset="id").any():
logger.warning(f"remove duplicated:{df[df.duplicated()]}")
df = df.drop_duplicates(subset="id", keep="last")
db_engine = get_db_engine(provider, data_schema=data_schema)
schema_cols = get_schema_columns(data_schema)
cols = set(df.columns.tolist()) & set(schema_cols)
if not cols:
print("wrong cols")
return 0
df = df[cols]
size = len(df)
if platform.system() == "Windows":
sub_size = 900
if size >= sub_size:
step_size = int(size / sub_size)
if size % sub_size:
step_size = step_size + 1
else:
step_size = 1
saved = 0
for step in range(step_size):
df_current = df.iloc[sub_size * step : sub_size * (step + 1)]
if force_update:
session = get_db_session(provider=provider, data_schema=data_schema)
ids = df_current["id"].tolist()
if len(ids) == 1:
sql = f'delete from `{data_schema.__tablename__}` where id = "{ids[0]}"'
else:
sql = f"delete from `{data_schema.__tablename__}` where id in {tuple(ids)}"
session.execute(sql)
session.commit()
else:
current = get_data(
data_schema=data_schema, columns=[data_schema.id], provider=provider, ids=df_current["id"].tolist()
)
if pd_is_not_null(current):
df_current = df_current[~df_current["id"].isin(current["id"])]
if pd_is_not_null(df_current):
saved = saved + len(df_current)
df_current.to_sql(data_schema.__tablename__, db_engine, index=False, if_exists="append")
return saved
def get_entities(
entity_schema: Type[TradableEntity] = None,
entity_type: str = None,
exchanges: List[str] = None,
ids: List[str] = None,
entity_ids: List[str] = None,
entity_id: str = None,
codes: List[str] = None,
code: str = None,
provider: str = None,
columns: List = None,
col_label: dict = None,
return_type: str = "df",
start_timestamp: Union[pd.Timestamp, str] = None,
end_timestamp: Union[pd.Timestamp, str] = None,
filters: List = None,
session: Session = None,
order=None,
limit: int = None,
index: Union[str, list] = "code",
) -> List:
if not entity_schema:
entity_schema = zvt_context.tradable_schema_map[entity_type]
if not provider:
provider = entity_schema.providers[0]
if not order:
order = entity_schema.code.asc()
if exchanges:
if filters:
filters.append(entity_schema.exchange.in_(exchanges))
else:
filters = [entity_schema.exchange.in_(exchanges)]
return get_data(
data_schema=entity_schema,
ids=ids,
entity_ids=entity_ids,
entity_id=entity_id,
codes=codes,
code=code,
level=None,
provider=provider,
columns=columns,
col_label=col_label,
return_type=return_type,
start_timestamp=start_timestamp,
end_timestamp=end_timestamp,
filters=filters,
session=session,
order=order,
limit=limit,
index=index,
)
def get_entity_ids(
entity_type="stock", entity_schema: TradableEntity = None, exchanges=None, codes=None, provider=None, filters=None
):
df = get_entities(
entity_type=entity_type,
entity_schema=entity_schema,
exchanges=exchanges,
codes=codes,
provider=provider,
filters=filters,
)
if pd_is_not_null(df):
return df["entity_id"].to_list()
return None
# the __all__ is generated
__all__ = [
"get_db_name",
"get_db_engine",
"get_schemas",
"get_db_session",
"get_db_session_factory",
"get_entity_schema",
"get_schema_by_name",
"get_schema_columns",
"common_filter",
"del_data",
"get_one",
"get_data",
"data_exist",
"get_data_count",
"get_group",
"decode_entity_id",
"get_entity_type",
"get_entity_exchange",
"get_entity_code",
"df_to_db",
"get_entities",
"get_entity_ids",
]
| 27.112478
| 119
| 0.636546
|
794f0f6aba28285d1ce0fda808c3b6c1abf11f9b
| 285
|
py
|
Python
|
anjuke/items.py
|
awolfly9/anjuke
|
80ed47db05a032a66284073134d2649312201d02
|
[
"MIT"
] | null | null | null |
anjuke/items.py
|
awolfly9/anjuke
|
80ed47db05a032a66284073134d2649312201d02
|
[
"MIT"
] | null | null | null |
anjuke/items.py
|
awolfly9/anjuke
|
80ed47db05a032a66284073134d2649312201d02
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AnjukeItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 19
| 51
| 0.684211
|
794f112c0b160fdccf438b6edbce4a895acacd77
| 431
|
py
|
Python
|
apps/coupon/migrations/0002_alter_coupon_id.py
|
FusionArc/retrodev_2
|
8b1e09f54ae0fb268dc7842718a014bc961a4dd7
|
[
"MIT"
] | null | null | null |
apps/coupon/migrations/0002_alter_coupon_id.py
|
FusionArc/retrodev_2
|
8b1e09f54ae0fb268dc7842718a014bc961a4dd7
|
[
"MIT"
] | null | null | null |
apps/coupon/migrations/0002_alter_coupon_id.py
|
FusionArc/retrodev_2
|
8b1e09f54ae0fb268dc7842718a014bc961a4dd7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-28 09:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coupon', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='coupon',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 22.684211
| 111
| 0.614849
|
794f13098729848c1bfc438be78e99beb938f872
| 2,789
|
py
|
Python
|
2018/puzzle_03/puzzle_03.py
|
alexanderson/advent-of-code
|
92bb34353f430e420869797cff3604046e1f1192
|
[
"MIT"
] | null | null | null |
2018/puzzle_03/puzzle_03.py
|
alexanderson/advent-of-code
|
92bb34353f430e420869797cff3604046e1f1192
|
[
"MIT"
] | null | null | null |
2018/puzzle_03/puzzle_03.py
|
alexanderson/advent-of-code
|
92bb34353f430e420869797cff3604046e1f1192
|
[
"MIT"
] | null | null | null |
import collections
import re
CUTOUT_RE = re.compile(
r'#(?P<id>\d+) @ (?P<x>\d+),(?P<y>\d+): (?P<width>\d+)x(?P<height>\d+)'
)
Cutout = collections.namedtuple('Cutout', 'id x y width height')
class Fabric:
def __init__(self, width, height):
self._width = width
self._height = height
self._grid = [
[set() for _ in range(self._width)]
for _ in range(self._height)
]
def add_cutout(self, cutout):
for y, x in coords(cutout):
self._grid[y][x].add(cutout.id)
def overlaps(self):
overlaps = 0
for row in self._grid:
for cell in row:
if len(cell) > 1:
overlaps += 1
return overlaps
def isolated_cutouts(self):
all_ids = set()
overlapping_ids = set()
for row in self._grid:
for cell in row:
all_ids |= cell
if len(cell) > 1:
overlapping_ids |= cell
return all_ids - overlapping_ids
def get_cutouts():
with open('input.txt') as input_file:
instructions = input_file.readlines()
lines = (line.rstrip('\n') for line in instructions)
return [
parse(line)
for line in lines
if line
]
def parse(cutout):
match = CUTOUT_RE.match(cutout)
assert match, cutout
groups = match.groupdict()
return Cutout(
id=int(groups['id']),
x=int(groups['x']),
y=int(groups['y']),
width=int(groups['width']),
height=int(groups['height']),
)
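# Example (illustrative): parsing a single claim line from the puzzle input.
#
#     >>> parse('#1 @ 1,3: 4x4')
#     Cutout(id=1, x=1, y=3, width=4, height=4)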
def grid_size(cutouts):
width = 0
height = 0
for cutout in cutouts:
width = max(width, cutout.x + cutout.width)
height = max(height, cutout.y + cutout.height)
return (
width + 1,
height + 1
)
def coords(cutout):
for y in range(cutout.y, cutout.y + cutout.height):
for x in range(cutout.x, cutout.x + cutout.width):
yield y, x
def test_overlap():
fabric = Fabric(8, 8)
instructions = (
'#1 @ 1,3: 4x4',
'#2 @ 3,1: 4x4',
'#3 @ 5,5: 2x2',
)
for instruction in instructions:
cutout = parse(instruction)
fabric.add_cutout(cutout)
overlaps = fabric.overlaps()
assert overlaps == 4, '{} != 4'.format(overlaps)
isolated_cutouts = fabric.isolated_cutouts()
assert isolated_cutouts == {3}, isolated_cutouts
if __name__ == '__main__':
test_overlap()
cutouts = get_cutouts()
width, height = grid_size(cutouts)
fabric = Fabric(width, height)
for cutout in cutouts:
fabric.add_cutout(cutout)
overlaps = fabric.overlaps()
print(overlaps)
isolated_cutouts = fabric.isolated_cutouts()
print(isolated_cutouts)
| 23.635593
| 75
| 0.562567
|
794f13bbfb6b10ca07f1f3a1e6b2aca8859eb39e
| 132,771
|
py
|
Python
|
pandas/core/indexes/multi.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-12-07T13:37:31.000Z
|
2021-12-07T13:37:31.000Z
|
pandas/core/indexes/multi.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/multi.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from functools import wraps
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Hashable,
Iterable,
List,
Sequence,
Tuple,
cast,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
index as libindex,
lib,
)
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import (
AnyArrayLike,
DtypeObj,
Scalar,
Shape,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
InvalidIndexError,
PerformanceWarning,
UnsortedIndexError,
)
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import (
array_equivalent,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
ensure_index,
get_unanimous_names,
)
from pandas.core.indexes.frozen import FrozenList
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import (
CategoricalIndex,
DataFrame,
Series,
)
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
{"klass": "MultiIndex", "target_klass": "MultiIndex or list of tuples"}
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
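# Illustrative sketch of the packing described above (the offsets here are
# made up): with two levels that each need 8 bits, offsets would be [8, 0],
# so the codes (3, 5) pack into the single uint64 key 3 * 2**8 + 5 == 773.
#
#     >>> import numpy as np
#     >>> codes = np.array([3, 5], dtype="uint64")
#     >>> offsets = np.array([8, 0], dtype="uint64")
#     >>> int(np.bitwise_or.reduce(codes << offsets))
#     773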
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
def names_compat(meth):
"""
A decorator to allow either `name` or `names` keyword but not both.
This makes it easier to share code with base class.
"""
@wraps(meth)
def new_meth(self_or_cls, *args, **kwargs):
if "name" in kwargs and "names" in kwargs:
raise TypeError("Can only provide one of `names` and `name`")
elif "name" in kwargs:
kwargs["names"] = kwargs.pop("name")
return meth(self_or_cls, *args, **kwargs)
return new_meth
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`__
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_hidden_attrs = Index._hidden_attrs | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
sortorder: int | None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(cls)
result._cache = {}
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
result._reset_identity()
return result
def _validate_codes(self, level: list, code: list):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
        level : list
            Level to check for missing values (NaN, NaT, None).
        code : list
            Code to reassign.
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(self, codes: list | None = None, levels: list | None = None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
# NOTE: Currently does not check, among other things, that cached
        # nlevels matches nor that sortorder matches the actual sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > _lexsort_depth(self.codes, self.nlevels):
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
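    # A minimal sketch (not part of pandas) of a failure caught above: a code that
    # points past the end of its level is rejected by the constructor.
    #
    #   >>> pd.MultiIndex(levels=[["a", "b"]], codes=[[0, 2]])
    #   Traceback (most recent call last):
    #   ValueError: On level 0, code max (2) >= length of level (2). NOTE: this
    #   index is in an inconsistent state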
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> MultiIndex:
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
        # Check that all arrays have the same length; raise ValueError if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return cls(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
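    # A minimal sketch (not part of pandas): when ``names`` is omitted, the names are
    # inferred from each array-like's ``name`` attribute (here hypothetical Series).
    #
    #   >>> s1 = pd.Series([1, 2], name="number")
    #   >>> s2 = pd.Series(["red", "blue"], name="color")
    #   >>> pd.MultiIndex.from_arrays([s1, s2]).names
    #   FrozenList(['number', 'color'])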
@classmethod
@names_compat
def from_tuples(
cls,
tuples: Iterable[tuple[Hashable, ...]],
sortorder: int | None = None,
names: Sequence[Hashable] | None = None,
) -> MultiIndex:
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
tuples = cast(Collection[Tuple[Hashable, ...]], tuples)
arrays: list[Sequence[Hashable]]
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = np.asarray(tuples._values)
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrs = zip(*tuples)
arrays = cast(List[Sequence[Hashable]], arrs)
return cls.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(
cls, iterables, sortorder=None, names=lib.no_default
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df: DataFrame, sortorder=None, names=None) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@cache_readonly
def _values(self) -> np.ndarray:
# We override here, since our parent uses _data, which we don't use.
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals.dtype):
vals = cast("CategoricalIndex", vals)
vals = vals._data._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or isinstance(
vals, (ABCDatetimeIndex, ABCTimedeltaIndex)
):
vals = vals.astype(object)
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "Index")
vals = np.array(vals, copy=False) # type: ignore[assignment]
values.append(vals)
arr = lib.fast_zip(values)
return arr
@property
def values(self) -> np.ndarray:
return self._values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@cache_readonly
def dtypes(self) -> Series:
"""
Return the dtypes as a Series for the underlying MultiIndex
"""
from pandas import Series
return Series(
{
f"level_{idx}" if level.name is None else level.name: level.dtype
for idx, level in enumerate(self.levels)
}
)
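    # A minimal sketch (not part of pandas): unnamed levels fall back to the
    # "level_{idx}" key used above.
    #
    #   >>> mi = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=["num", None])
    #   >>> mi.dtypes
    #   num         int64
    #   level_1    object
    #   dtype: object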
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self) -> FrozenList:
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [x._rename(name=name) for x, name in zip(self._levels, self._names)]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self,
levels,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
        # This is NOT part of the levels property because it should not be
        # possible to set levels from outside this class. User beware if you
        # change _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._view() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels_list = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view()
new_levels = FrozenList(new_levels_list)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._reset_cache()
def set_levels(
self, levels, level=None, inplace=None, verify_integrity: bool = True
):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [
... (1, "one"),
... (1, "two"),
... (2, "one"),
... (2, "two"),
... (3, "one"),
... (3, "two")
... ],
... names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two'),
(3, 'one'),
(3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
level, levels = _require_listlike(level, levels, "Levels")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.nlevels
3
"""
return len(self._levels)
@property
def levshape(self) -> Shape:
"""
A tuple with the length of each level.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a'], ['b'], ['c']])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.levshape
(1, 1, 1)
"""
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self,
codes,
level=None,
copy: bool = False,
validate: bool = True,
verify_integrity: bool = False,
) -> None:
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes_list = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes_list[lev_num] = _coerce_indexer_frozen(
level_codes, lev, copy=copy
)
new_codes = FrozenList(new_codes_list)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._reset_cache()
def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = True):
"""
Set new codes on MultiIndex. Defaults to returning new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
.. deprecated:: 1.2.0
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if inplace is not None:
warnings.warn(
"inplace is deprecated and will be removed in a future version.",
FutureWarning,
stacklevel=2,
)
else:
inplace = False
level, codes = _require_listlike(level, codes, "Codes")
if inplace:
idx = self
else:
idx = self._view()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(level) + 1 for level in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
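    # A minimal sketch (not part of pandas) of the engine selection above, with five
    # hypothetical levels of 10,000 labels each: the total bit budget exceeds 64, so
    # the object-based engine would be chosen.
    #
    #   >>> import numpy as np
    #   >>> sizes = np.ceil(np.log2([10_000 + 1] * 5))            # 14 bits per level
    #   >>> np.cumsum(sizes[::-1])[::-1][0] > 64                  # 70 bits in total
    #   True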
@property
def _constructor(self) -> Callable[..., MultiIndex]:
return type(self).from_tuples
@doc(Index._shallow_copy)
def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex:
names = name if name is not lib.no_default else self.names
return type(self).from_tuples(values, sortorder=None, names=names)
def _view(self) -> MultiIndex:
result = type(self)(
levels=self.levels,
codes=self.codes,
sortorder=self.sortorder,
names=self.names,
verify_integrity=False,
)
result._cache = self._cache.copy()
result._cache.pop("levels", None) # GH32669
return result
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
.. deprecated:: 1.2.0
levels : sequence, optional
.. deprecated:: 1.2.0
codes : sequence, optional
.. deprecated:: 1.2.0
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if levels is not None:
warnings.warn(
"parameter levels is deprecated and will be removed in a future "
"version. Use the set_levels method instead.",
FutureWarning,
stacklevel=2,
)
if codes is not None:
warnings.warn(
"parameter codes is deprecated and will be removed in a future "
"version. Use the set_codes method instead.",
FutureWarning,
stacklevel=2,
)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._cache.pop("levels", None) # GH32669
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(level):
return "mixed" in level or "string" in level or "unicode" in level
return any(f(level) for level in self._inferred_type_levels)
@doc(Index.memory_usage)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None) -> str:
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level_strs = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level_strs)
# numpy 1.21 deprecated implicit string casting
level_strs = level_strs.astype(str)
level_strs = np.append(level_strs, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level_strs)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi._values
def format(
self,
name: bool | None = None,
formatter: Callable | None = None,
na_rep: str | None = None,
names: bool = False,
space: int = 2,
sparsify=None,
adjoin: bool = True,
) -> list:
if name is not None:
names = name
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_nd(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, lev_name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(lev_name, escape_chars=("\t", "\r", "\n"))
if lev_name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547 use value of sparsify as sentinel if it's "Falsey"
assert isinstance(sparsify, bool) or sparsify is lib.no_default
if sparsify in [False, lib.no_default]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = sparsify_labels(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import get_adjustment
adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
# --------------------------------------------------------------------
# Names Methods
def _get_names(self) -> FrozenList:
return FrozenList(self._names)
def _set_names(self, names, level=None, validate: bool = True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : bool, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
# error: Cannot determine type of '__setitem__'
self._names[lev] = name # type: ignore[has-type]
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names,
fget=_get_names,
doc="""
Names of levels in MultiIndex.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z'])
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.names
FrozenList(['x', 'y', 'z'])
""",
)
# --------------------------------------------------------------------
@doc(Index._get_grouper_for_level)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
if any(-1 in code for code in self.codes):
return False
if all(level.is_monotonic for level in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i)._values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self._values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self) -> list[str]:
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@doc(Index.duplicated)
def duplicated(self, keep="first") -> np.ndarray:
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
# error: Cannot override final attribute "_duplicated"
# (previously declared in base class "IndexOpsMixin")
_duplicated = duplicated # type: ignore[misc]
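    # A minimal sketch (not part of pandas): duplicated() operates on whole tuples,
    # not on individual levels.
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 1, 1], ["a", "a", "b"]])
    #   >>> mi.duplicated()
    #   array([False,  True, False])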
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@doc(Index.dropna)
def dropna(self, how: str = "any") -> MultiIndex:
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.set_codes(codes=new_codes)
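    # A minimal sketch (not part of pandas): a row is dropped as soon as any level is
    # missing (how="any").
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, None, 2], ["a", "b", None]])
    #   >>> mi.dropna()
    #   MultiIndex([(1.0, 'a')],
    #              )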
def _get_level_values(self, level: int, unique: bool = False) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int
unique : bool, default False
if True, drop duplicated values
Returns
-------
Index
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level.
Length of returned vector is equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@doc(Index.unique)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def to_frame(self, index: bool = True, name=None) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([['a', 'b'], ['c', 'd']])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=['x', 'y'])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self) -> Index:
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
See Also
--------
MultiIndex.from_tuples : Convert flat index back to MultiIndex.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self._values, tupleize_cols=False)
@property
def _is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._is_lexsorted()
def _is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
Examples
--------
In the below examples, the first level of the MultiIndex is sorted because
a<b<c, so there is no need to look at the next level.
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted()
True
In case there is a tie, the lexicographical sorting looks
at the next level of the MultiIndex.
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted()
False
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted()
True
>>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'],
... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted()
False
"""
return self._lexsort_depth == self.nlevels
@property
def lexsort_depth(self):
warnings.warn(
"MultiIndex.is_lexsorted is deprecated as a public function, "
"users should use MultiIndex.is_monotonic_increasing instead.",
FutureWarning,
stacklevel=2,
)
return self._lexsort_depth
@cache_readonly
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
if self.sortorder is not None:
return self.sortorder
return _lexsort_depth(self.codes, self.nlevels)
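    # A minimal sketch (not part of pandas): only the first level below is sorted, so
    # the computed depth is 1.
    #
    #   >>> pd.MultiIndex.from_arrays([[0, 0, 1], ["b", "a", "a"]])._lexsort_depth
    #   1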
def _sort_levels_monotonic(self) -> MultiIndex:
"""
This is an *internal* function.
Create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self._is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_platform_int(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_nd(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self) -> MultiIndex:
"""
Create new MultiIndex from current that removes unused levels.
Unused level(s) means levels that are not expressed in the
labels. The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will
also be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
if lev.isna().any() and len(uniques) == len(lev):
break
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
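    # A minimal sketch (not part of pandas) of the bincount trick used above, on a
    # hypothetical code vector containing a missing value (-1).
    #
    #   >>> import numpy as np
    #   >>> level_codes = np.array([2, 0, -1, 2])
    #   >>> np.where(np.bincount(level_codes + 1) > 0)[0] - 1
    #   array([-1,  0,  2])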
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = {
"levels": list(self.levels),
"codes": list(self.codes),
"sortorder": self.sortorder,
"names": list(self.names),
}
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key, warn_float=True)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
# in general cannot be sure whether the result will be sorted
sortorder = None
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
elif isinstance(key, slice):
if key.step is None or key.step > 0:
sortorder = self.sortorder
elif isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
sortorder = None
if slobj.step is None or slobj.step > 0:
sortorder = self.sortorder
new_codes = [level_codes[slobj] for level_codes in self.codes]
return type(self)(
levels=self.levels,
codes=new_codes,
names=self._names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(
self: MultiIndex,
indices,
axis: int = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> MultiIndex:
nv.validate_take((), kwargs)
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices)
na_value = -1
taken = [lab.take(indices) for lab in self.codes]
if allow_fill:
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def append(self, other):
"""
        Append a collection of Index objects together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self._values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
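    # A minimal sketch (not part of pandas): appending another MultiIndex with at
    # least as many levels goes through the level-wise from_arrays path above.
    #
    #   >>> mi = pd.MultiIndex.from_tuples([(1, "a")])
    #   >>> mi.append(pd.MultiIndex.from_tuples([(2, "b")]))
    #   MultiIndex([(1, 'a'),
    #               (2, 'b')],
    #              )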
def argsort(self, *args, **kwargs) -> np.ndarray:
return self._values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats: int, axis=None) -> MultiIndex:
nv.validate_repeat((), {"axis": axis})
# error: Incompatible types in assignment (expression has type "ndarray",
# variable has type "int")
repeats = ensure_platform_int(repeats) # type: ignore[assignment]
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self._lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise") -> MultiIndex:
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
        # If nan should be dropped it will equal -1 here. We have to check which
        # values are not nan but equal -1; those labels are missing from the index
nan_codes = isna(codes)
values[(np.equal(nan_codes, False)) & (values == -1)] = -2
if index.shape[0] == self.shape[0]:
values[np.equal(nan_codes, True)] = -2
not_found = codes[values == -2]
if len(not_found) != 0 and errors != "ignore":
raise KeyError(f"labels {not_found} not found in level")
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1) -> MultiIndex:
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order) -> MultiIndex:
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=['x', 'y'])
>>> mi
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.reorder_levels(order=[1, 0])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
>>> mi.reorder_levels(order=['y', 'x'])
MultiIndex([(3, 1),
(4, 2)],
names=['y', 'x'])
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self) -> list[Categorical]:
"""
we are categorizing our codes by using the
available categories (all, not just observed)
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(
self, level=0, ascending: bool = True, sort_remaining: bool = True
) -> tuple[MultiIndex, np.ndarray]:
"""
Sort MultiIndex at the requested level.
The result will respect the original ordering of the associated
factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            Sort by the remaining levels after sorting by ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([[0, 0], [2, 1]])
>>> mi
MultiIndex([(0, 2),
(0, 1)],
)
>>> mi.sortlevel()
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(sort_remaining=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
>>> mi.sortlevel(1)
(MultiIndex([(0, 1),
(0, 2)],
), array([1, 0]))
>>> mi.sortlevel(1, ascending=False)
(MultiIndex([(0, 2),
(0, 1)],
), array([0, 1]))
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
# Reverse sorted to retain the order of
# smaller indices that needs to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(
self, target, method=None, level=None, limit=None, tolerance=None
) -> tuple[MultiIndex, np.ndarray | None]:
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase.ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def _check_indexing_error(self, key):
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
def _should_fallback_to_positional(self) -> bool:
"""
Should integer key(s) be treated as positional?
"""
# GH#33355
return self.levels[0]._should_fallback_to_positional()
def _get_values_for_loc(self, series: Series, loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
if len(new_values) == 1 and not self.nlevels > 1:
# If more than one level left, we can not return a scalar
return new_values[0]
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = (key,) + (slice(None),) * (len(self.levels) - 1)
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# returned ndarray is np.intp
# empty indexer
if not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self._values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
# TODO: explicitly raise here? we only have one test that
# gets here, and it is checking that we raise with method="nearest"
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
# TODO: get_indexer_with_fill docstring says values must be _sorted_
# but that doesn't appear to be enforced
indexer = self._engine.get_indexer_with_fill(
target=target._values, values=self._values, method=method, limit=limit
)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target._values)
# Note: we only get here (in extant tests at least) with
# target.nlevels == self.nlevels
return ensure_platform_int(indexer)
def get_slice_bound(
self, label: Hashable | Sequence[Hashable], side: str, kind: str
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left", kind="loc")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right", kind="loc")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self._lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self._lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
elif isinstance(idx, slice):
idx = idx.start
return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
        If key is an NA value, the location in the index is unified as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels.
The location is returned as an integer/slice or boolean
mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
hash(key)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self._lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype=np.intp)
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
if not isinstance(level, (list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
return self._get_loc_level(key, level=level, drop_level=drop_level)
def _get_loc_level(self, key, level: int | list[int] = 0, drop_level: bool = True):
"""
get_loc_level but with `level` known to be positional, not name-based.
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
for i in sorted(levels, reverse=True):
try:
new_index = new_index._drop_level_numbers([i])
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self._get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
if step is not None and step < 0:
# Switch elements for negative step size
start, stop = stop - 1, start - 1
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for the
                # entire set); examining the entire set would be wasteful, so we
                # only need to examine locations that are in this subset. The
                # only magic here is that the results are the mappings to the
                # subset that we have selected.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.asarray(m) # type: ignore[assignment]
else:
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
m = np.zeros(len(codes), dtype=bool) # type: ignore[assignment]
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self._lexsort_depth == 0 or step is not None:
                # need right-search semantics here, like when we are using a
                # slice, so include stop + 1 (so that stop itself is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self._lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
start = idx.start
end = idx.stop
else:
start = level_codes.searchsorted(idx, side="left")
end = level_codes.searchsorted(idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self._lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self._lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r) -> Int64Index:
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr: Index | None, indexer: Index | None, key) -> Index:
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
indexer_intersection = indexer.intersection(idxr)
if indexer_intersection.empty and not idxr.empty and not indexer.empty:
raise KeyError(key)
return indexer_intersection
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(
_convert_to_indexer(k), indexer=indexer, key=seq
)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers: Int64Index | None = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = (idxrs if indexers is None else indexers).union(
idxrs, sort=False
)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer, key=seq)
else:
# no matches we are done
return np.array([], dtype=np.int64)
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer, key=seq)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
key=seq,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
key=seq,
)
# empty indexer
if indexer is None:
return np.array([], dtype=np.int64)
assert isinstance(indexer, Int64Index), type(indexer)
indexer = self._reorder_indexer(seq, indexer)
return indexer._values
# --------------------------------------------------------------------
def _reorder_indexer(
self,
seq: tuple[Scalar | Iterable | AnyArrayLike, ...],
indexer: Int64Index,
) -> Int64Index:
"""
        Reorder an indexer of a MultiIndex (self) so that the labels are in the
        same order as given in seq.
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self._is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
elif isinstance(k, slice) and k.step is not None and k.step < 0:
need_sort = True
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: tuple[np.ndarray, ...] = ()
        # For each level of the sequence in seq, map the level codes with the
        # order in which they appear in the list-like sequence.
        # This mapping is then used to reorder the indexer.
for i, k in enumerate(seq):
if is_scalar(k):
# GH#34603 we want to treat a scalar the same as an all equal list
k = [k]
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
elif isinstance(k, slice) and k.step is not None and k.step < 0:
new_order = np.arange(n)[k][indexer]
elif isinstance(k, slice) and k.start is None and k.stop is None:
# slice(None) should not determine order GH#31330
new_order = np.ones((n,))[indexer]
else:
# For all other case, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
def truncate(self, before=None, after=None) -> MultiIndex:
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=self._names,
verify_integrity=False,
)
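    # Illustrative sketch (assumes ``pd`` is imported): truncation is inclusive
    # of ``after`` and accepts partial (first-level) labels, e.g.
    #
    #   >>> mi = pd.MultiIndex.from_arrays([list("abc"), list("xyz")])
    #   >>> mi.truncate(before="a", after="b")
    #   MultiIndex([('a', 'x'),
    #               ('b', 'y')],
    #              )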
def equals(self, other: object) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if len(self) != len(other):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not self._should_compare(other):
# object Index or Categorical[object] may contain tuples
return False
return array_equivalent(self._values, other._values)
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
other_codes = other.codes[i]
self_mask = self_codes == -1
other_mask = other_codes == -1
if not np.array_equal(self_mask, other_mask):
return False
self_codes = self_codes[~self_mask]
self_values = self.levels[i]._values.take(self_codes)
other_codes = other_codes[~other_mask]
other_values = other.levels[i]._values.take(other_codes)
            # since we use NaT for both datetime64 and timedelta64, we can have a
            # situation where a level is typed, say, timedelta64 in self (i.e. it
            # has values other than NaT) but typed datetime64 in other (where it
            # is all NaT); these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other: MultiIndex) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def _union(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
# We could get here with CategoricalIndex other
rvals = other._values.astype(object, copy=False)
uniq_tuples = lib.fast_unique_multiple([self._values, rvals], sort=sort)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return is_object_dtype(dtype)
def _get_reconciled_name_object(self, other) -> MultiIndex:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other):
"""
Try to find common names to attach to the result of an operation between
a and b. Return a consensus list of names if they match at least partly
        or a list of None if they have completely different names.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
names = []
for a_name, b_name in zip(self.names, other.names):
if a_name == b_name:
names.append(a_name)
else:
# TODO: what if they both have np.nan for their names?
names.append(None)
return names
def _intersection(self, other, sort=False) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
other = other.astype(object, copy=False)
uniq_tuples = None # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
inner_tuples = self._inner_indexer(other)[0]
sort = False # inner_tuples is already sorted
except TypeError:
pass
else:
uniq_tuples = algos.unique(inner_tuples)
if uniq_tuples is None:
left_unique = self.drop_duplicates()
indexer = left_unique.get_indexer(other.drop_duplicates())
uniq_tuples = left_unique.take(np.sort(indexer[indexer != -1]))
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def _difference(self, other, sort) -> MultiIndex:
other, result_names = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this._values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not isinstance(other, Index):
if len(other) == 0:
return self[:0], self.names
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
else:
result_names = get_unanimous_names(self, other)
return other, result_names
def symmetric_difference(self, other, result_name=None, sort=None):
# On equal symmetric_difference MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
tups = Index.symmetric_difference(self, other, result_name, sort)
if len(tups) == 0:
return type(self)(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
names=tups.name,
)
return type(self).from_tuples(tups, names=tups.name)
# --------------------------------------------------------------------
@doc(Index.astype)
def astype(self, dtype, copy: bool = True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object "
"is not supported"
)
elif copy is True:
return self._view()
return self
def _validate_fill_value(self, item):
if not isinstance(item, tuple):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
return item
def insert(self, loc: int, item) -> MultiIndex:
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
item = self._validate_fill_value(item)
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc) -> MultiIndex:
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
@doc(Index.isin)
def isin(self, values, level=None) -> np.ndarray:
if level is None:
values = MultiIndex.from_tuples(values, names=self.names)._values
return algos.isin(self._values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
# ---------------------------------------------------------------
# Arithmetic/Numeric Methods - Disabled
__add__ = make_invalid_op("__add__")
__radd__ = make_invalid_op("__radd__")
__iadd__ = make_invalid_op("__iadd__")
__sub__ = make_invalid_op("__sub__")
__rsub__ = make_invalid_op("__rsub__")
__isub__ = make_invalid_op("__isub__")
__pow__ = make_invalid_op("__pow__")
__rpow__ = make_invalid_op("__rpow__")
__mul__ = make_invalid_op("__mul__")
__rmul__ = make_invalid_op("__rmul__")
__floordiv__ = make_invalid_op("__floordiv__")
__rfloordiv__ = make_invalid_op("__rfloordiv__")
__truediv__ = make_invalid_op("__truediv__")
__rtruediv__ = make_invalid_op("__rtruediv__")
__mod__ = make_invalid_op("__mod__")
__rmod__ = make_invalid_op("__rmod__")
__divmod__ = make_invalid_op("__divmod__")
__rdivmod__ = make_invalid_op("__rdivmod__")
# Unary methods disabled
__neg__ = make_invalid_op("__neg__")
__pos__ = make_invalid_op("__pos__")
__abs__ = make_invalid_op("__abs__")
__inv__ = make_invalid_op("__inv__")
def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int:
"""Count depth (up to a maximum of `nlevels`) with which codes are lexsorted."""
int64_codes = [ensure_int64(level_codes) for level_codes in codes]
for k in range(nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
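# Illustrative sketch (hypothetical code arrays): the depth is the number of
# leading levels whose codes are jointly lexsorted, e.g.
#
#   >>> _lexsort_depth([np.array([0, 0, 1]), np.array([0, 1, 0])], nlevels=2)
#   2
#   >>> _lexsort_depth([np.array([1, 0]), np.array([0, 1])], nlevels=2)
#   0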
def sparsify_labels(label_list, start: int = 0, sentinel=""):
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
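# Illustrative sketch (hypothetical labels): repeated leading labels are blanked
# out with the sentinel, while the last level is always kept verbatim, e.g.
#
#   >>> sparsify_labels([["a", "a", "b"], ["x", "y", "z"]])
#   [('a', '', 'b'), ('x', 'y', 'z')]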
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index: Index, key) -> Index:
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index._drop_level_numbers([0])
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index._drop_level_numbers([0])
except ValueError:
pass
return index
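# Illustrative sketch (assumes ``pd`` is imported): a scalar key drops one
# level, a tuple key drops one level per component, and the original index is
# returned unchanged if every level would otherwise be dropped, e.g.
#
#   >>> mi = pd.MultiIndex.from_arrays([list("ab"), list("cd")])
#   >>> maybe_droplevels(mi, "a")
#   Index(['c', 'd'], dtype='object')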
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
def _require_listlike(level, arr, arrname: str):
"""
Ensure that level is either None or listlike, and arr is list-of-listlike.
"""
if level is not None and not is_list_like(level):
if not is_list_like(arr):
raise TypeError(f"{arrname} must be list-like")
if is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list-like")
level = [level]
arr = [arr]
elif level is None or is_list_like(level):
if not is_list_like(arr) or not is_list_like(arr[0]):
raise TypeError(f"{arrname} must be list of lists-like")
return level, arr
| 33.775375
| 88
| 0.534808
|
794f1436947ca7836b9de996790b97c40a5b88e3
| 2,060
|
py
|
Python
|
wwpdb/utils/nmr/CifToNmrStar.py
|
wwPDB/py-wwpdb_utils_nmr
|
531f1cf5dba0c1fb4e5d710d740c3ee05d34dafb
|
[
"Apache-2.0"
] | null | null | null |
wwpdb/utils/nmr/CifToNmrStar.py
|
wwPDB/py-wwpdb_utils_nmr
|
531f1cf5dba0c1fb4e5d710d740c3ee05d34dafb
|
[
"Apache-2.0"
] | null | null | null |
wwpdb/utils/nmr/CifToNmrStar.py
|
wwPDB/py-wwpdb_utils_nmr
|
531f1cf5dba0c1fb4e5d710d740c3ee05d34dafb
|
[
"Apache-2.0"
] | 1
|
2021-06-21T10:46:22.000Z
|
2021-06-21T10:46:22.000Z
|
##
# File: CifToNmrStar.py
# Date: 19-Jul-2021
#
# Updates:
# 13-Oct-2021 M. Yokochi - code revision according to PEP8 using Pylint (DAOTHER-7389, issue #5)
##
""" Wrapper class for CIF to NMR-STAR converter.
@author: Masashi Yokochi
"""
import sys
import os
import os.path
import pynmrstar
from wwpdb.utils.nmr.io.mmCIFUtil import mmCIFUtil
class CifToNmrStar:
""" Simple CIF to NMR-STAR converter.
"""
def __init__(self, log=sys.stderr):
self.__lfh = log
def convert(self, cifPath=None, strPath=None):
""" Convert CIF to NMR-STAR for re-upload without CS data
"""
if cifPath is None or strPath is None:
return False
try:
cifObj = mmCIFUtil(filePath=cifPath)
block_name_list = cifObj.GetBlockIDList()
strObj = pynmrstar.Entry.from_scratch(os.path.basename(cifPath))
for block_name in block_name_list:
sf = pynmrstar.Saveframe.from_scratch(block_name)
dict_list = cifObj.GetDataBlock(block_name)
has_sf_category = False
for category, itVals in dict_list.items():
if not has_sf_category:
sf.set_tag_prefix(category)
for item, value in zip(itVals['Items'], itVals['Values'][0]):
sf.add_tag(item, block_name if item == 'Sf_framecode' else value)
has_sf_category = True
else:
lp = pynmrstar.Loop.from_scratch(category)
for item in itVals['Items']:
lp.add_tag(item)
for row in itVals['Values']:
lp.add_data(row)
sf.add_loop(lp)
strObj.add_saveframe(sf)
            strObj.write_to_file(strPath, skip_empty_tags=False)
            return True
except Exception as e:
self.__lfh.write(f"+ERROR- CifToNmrStar.convert() {str(e)}\n")
return False
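# Illustrative usage (a sketch; the file names below are hypothetical):
#
#   converter = CifToNmrStar()
#   ok = converter.convert(cifPath="entry.cif", strPath="entry.str")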
| 26.075949
| 97
| 0.558252
|
794f14567a0eba710792faf7f2218156a6ac21a8
| 397
|
py
|
Python
|
openai/deepq/__init__.py
|
niksaz/dota2-expert-demo
|
b46ce6fc2c9223f07d91db78633c7d20c36f4653
|
[
"MIT"
] | 17
|
2018-12-26T10:18:35.000Z
|
2021-11-04T02:49:29.000Z
|
openai/deepq/__init__.py
|
niksaz/dota2-expert-demo
|
b46ce6fc2c9223f07d91db78633c7d20c36f4653
|
[
"MIT"
] | null | null | null |
openai/deepq/__init__.py
|
niksaz/dota2-expert-demo
|
b46ce6fc2c9223f07d91db78633c7d20c36f4653
|
[
"MIT"
] | 5
|
2019-01-26T06:10:15.000Z
|
2021-11-04T02:49:30.000Z
|
from openai.deepq import models # noqa
from openai.deepq.build_graph import build_act, build_train # noqa
from openai.deepq.deepq import learn, load_act # noqa
from openai.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
def wrap_atari_dqn(env):
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
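# Illustrative usage (a sketch; assumes gym and the baselines Atari wrappers are
# installed, and the environment id is only an example):
#
#   import gym
#   env = wrap_atari_dqn(gym.make("PongNoFrameskip-v4"))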
| 44.111111
| 84
| 0.811083
|
794f14a6162211e376244e0ac44ddae2d98cb7f7
| 1,239
|
py
|
Python
|
agents/random_agents_test.py
|
lavanyashukla/ml-fairness-gym
|
fb68b379d4284b7af746b2a051d518b3bd45ab00
|
[
"Apache-2.0"
] | null | null | null |
agents/random_agents_test.py
|
lavanyashukla/ml-fairness-gym
|
fb68b379d4284b7af746b2a051d518b3bd45ab00
|
[
"Apache-2.0"
] | null | null | null |
agents/random_agents_test.py
|
lavanyashukla/ml-fairness-gym
|
fb68b379d4284b7af746b2a051d518b3bd45ab00
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for fairness_gym.agents.random_agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import test_util
from agents import random_agents
class RandomAgentsTest(absltest.TestCase):
def test_can_run_with_env(self):
env = test_util.DummyEnv()
agent = random_agents.RandomAgent(
action_space=env.action_space,
observation_space=env.observation_space,
reward_fn=None)
test_util.run_test_simulation(env=env, agent=agent)
if __name__ == '__main__':
absltest.main()
| 30.219512
| 74
| 0.76594
|
794f14c189536db6d356f14982383559ef5589e4
| 144
|
py
|
Python
|
queries/query_organization/query.py
|
akarapun/elearning
|
fe116d5815925269819061ea183cbfdb773844cf
|
[
"MIT"
] | 1
|
2020-03-14T11:00:14.000Z
|
2020-03-14T11:00:14.000Z
|
queries/query_organization/query.py
|
akarapun/elearning
|
fe116d5815925269819061ea183cbfdb773844cf
|
[
"MIT"
] | null | null | null |
queries/query_organization/query.py
|
akarapun/elearning
|
fe116d5815925269819061ea183cbfdb773844cf
|
[
"MIT"
] | null | null | null |
import graphene
from query_organization.getAllOrg import GetAllOrg
class OrganizationQuery(
GetAllOrg,
graphene.ObjectType):
pass
| 16
| 50
| 0.784722
|
794f15031a3947f8a56bb058759a9a49fc085b14
| 393
|
py
|
Python
|
website1/wsgi.py
|
sandykaka/vroombaby
|
61222390c24c44b50cf2771a38bbfb243f5ecb11
|
[
"MIT"
] | null | null | null |
website1/wsgi.py
|
sandykaka/vroombaby
|
61222390c24c44b50cf2771a38bbfb243f5ecb11
|
[
"MIT"
] | 3
|
2020-02-11T23:08:19.000Z
|
2021-06-10T20:48:00.000Z
|
website1/wsgi.py
|
sandykaka/vroombaby
|
61222390c24c44b50cf2771a38bbfb243f5ecb11
|
[
"MIT"
] | null | null | null |
"""
WSGI config for website1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website1.settings')
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
794f15d74ee0465affc367ea8d74bc8d20a0a7f6
| 29
|
py
|
Python
|
tests/test_iex.py
|
lkire/share-dash
|
4c94ea8e72c88b3d3976247ef9718560b63da67d
|
[
"MIT"
] | null | null | null |
tests/test_iex.py
|
lkire/share-dash
|
4c94ea8e72c88b3d3976247ef9718560b63da67d
|
[
"MIT"
] | null | null | null |
tests/test_iex.py
|
lkire/share-dash
|
4c94ea8e72c88b3d3976247ef9718560b63da67d
|
[
"MIT"
] | null | null | null |
from dashboards.iex import *
| 14.5
| 28
| 0.793103
|
794f164f98e5ad2949c2cca6bdcbaba9aff82023
| 6,392
|
py
|
Python
|
asposewordscloud/models/requests/delete_table_cell_request.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/requests/delete_table_cell_request.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/requests/delete_table_cell_request.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="delete_table_cell_request.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
from six.moves.urllib.parse import quote
class DeleteTableCellRequest(object):
"""
Request model for delete_table_cell operation.
Initializes a new instance.
:param name The filename of the input document.
:param table_row_path The path to the table row in the document tree.
:param index Object index.
:param folder Original document folder.
:param storage Original document storage.
:param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
:param password Password for opening an encrypted document.
:param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
:param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
:param revision_date_time The date and time to use for revisions.
"""
def __init__(self, name, table_row_path, index, folder=None, storage=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.name = name
self.table_row_path = table_row_path
self.index = index
self.folder = folder
self.storage = storage
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'name' is set
if self.name is None:
raise ValueError("Missing the required parameter `name` when calling `delete_table_cell`") # noqa: E501
# verify the required parameter 'table_row_path' is set
if self.table_row_path is None:
raise ValueError("Missing the required parameter `table_row_path` when calling `delete_table_cell`") # noqa: E501
# verify the required parameter 'index' is set
if self.index is None:
raise ValueError("Missing the required parameter `index` when calling `delete_table_cell`") # noqa: E501
path = '/v4.0/words/{name}/{tableRowPath}/cells/{index}'
path_params = {}
if self.name is not None:
path_params['name'] = self.name # noqa: E501
else:
path_params['name'] = '' # noqa: E501
if self.table_row_path is not None:
path_params['tableRowPath'] = self.table_row_path # noqa: E501
else:
path_params['tableRowPath'] = '' # noqa: E501
if self.index is not None:
path_params['index'] = self.index # noqa: E501
else:
path_params['index'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.folder is not None:
query_params.append(('folder', self.folder)) # noqa: E501
if self.storage is not None:
query_params.append(('storage', self.storage)) # noqa: E501
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
form_params = []
body_params = None
return {
"method": "DELETE",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": None # noqa: E501
}
def get_response_type(self):
return None # noqa: E501
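# Illustrative usage (a sketch; the document name, row path and index are
# hypothetical, and ``api_client`` stands for a configured Aspose.Words client):
#
#   request = DeleteTableCellRequest(name="sample.docx",
#                                    table_row_path="sections/0/tables/0/rows/0",
#                                    index=0)
#   http_request = request.create_http_request(api_client)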
| 48.793893
| 255
| 0.649718
|
794f194ecc5c8f0324cab6af5afd05837cda6521
| 11,749
|
py
|
Python
|
hihope_neptune-oh_hid/00_src/v0.1/test/xts/tools/lite/build/utils.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1
|
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/test/xts/tools/lite/build/utils.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/test/xts/tools/lite/build/utils.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Huawei Device Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import fnmatch
import sys
import argparse
import json
import platform
import subprocess
import distutils.dir_util as dir_util
import distutils.file_util as file_util
from distutils.errors import DistutilsError
# all sub system list, must be lowercase.
_SUB_SYSTEM_LIST = [
"kernel",
"hiviewdfx",
"communication",
"security",
"update",
"sstsutils",
"utils",
"uikit",
"multimedia",
"hdf",
"appexecfwk",
"distributed_schedule",
"startup",
"sensors",
"sample",
"iot_hardware",
"open_posix_testsuite",
"graphic",
"ace",
"applications",
"ai",
"global",
"telephony",
"dcts"
]
_NO_FILTE_SUB_SYSTEM_LIST = [
"kernel",
"open_posix_testsuite",
"sample",
"telephony",
"dcts"
]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--method_name', help='', required=True)
parser.add_argument('--arguments', help='',
required=True) # format key=value#key=value
args = parser.parse_args()
this_module = sys.modules[__name__]
method = getattr(this_module, args.method_name)
arguments = {}
for argument in args.arguments.split("#"):
key_value = argument.strip().split("=")
if len(key_value) != 2:
raise ValueError(
"The arguments' format is 'key=value#key=value'. Wrong format:"
" " + argument)
arguments.setdefault(key_value[0].strip(), key_value[1].strip())
method(**arguments)
return 0
def read_file(input_file):
if not os.path.exists(input_file):
return ""
with open(input_file, 'r') as input_f:
content = input_f.read().strip()
return content
def write_file(output_file, content, append):
file_dir = os.path.dirname(os.path.abspath(output_file))
if not os.path.exists(file_dir):
os.makedirs(file_dir)
mode = 'a+' if append else 'w'
with open(output_file, mode) as output_f:
output_f.write("%s\n" % content)
def copy_file(output, sources="", source_dirs="", to_dir=True):
"""
    Copy source files or a source directory to output.
    If sources is not empty, the output can be a file (which will be created
    automatically) or a directory (which must already exist).
    :param output: If source_dirs is not empty, output must be a directory.
    :param sources: source files, separated by commas
    :param source_dirs: source directories, separated by commas
    :param to_dir: whether output is a directory
:return:
"""
if not sources and not source_dirs:
raise Exception(
"sources or source_dirs parameter must be specified one")
_output = output.strip()
_sources = sources.strip()
_source_dirs = source_dirs.strip()
_parent_output = os.path.dirname(_output)
try:
if to_dir and not os.path.exists(_output):
os.makedirs(_output)
if not to_dir and not os.path.exists(_parent_output):
os.makedirs(_parent_output)
except OSError:
if not os.path.isdir(_output):
raise
try:
if _sources:
_copy_files(_sources.split(","), _output)
if _source_dirs:
_copy_dir(_source_dirs.split(","), _output)
except DistutilsError:
print("ignore file exist error")
return 0
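# Illustrative usage (a sketch; the paths are hypothetical): note that multiple
# sources are passed as a single comma-separated string.
#
#   copy_file("/tmp/suite_out", sources="/tmp/a.txt,/tmp/b.txt")
#   copy_file("/tmp/suite_out", source_dirs="/tmp/resource_dir")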
def _copy_files(sources, output):
copy_set = set()
for source_file in sources:
source_file = source_file.strip()
if os.path.isfile(source_file) and os.path.exists(source_file):
            # if the same file name already exists, add the parent dir name to the path
if os.path.basename(source_file) in copy_set:
new_output = os.path.join(output, os.path.dirname(source_file).
split(os.sep)[-1])
if not os.path.exists(new_output):
os.makedirs(new_output)
file_util.copy_file(source_file, new_output)
else:
file_util.copy_file(source_file, output)
copy_set.add(os.path.basename(source_file))
def _copy_dir(sources, output):
for source_file in sources:
source_file = source_file.strip()
if os.path.isdir(source_file):
dir_util.copy_tree(source_file, output)
def gen_suite_out(suite_output_prefix, suite_names, out_suffix):
outputs = []
_suite_output_prefix = suite_output_prefix.strip()
_dirname_suffix = out_suffix.strip().rstrip(os.sep)
for suite in suite_names.split(","):
path = "%s%s/%s" % (
_suite_output_prefix, suite.strip(), _dirname_suffix)
outputs.append(path)
print(path)
return outputs
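# Illustrative sketch (hypothetical arguments): each suite name is combined with
# the prefix and the suffix, and every resulting path is printed and returned.
#
#   gen_suite_out("out/suites/", "acts,hits", "testcases")
#   # -> ["out/suites/acts/testcases", "out/suites/hits/testcases"]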
def get_subsystem_name(path):
subsystem_name = ""
for subsystem in _SUB_SYSTEM_LIST:
subsystem_path = "/" + subsystem + "/"
_path = path.lower()
if subsystem_path in _path:
subsystem_name = subsystem
break
subsystem_path = "/" + subsystem + "_lite/"
if subsystem_path in _path:
subsystem_name = subsystem
break
sys.stdout.write(subsystem_name)
return subsystem_name
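# Illustrative sketch (hypothetical path): the first matching subsystem segment
# is written to stdout and returned.
#
#   get_subsystem_name("/test/xts/acts/communication/wifi/BUILD.gn")
#   # -> "communication"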
def get_modulename_by_buildtarget(module_list_file, build_target):
if not os.path.exists(module_list_file):
return ""
with open(module_list_file, "r") as module_file:
module_info_data = json.load(module_file)
for module in module_info_data:
if module_info_data[module]["build_target_name"] == build_target:
sys.stdout.write(module)
return module
return ""
def glob(path, filename_pattern):
    # collect matches in a separately named list so the accumulator is not
    # shadowed by the file list yielded by os.walk
    matched_files = []
    for dir_path, _, file_names in os.walk(path):
        for filename in fnmatch.filter(file_names, filename_pattern):
            matched_files.append(os.path.join(dir_path, filename))
    return matched_files
def filter_by_subsystem(testsuites, product_json):
product_info = {}
filtered_features = []
subsystem_names = set()
    # parse the product json to obtain all the subsystem names
    if os.path.exists(product_json):
        try:
            with open(product_json, 'r') as product_file:
                product_info = json.load(product_file)
        except ValueError:
            print("NO json object could be decoded.")
    # default to an empty list so a missing or malformed json does not break iteration
    subsystem_info = product_info.get("subsystems", [])
    for subsystem in subsystem_info:
        subsystem_names.add(subsystem.get("subsystem"))
feature_list = testsuites.split(",")
for feature in feature_list:
        # if the subsystem name matches
subsystem = get_subsystem_name_no_output(feature)
if subsystem in _NO_FILTE_SUB_SYSTEM_LIST or \
subsystem in subsystem_names:
filtered_features.append(feature)
print(feature)
return filtered_features
def get_subsystem_name_no_output(path):
subsystem_name = ""
for subsystem in _SUB_SYSTEM_LIST:
subsystem_path = "/" + subsystem
_path = path.lower()
if subsystem_path in _path:
subsystem_name = subsystem
break
subsystem_path = "/" + subsystem + "_lite"
if subsystem_path in _path:
subsystem_name = subsystem
break
return subsystem_name
def get_python_cmd():
major, _, _ = platform.python_version_tuple()
if major == "3":
return "python"
else:
return "python3"
def record_testmodule_info(build_target_name, module_name,
subsystem_name, suite_out_dir, same_file=False):
if not build_target_name or not subsystem_name:
print(
'build_target_name or subsystem_name of testmodule "%s" '
'is invalid!' % module_name)
return
if same_file:
module_info_list_file = os.path.join(suite_out_dir, 'module_info.json')
else:
module_info_list_file = os.path.join(suite_out_dir,
'{}_module_info.json'.format
(build_target_name))
module_info_data = {}
if os.path.exists(module_info_list_file):
try:
with open(module_info_list_file, 'r') as module_file:
module_info_data = json.load(module_file)
except ValueError:
print("NO json object could be decoded but continue")
module_info = {'subsystem': subsystem_name,
'build_target_name': build_target_name}
module_info_data[module_name] = module_info
with open(module_info_list_file, 'w') as out_file:
json.dump(module_info_data, out_file)
def record_test_component_info(out_dir, version):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
all_module_file = os.path.join(out_dir, 'module_info.json')
all_module_data = {}
for root, dirs, files in os.walk(out_dir):
for file in files:
if file.endswith("module_info.json"):
with open(os.path.join(root, file), 'r') as json_data:
module_data = json.load(json_data)
all_module_data.update(module_data)
os.remove(os.path.join(root, file))
with open(all_module_file, 'w') as out_file:
json.dump(all_module_data, out_file)
test_component_file = os.path.join(out_dir, 'test_component.json')
test_component_data = {'version': version, }
with open(test_component_file, 'w') as out_file:
json.dump(test_component_data, out_file)
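def _example_record_module_info(suite_out_dir):
    """Hypothetical usage sketch (not part of the original script): records one
    test module and then merges all per-target json files into module_info.json
    plus a test_component.json carrying the suite version. The module,
    subsystem and version names here are made-up placeholders."""
    record_testmodule_info("calculator_test", "CalculatorTest",
                           "applications", suite_out_dir)
    record_test_component_info(suite_out_dir, "1.0.0")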
def get_target_modules(all_features):
feature_list = []
if all_features:
for feature in all_features.split(","):
if feature:
feature_list.append(feature)
print(feature)
return feature_list
def cmd_popen(cmd):
proc = subprocess.Popen(cmd)
proc.wait()
ret_code = proc.returncode
if ret_code != 0:
raise Exception("{} failed, return code is {}".format(cmd, ret_code))
def build_js_hap(project_path, out_put_dir, hap_name):
if not check_env():
return
gradle_dir = os.path.join(project_path, "gradle")
os.chdir(gradle_dir)
build_clean = ["gradle", "clean"]
cmd_popen(build_clean)
build_cmd = ["gradle", "entry:packageDebugHap"]
cmd_popen(build_cmd)
    gradle_output_dir = os.path.join(gradle_dir, "entry", "build", "outputs")
    if os.path.exists(gradle_output_dir):
        for root, _, files in os.walk(gradle_output_dir):
for file in files:
if file.endswith(".hap"):
file_util.copy_file(os.path.join(root, file),
os.path.join(out_put_dir.rstrip(','),
hap_name))
return
def check_env():
"""
check all the env for js hap build
return: return true if all env ready, otherwise return false
"""
env_list = ['OHOS_SDK_HOME', 'NODE_HOME', 'GRADLE_HOME']
for env in env_list:
if not os.environ.get(env):
print("the env {} not set, skip build!".format(env))
return False
else:
return True
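def _example_build_hap(project_path, out_dir):
    """Hypothetical usage sketch (assumption, not from the original script):
    build_js_hap() returns early when check_env() finds OHOS_SDK_HOME,
    NODE_HOME or GRADLE_HOME missing, so a caller can simply invoke it and
    rely on that guard. The hap name below is a placeholder."""
    build_js_hap(project_path, out_dir, "entry-debug.hap")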
if __name__ == '__main__':
sys.exit(main())
| 31.92663
| 79
| 0.631288
|
794f1a2d4558eba652b2f10da4b0068d53e926dc
| 15,614
|
py
|
Python
|
napari/plugins/io.py
|
noisysky/napari
|
6a3e11aa717e7928a0a5a3c7693577729a466ef1
|
[
"BSD-3-Clause"
] | null | null | null |
napari/plugins/io.py
|
noisysky/napari
|
6a3e11aa717e7928a0a5a3c7693577729a466ef1
|
[
"BSD-3-Clause"
] | null | null | null |
napari/plugins/io.py
|
noisysky/napari
|
6a3e11aa717e7928a0a5a3c7693577729a466ef1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
import os
import pathlib
import warnings
from logging import getLogger
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, Tuple, Union
from napari_plugin_engine import HookImplementation, PluginCallError
from ..layers import Layer
from ..types import LayerData
from ..utils.misc import abspath_or_url
from ..utils.translations import trans
from . import _npe2, plugin_manager
logger = getLogger(__name__)
if TYPE_CHECKING:
from npe2.manifest.contributions import WriterContribution
def read_data_with_plugins(
path: Union[str, Sequence[str]],
plugin: Optional[str] = None,
) -> Tuple[Optional[List[LayerData]], Optional[HookImplementation]]:
"""Iterate reader hooks and return first non-None LayerData or None.
This function returns as soon as the path has been read successfully,
while catching any plugin exceptions, storing them for later retrieval,
providing useful error messages, and re-looping until either a read
operation was successful, or no valid readers were found.
Exceptions will be caught and stored as PluginErrors
(in plugins.exceptions.PLUGIN_ERRORS)
Parameters
----------
path : str
The path (file, directory, url) to open
plugin : str, optional
Name of a plugin to use. If provided, will force ``path`` to be read
with the specified ``plugin``. If the requested plugin cannot read
``path``, a PluginCallError will be raised.
Returns
-------
LayerData : list of tuples, or None
LayerData that can be passed to :func:`Viewer._add_layer_from_data()
<napari.components.viewer_model.ViewerModel._add_layer_from_data>`.
        ``LayerData`` is a list of tuples, where each tuple is one of
        ``(data,)``, ``(data, meta)``, or ``(data, meta, layer_type)``.
If no reader plugins were found (or they all failed), returns ``None``
Raises
------
PluginCallError
If ``plugin`` is specified but raises an Exception while reading.
"""
hookimpl: Optional[HookImplementation]
res = _npe2.read(path, plugin)
if res is not None:
_ld, hookimpl = res
return [] if _is_null_layer_sentinel(_ld) else _ld, hookimpl
hook_caller = plugin_manager.hook.napari_get_reader
path = abspath_or_url(path, must_exist=True)
if not plugin and isinstance(path, (str, pathlib.Path)):
extension = os.path.splitext(path)[-1]
plugin = plugin_manager.get_reader_for_extension(extension)
hookimpl = None
if plugin:
if plugin not in plugin_manager.plugins:
names = {i.plugin_name for i in hook_caller.get_hookimpls()}
raise ValueError(
trans._(
"There is no registered plugin named '{plugin}'.\nNames of plugins offering readers are: {names}",
deferred=True,
plugin=plugin,
names=names,
)
)
reader = hook_caller._call_plugin(plugin, path=path)
if not callable(reader):
raise ValueError(
trans._(
'Plugin {plugin!r} does not support file {path}',
deferred=True,
plugin=plugin,
path=path,
)
)
hookimpl = hook_caller.get_plugin_implementation(plugin)
layer_data = reader(path)
# if the reader returns a "null layer" sentinel indicating an empty
# file, return an empty list, otherwise return the result or None
if _is_null_layer_sentinel(layer_data):
return [], hookimpl
return layer_data or None, hookimpl
layer_data = None
result = hook_caller.call_with_result_obj(path=path)
reader = result.result # will raise exceptions if any occurred
try:
layer_data = reader(path) # try to read data
hookimpl = result.implementation
except Exception as exc:
raise PluginCallError(result.implementation, cause=exc)
if not layer_data:
# if layer_data is empty, it means no plugin could read path
# we just want to provide some useful feedback, which includes
# whether or not paths were passed to plugins as a list.
if isinstance(path, (tuple, list)):
message = trans._(
'No plugin found capable of reading [{repr_path}, ...] as stack.',
deferred=True,
repr_path=path[0],
)
else:
message = trans._(
'No plugin found capable of reading {repr_path}.',
deferred=True,
repr_path=repr(path),
)
# TODO: change to a warning notification in a later PR
raise ValueError(message)
# if the reader returns a "null layer" sentinel indicating an empty file,
# return an empty list, otherwise return the result or None
_data = [] if _is_null_layer_sentinel(layer_data) else layer_data or None
return _data, hookimpl
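def _example_read_usage(path):
    """Hypothetical usage sketch (not part of napari itself): illustrates the
    shape of the value returned by read_data_with_plugins() as documented
    above; nothing here refers to a real plugin."""
    layer_data, hookimpl = read_data_with_plugins(path)
    for tup in layer_data or []:
        # each tuple is (data,), (data, meta) or (data, meta, layer_type)
        data = tup[0]
        meta = tup[1] if len(tup) > 1 else {}
        layer_type = tup[2] if len(tup) > 2 else 'image'  # 'image' is the usual default
        print(type(data), meta.get('name'), layer_type)
    return layer_data, hookimpl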
def save_layers(
path: str,
layers: List[Layer],
*,
plugin: Optional[str] = None,
_writer: Optional[WriterContribution] = None,
) -> List[str]:
"""Write list of layers or individual layer to a path using writer plugins.
If ``plugin`` is not provided and only one layer is passed, then we
directly call ``plugin_manager.hook.napari_write_<layer>()`` which
will loop through implementations and stop when the first one returns a
non-None result. The order in which implementations are called can be
changed with the hook ``bring_to_front`` method, for instance:
``plugin_manager.hook.napari_write_points.bring_to_front``
If ``plugin`` is not provided and multiple layers are passed, then
we call ``plugin_manager.hook.napari_get_writer()`` which loops through
plugins to find the first one that knows how to handle the combination of
layers and is able to write the file. If no plugins offer
``napari_get_writer`` for that combination of layers then the builtin
``napari_get_writer`` implementation will create a folder and call
``napari_write_<layer>`` for each layer using the ``layer.name`` variable
to modify the path such that the layers are written to unique files in the
folder.
If ``plugin`` is provided and a single layer is passed, then
we call the ``napari_write_<layer_type>`` for that plugin, and if it
fails we error.
    If a ``plugin`` is provided and multiple layers are passed, then
    we call ``napari_get_writer`` for that plugin. If it doesn't return a
    WriterFunction we error, otherwise we call the returned function and
    error if that call fails.
Parameters
----------
path : str
A filepath, directory, or URL to open.
layers : List[layers.Layer]
List of layers to be saved. If only a single layer is passed then
we use the hook specification corresponding to its layer type,
``napari_write_<layer_type>``. If multiple layers are passed then we
use the ``napari_get_writer`` hook specification.
plugin : str, optional
Name of the plugin to use for saving. If None then all plugins
corresponding to appropriate hook specification will be looped
through to find the first one that can save the data.
Returns
-------
list of str
File paths of any files that were written.
"""
if len(layers) > 1:
written = _write_multiple_layers_with_plugins(
path, layers, plugin_name=plugin, _writer=_writer
)
elif len(layers) == 1:
_written = _write_single_layer_with_plugins(
path, layers[0], plugin_name=plugin, _writer=_writer
)
written = [_written] if _written else []
else:
written = []
if not written:
# if written is empty, it means no plugin could write the
# path/layers combination
# we just want to provide some useful feedback
warnings.warn(
trans._(
'No data written! There may be no plugins capable of writing these {length} layers to {path}.',
deferred=True,
length=len(layers),
path=path,
)
)
return written
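def _example_save_usage(viewer_layers, tmp_dir):
    """Hypothetical usage sketch (not part of napari itself): a single layer
    goes through napari_write_<layer_type>, while several layers go through
    napari_get_writer, as described in the docstring above. The file names
    are placeholders."""
    single = save_layers(os.path.join(tmp_dir, 'layer.tif'), viewer_layers[:1])
    many = save_layers(os.path.join(tmp_dir, 'stack'), viewer_layers, plugin=None)
    return single + many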
def _is_null_layer_sentinel(layer_data: Any) -> bool:
"""Checks if the layer data returned from a reader function indicates an
empty file. The sentinel value used for this is ``[(None,)]``.
Parameters
----------
layer_data : LayerData
The layer data returned from a reader function to check
Returns
-------
bool
True, if the layer_data indicates an empty file, False otherwise
"""
return (
isinstance(layer_data, list)
and len(layer_data) == 1
and isinstance(layer_data[0], tuple)
and len(layer_data[0]) == 1
and layer_data[0][0] is None
)
def _write_multiple_layers_with_plugins(
path: str,
layers: List[Layer],
*,
plugin_name: Optional[str] = None,
_writer: Optional[WriterContribution] = None,
) -> List[str]:
"""Write data from multiple layers data with a plugin.
If a ``plugin_name`` is not provided we loop through plugins to find the
first one that knows how to handle the combination of layers and is able to
write the file. If no plugins offer ``napari_get_writer`` for that
combination of layers then the default ``napari_get_writer`` will create a
folder and call ``napari_write_<layer>`` for each layer using the
``layer.name`` variable to modify the path such that the layers are written
to unique files in the folder.
If a ``plugin_name`` is provided, then call ``napari_get_writer`` for that
    plugin. If it doesn't return a ``WriterFunction`` we error, otherwise we
    call the returned function and error if that call fails.
Exceptions will be caught and stored as PluginErrors
(in plugins.exceptions.PLUGIN_ERRORS)
Parameters
----------
path : str
The path (file, directory, url) to write.
layers : List of napari.layers.Layer
List of napari layers to write.
plugin_name : str, optional
If provided, force the plugin manager to use the ``napari_get_writer``
from the requested ``plugin_name``. If none is available, or if it is
incapable of handling the layers, this function will fail.
Returns
-------
list of str
A list of filenames, if any, that were written.
"""
# Try to use NPE2 first
written_paths = _npe2.write_layers(path, layers, plugin_name, _writer)
if written_paths:
return written_paths
logger.debug("Falling back to original plugin engine.")
layer_data = [layer.as_layer_data_tuple() for layer in layers]
layer_types = [ld[2] for ld in layer_data]
if not plugin_name and isinstance(path, (str, pathlib.Path)):
extension = os.path.splitext(path)[-1]
plugin_name = plugin_manager.get_writer_for_extension(extension)
hook_caller = plugin_manager.hook.napari_get_writer
path = abspath_or_url(path)
logger.debug(f"Writing to {path}. Hook caller: {hook_caller}")
if plugin_name:
# if plugin has been specified we just directly call napari_get_writer
# with that plugin_name.
if plugin_name not in plugin_manager.plugins:
names = {i.plugin_name for i in hook_caller.get_hookimpls()}
raise ValueError(
trans._(
"There is no registered plugin named '{plugin_name}'.\nNames of plugins offering writers are: {names}",
deferred=True,
plugin_name=plugin_name,
names=names,
)
)
implementation = hook_caller.get_plugin_implementation(plugin_name)
writer_function = hook_caller(
_plugin=plugin_name, path=path, layer_types=layer_types
)
else:
result = hook_caller.call_with_result_obj(
path=path, layer_types=layer_types, _return_impl=True
)
writer_function = result.result
implementation = result.implementation
if not callable(writer_function):
if plugin_name:
msg = trans._(
'Requested plugin "{plugin_name}" is not capable of writing this combination of layer types: {layer_types}',
deferred=True,
plugin_name=plugin_name,
layer_types=layer_types,
)
else:
msg = trans._(
'Unable to find plugin capable of writing this combination of layer types: {layer_types}',
deferred=True,
layer_types=layer_types,
)
raise ValueError(msg)
try:
return writer_function(abspath_or_url(path), layer_data)
except Exception as exc:
raise PluginCallError(implementation, cause=exc)
def _write_single_layer_with_plugins(
path: str,
layer: Layer,
*,
plugin_name: Optional[str] = None,
_writer: Optional[WriterContribution] = None,
) -> Optional[str]:
"""Write single layer data with a plugin.
If ``plugin_name`` is not provided then we just directly call
``plugin_manager.hook.napari_write_<layer>()`` which will loop through
implementations and stop when the first one returns a non-None result. The
order in which implementations are called can be changed with the
implementation sorter/disabler.
If ``plugin_name`` is provided, then we call the
``napari_write_<layer_type>`` for that plugin, and if it fails we error.
Exceptions will be caught and stored as PluginErrors
(in plugins.exceptions.PLUGIN_ERRORS)
Parameters
----------
path : str
The path (file, directory, url) to write.
layer : napari.layers.Layer
Layer to be written out.
plugin_name : str, optional
Name of the plugin to write data with. If None then all plugins
corresponding to appropriate hook specification will be looped
through to find the first one that can write the data.
Returns
-------
path : str or None
If data is successfully written, return the ``path`` that was written.
Otherwise, if nothing was done, return ``None``.
"""
# Try to use NPE2 first
written_paths = _npe2.write_layers(path, [layer], plugin_name, _writer)
if written_paths:
return written_paths[0]
logger.debug("Falling back to original plugin engine.")
hook_caller = getattr(
plugin_manager.hook, f'napari_write_{layer._type_string}'
)
if not plugin_name and isinstance(path, (str, pathlib.Path)):
extension = os.path.splitext(path)[-1]
plugin_name = plugin_manager.get_writer_for_extension(extension)
logger.debug(f"Writing to {path}. Hook caller: {hook_caller}")
if plugin_name and (plugin_name not in plugin_manager.plugins):
names = {i.plugin_name for i in hook_caller.get_hookimpls()}
raise ValueError(
trans._(
"There is no registered plugin named '{plugin_name}'.\nPlugins capable of writing layer._type_string layers are: {names}",
deferred=True,
plugin_name=plugin_name,
names=names,
)
)
# Call the hook_caller
return hook_caller(
_plugin=plugin_name,
path=abspath_or_url(path),
data=layer.data,
meta=layer._get_state(),
)
| 37.17619
| 138
| 0.650442
|
794f1b43a667b97b377cd18a5d5468013bf620d9
| 228
|
py
|
Python
|
website/models.py
|
cangkevin/asain-show-rss-website-wrapper
|
ca7953e2b764705d1a78c691e1920b2f632e6bc9
|
[
"MIT"
] | null | null | null |
website/models.py
|
cangkevin/asain-show-rss-website-wrapper
|
ca7953e2b764705d1a78c691e1920b2f632e6bc9
|
[
"MIT"
] | 46
|
2018-12-16T02:38:34.000Z
|
2021-06-01T22:44:45.000Z
|
website/models.py
|
cangkevin/asain-show-rss-website-wrapper
|
ca7953e2b764705d1a78c691e1920b2f632e6bc9
|
[
"MIT"
] | 1
|
2019-01-06T04:13:51.000Z
|
2019-01-06T04:13:51.000Z
|
"""Models used throughout the application.
This module holds various model classes that are
intended for internal use.
"""
from collections import namedtuple
RssResponse = namedtuple("RssResponse", "title items paginations")
| 22.8
| 66
| 0.79386
|
794f1cc7cc35108f8e9898b87780b14987e66811
| 4,296
|
py
|
Python
|
releasetool/circleci.py
|
michaelbausor/releasetool
|
5bac8a5ab8f9d993c0b52312ff04e847c322d525
|
[
"Apache-2.0"
] | 23
|
2018-10-09T15:14:21.000Z
|
2022-01-24T12:18:57.000Z
|
releasetool/circleci.py
|
quartzmo/releasetool
|
fca57b9e6caefe013444fa576bc3bba514942f97
|
[
"Apache-2.0"
] | 160
|
2018-09-21T22:16:02.000Z
|
2022-03-30T21:51:35.000Z
|
releasetool/circleci.py
|
quartzmo/releasetool
|
fca57b9e6caefe013444fa576bc3bba514942f97
|
[
"Apache-2.0"
] | 19
|
2018-10-08T20:39:39.000Z
|
2021-07-28T15:17:14.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, Optional
import time
from datetime import datetime
import requests
_CIRCLE_ROOT: str = "https://circleci.com/api/v1.1"
class CircleCI:
def __init__(self, repository: str, vcs: str = "github") -> None:
self.session: requests.Session = requests.Session()
self.vcs = vcs
self.repo = repository
def get_latest_build_by_tag(self, tag: str, retries: int = 15) -> Optional[dict]:
url = f"{_CIRCLE_ROOT}/project/{self.vcs}/{self.repo}"
for retry in range(1, retries):
response = self.session.get(url)
response.raise_for_status()
for build in response.json():
if "branch" in build.keys() and build["vcs_tag"] == tag:
return build
time.sleep(retry)
return None
def get_latest_build_by_branch(self, branch_name: str) -> Optional[dict]:
url = f"{_CIRCLE_ROOT}/project/{self.vcs}/{self.repo}"
response = self.session.get(url)
response.raise_for_status()
for build in response.json():
if "branch" in build.keys() and build["branch"] == branch_name:
return build
return None
def get_fresh_build_by_branch(
self, branch_name: str, seconds_fresh: int = 60, retries: int = 15
) -> Optional[dict]:
"""
Find a build that is less than seconds_fresh old. Useful if you
need to find a build that isn't an old run
"""
for retry in range(1, retries):
build = self.get_latest_build_by_branch(branch_name)
if not build:
continue
build_queued = build["queued_at"]
queued_time = datetime.strptime(build_queued, "%Y-%m-%dT%H:%M:%S.%fZ")
time_delta = datetime.utcnow() - queued_time
if time_delta.total_seconds() <= seconds_fresh:
return build
# we either didn't find a build (hasn't been queued) or we
# found a build but it was stale. Wait for new build to be queued.
time.sleep(retry)
return None
def get_build(self, build_num: str) -> dict:
url = f"{_CIRCLE_ROOT}/project/{self.vcs}/{self.repo}/{build_num}"
response = self.session.get(url)
response.raise_for_status()
return response.json()
def get_link_to_build(self, build_num: int):
# API vcs and FE vcs are different
vcs_map = {"github": "gh"}
if self.vcs in vcs_map.keys():
url_vcs = vcs_map[self.vcs]
else:
# if we don't have it in the mapping provide it directly.
url_vcs = self.vcs
# https://circleci.com/gh/GitHubUser/RepositoryName/1234
return f"https://circleci.com/{url_vcs}/{self.repo}/{build_num}"
def get_build_status_generator(self, build_num: str) -> Iterator[str]:
"""
Returns a generator that polls circle for the status of a branch. It
continues to return results until it enters a finished state
"""
"""
lifecycle_states = [
"queued", "scheduled", "not_run", "not_running", "running",
"finished" ]
build_status_states = [
"retried", "canceled", "infrastructure_fail", "timedout",
"not_run", "running", "failed", "queued", "scheduled",
"not_running", "no_tests", "fixed", "success" ]
"""
build = self.get_build(build_num)
while "lifecycle" in build.keys() and build["lifecycle"] != "finished":
yield build["status"]
time.sleep(10)
build = self.get_build(build_num)
yield build["status"]
return
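def _example_wait_for_branch(repository, branch):
    """Hypothetical usage sketch (not part of releasetool itself): waits for a
    freshly queued build on a branch and then follows it to completion with the
    status generator above. Assumes the usual ``build_num`` field in CircleCI's
    v1.1 payload; repository and branch are caller-supplied."""
    ci = CircleCI(repository=repository)
    build = ci.get_fresh_build_by_branch(branch)
    if build is None:
        return None
    print("Watching", ci.get_link_to_build(build["build_num"]))
    for status in ci.get_build_status_generator(build["build_num"]):
        print("current status:", status)
    return ci.get_build(build["build_num"])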
| 37.034483
| 85
| 0.61406
|
794f1ce20e19ec8121eaa524481d526904e9285f
| 2,776
|
py
|
Python
|
sonic_installer/bootloader/grub.py
|
monipko/sonic-utilities
|
ad801bfb81633812b4aa25f45bdd555a27121845
|
[
"Apache-2.0"
] | 1
|
2021-02-03T06:28:38.000Z
|
2021-02-03T06:28:38.000Z
|
sonic_installer/bootloader/grub.py
|
monipko/sonic-utilities
|
ad801bfb81633812b4aa25f45bdd555a27121845
|
[
"Apache-2.0"
] | 5
|
2020-02-27T09:19:52.000Z
|
2021-05-24T16:04:51.000Z
|
sonic_installer/bootloader/grub.py
|
monipko/sonic-utilities
|
ad801bfb81633812b4aa25f45bdd555a27121845
|
[
"Apache-2.0"
] | null | null | null |
"""
Bootloader implementation for grub based platforms
"""
import os
import re
import subprocess
import click
from ..common import (
HOST_PATH,
IMAGE_DIR_PREFIX,
IMAGE_PREFIX,
run_command,
)
from .onie import OnieInstallerBootloader
class GrubBootloader(OnieInstallerBootloader):
NAME = 'grub'
def get_installed_images(self):
images = []
config = open(HOST_PATH + '/grub/grub.cfg', 'r')
for line in config:
if line.startswith('menuentry'):
image = line.split()[1].strip("'")
if IMAGE_PREFIX in image:
images.append(image)
config.close()
return images
def get_next_image(self):
images = self.get_installed_images()
grubenv = subprocess.check_output(["/usr/bin/grub-editenv", HOST_PATH + "/grub/grubenv", "list"], text=True)
m = re.search(r"next_entry=(\d+)", grubenv)
if m:
next_image_index = int(m.group(1))
else:
m = re.search(r"saved_entry=(\d+)", grubenv)
if m:
next_image_index = int(m.group(1))
else:
next_image_index = 0
return images[next_image_index]
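    # Illustrative note (assumption, not from the original file): `grub-editenv
    # ... list` prints simple key=value lines such as
    #     saved_entry=0
    #     next_entry=2
    # so get_next_image() prefers the one-shot next_entry when present, falls
    # back to saved_entry, and otherwise defaults to the first menu entry.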
def set_default_image(self, image):
images = self.get_installed_images()
command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image))
run_command(command)
return True
def set_next_image(self, image):
images = self.get_installed_images()
command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image))
run_command(command)
return True
def install_image(self, image_path):
run_command("bash " + image_path)
run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0')
def remove_image(self, image):
click.echo('Updating GRUB...')
config = open(HOST_PATH + '/grub/grub.cfg', 'r')
old_config = config.read()
menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group()
config.close()
config = open(HOST_PATH + '/grub/grub.cfg', 'w')
# remove menuentry of the image in grub.cfg
config.write(old_config.replace(menuentry, ""))
config.close()
click.echo('Done')
image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)
click.echo('Removing image root filesystem...')
subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])
click.echo('Done')
run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0')
click.echo('Image removed')
@classmethod
def detect(cls):
return os.path.isfile(os.path.join(HOST_PATH, 'grub/grub.cfg'))
| 31.908046
| 116
| 0.600865
|
794f1d69f1013ddcb5af85d315fab6bfa7272c9b
| 1,774
|
py
|
Python
|
ems/context_processors.py
|
fetux/django-ems
|
ac581ea3573c5fc9976303320f278bc2fa7cea6f
|
[
"MIT"
] | null | null | null |
ems/context_processors.py
|
fetux/django-ems
|
ac581ea3573c5fc9976303320f278bc2fa7cea6f
|
[
"MIT"
] | null | null | null |
ems/context_processors.py
|
fetux/django-ems
|
ac581ea3573c5fc9976303320f278bc2fa7cea6f
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from django.conf import settings
from ems import utils
from ems.crm.forms import QuickSearchForm
from ems.crm.models import Project
from ems.entries.models import Entry
def quick_search(request):
return {
'quick_search_form': QuickSearchForm(),
}
def quick_clock_in(request):
user = request.user
work_projects = []
leave_projects = []
if user.is_authenticated() and user.is_active:
# Display all active paid leave projects that the user is assigned to.
leave_ids = utils.get_setting('EMS_PAID_LEAVE_PROJECTS').values()
#TODO: Fix Project User relationship
lq = Q(user=user) & Q(id__in=leave_ids)
lq = Q(id__in=leave_ids)
leave_projects = Project.trackable.filter(lq).order_by('name')
# Get all projects this user has clocked in to.
entries = Entry.objects.filter(user=user)
project_ids = list(entries.values_list('project', flat=True))
# Narrow to projects which can still be clocked in to.
pq = Q(id__in=project_ids)
valid_projects = Project.trackable.filter(pq).exclude(id__in=leave_ids)
valid_ids = list(valid_projects.values_list('id', flat=True))
# Display the 10 projects this user most recently clocked into.
work_ids = []
for i in project_ids:
if len(work_ids) > 10:
break
if i in valid_ids and i not in work_ids:
work_ids.append(i)
work_projects = [valid_projects.get(pk=i) for i in work_ids]
return {
'leave_projects': leave_projects,
'work_projects': work_projects,
}
def extra_settings(request):
return {
'COMPRESS_ENABLED': settings.COMPRESS_ENABLED,
}
| 30.586207
| 79
| 0.660654
|
794f1dd058bebe483cfbac06c96103a22cf1d4ae
| 11,459
|
py
|
Python
|
src/outlier_hub/datasets/odr/preprocessor.py
|
ichbk/outlierhub
|
5cb1ac80a9ea73b5144644d82ba695f3574e6815
|
[
"Apache-2.0"
] | null | null | null |
src/outlier_hub/datasets/odr/preprocessor.py
|
ichbk/outlierhub
|
5cb1ac80a9ea73b5144644d82ba695f3574e6815
|
[
"Apache-2.0"
] | null | null | null |
src/outlier_hub/datasets/odr/preprocessor.py
|
ichbk/outlierhub
|
5cb1ac80a9ea73b5144644d82ba695f3574e6815
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import tempfile
import h5py
import glob
import csv
import numpy as np
import pandas as pd
import io
from natsort import natsorted, ns
from PIL import Image, ImageFile
from pathlib import Path
from typing import Tuple, List, Any
from data_stack.io.resources import StreamedResource, ResourceFactory
from data_stack.io.storage_connectors import StorageConnector
from data_stack.util.logger import logger
def _load_sample_paths(samples_identifier: str) -> List[str]:
"""
function to load folder content into arrays and then it returns that same array
@param samples_identifier: path to samples, here i.e. images
@return: sorted list of paths of raw samples
"""
# Put file paths into lists and return them:
raw_samples_paths = []
for file in sorted(glob.glob(samples_identifier + '/*.jpg')):
raw_samples_paths.append(file)
logger.debug(f'Length Check of raw sample paths, should be 7000 and result is: \n {len(raw_samples_paths)}')
logger.debug(f'raw_samples_paths on point 10: \n {raw_samples_paths[10]}')
return raw_samples_paths
def _load_metadata(targets_identifier: str) -> List[List[Any]]:
    """
    Loads the xlsx file and creates a list; after merging the two diagnostic
    keyword columns each item is a list with 14 entries (ID, age, sex, both
    fundus file names, diagnostics and the 8 label columns).
    @param targets_identifier: path to the data.xlsx file
    @return: list of lists containing 14 entries each
    """
# Use pandas to read and manipulate metadata:
data_xls = pd.read_excel(targets_identifier, 'Sheet1', index_col=None, engine='openpyxl')
# merge diagnostics
data_xls["diagnostics"] = data_xls["Left-Diagnostic Keywords"] + ', ' + data_xls["Right-Diagnostic Keywords"]
data_xls.drop("Left-Diagnostic Keywords", inplace=True, axis=1)
data_xls.drop("Right-Diagnostic Keywords", inplace=True, axis=1)
# rearrange columns
cols = data_xls.columns.tolist()
columns = ['ID', 'Patient Age', 'Patient Sex', 'Left-Fundus', 'Right-Fundus', 'diagnostics',
'N', 'D', 'G', 'C', 'A', 'H', 'M', 'O']
data_xls = data_xls[columns]
# get a list of metadata
data_values = data_xls.values.tolist()
return data_values
def _get_most_common_res(samples: List[str]) -> Tuple[int, int]:
samples_amount = len(samples)
histo = list(range(samples_amount))
for entry in range(samples_amount):
with Image.open(samples[entry]) as img:
width, height = img.size
histo[entry] = (width, height)
most_common = max(histo, key=histo.count)
return most_common
def _get_clean_split_samples(resolution, split_samples) -> List[str]:
cleaned_split_samples = []
for entry in split_samples:
with Image.open(entry) as img:
if img.size == resolution:
cleaned_split_samples.append(entry)
return cleaned_split_samples
def _create_temp_list(split_targets, split_samples) -> List[Any]:
# comprehension list to initiate temp_list with meta data
temp_list = [[entry, [None, None]] for entry in split_targets]
for item in temp_list:
for sample_item in split_samples:
file = Path(sample_item).name
if file == item[0][3]:
item[1][0] = sample_item
elif file == item[0][4]:
item[1][1] = sample_item
return temp_list
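# Illustrative note (assumption, not from the original module): each entry of
# the temp_list built above pairs one metadata row with the matching eye images,
# e.g. [metadata_row, ['.../1_left.jpg', '.../1_right.jpg']]; entries for which
# no image was found keep the [None, None] placeholder and are dropped by
# _clean_temp_list() below.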
def _clean_temp_list(temp_list):
clean_temp_list = []
for item in temp_list:
if not (item[1] == [None, None]):
clean_temp_list.append(item)
return clean_temp_list
def _preprocess_split(h5py_file: h5py.File,
split_name: str,
samples_identifier: str,
targets_identifier: str):
logger.debug(f"calling load_sample_paths(samples_identifier)")
split_samples = _load_sample_paths(samples_identifier)
logger.debug(f"calling load_metadata(targets_identifier)")
split_targets = _load_metadata(targets_identifier)
# sample information are paths to images
    # samples are ultimately images here, which can be interpreted as numpy 3D arrays:
    # a colored image has height*width pixels and each pixel holds three RGB values,
    # each RGB component being a so-called channel [height, width, RGB value]
    # necessary: keep only samples whose resolution matches and whose target is provided
# 1) find resolution and filter samples with estimated resolution
# 2) create new temp List for indexing targets to associated samples [meta,(sample1,sample2)]
# 3) clean temp List from empty samples
# 4) divide temp list into target and samples
# 5) create csv files for manual verification of data
# 6) prepare hdf datasets
# 7) enrich datasets with data
# 1) find resolution
# samples resolution are not equal -> get the most common resolution
logger.debug(f"calling _get_most_common_res(split_samples)")
resolution = _get_most_common_res(split_samples)
logger.debug(f"resolution {resolution}")
# filter split_samples, so only wanted resolution is provided
logger.debug(f"_get_clean_split_samples(resolution, split_samples) starts")
cleaned_split_samples = _get_clean_split_samples(resolution, split_samples)
logger.debug(f"len(cleaned_split_samples):{len(cleaned_split_samples)}")
# 2) create temp list : [meta,(sample1,sample2)]
# 1. item contains meta info, 2. info is a tuple inheriting associated sample paths
logger.debug(f"length & type of split_targets: {len(split_targets)}, {type(split_targets)} ")
logger.debug(f"function calling: _create_temp_list(split_targets)")
temp_list = _create_temp_list(split_targets, cleaned_split_samples)
# 3) clean temp List from empty samples
print(f"clean temp_list from empty targets without samples")
print(f"length of temp_list: {len(temp_list)}")
logger.debug(f"calling _clean_temp_list")
clean_temp_list = _clean_temp_list(temp_list)
print(f"length of clean_temp_list: {len(clean_temp_list)}")
# 4) divide the list into target and sample list
targets_list = [entry[0] for entry in clean_temp_list]
samples_list = [entry[1] for entry in clean_temp_list]
logger.debug(f"Create Pandas dataframe of clean_temp_list list and save it as csv")
df = pd.DataFrame(temp_list)
df.to_csv('clean_temp_list.csv', index=False, header=False)
# 5) create csv file with pandas - this is for manual verification
logger.debug(f"Create Pandas dataframe of target and samples list and save it as csv")
df = pd.DataFrame(targets_list)
df.to_csv('targets_list.csv', index=False, header=False)
df = pd.DataFrame(samples_list)
df.to_csv('samples_list.csv', index=False, header=False)
# create h5py groups, one for target and one for samples, every entry will be a dataset then
logger.debug(f"Create h5py groups")
sample_group = h5py_file.create_group('samples')
target_group = h5py_file.create_group('targets')
# saving the images in samples group
counter = 0
for entry in samples_list:
eyepair_sample_group = sample_group.create_group(str(counter))
counter = counter + 1
img_path_1 = entry[0]
img_name_1 = Path(entry[0]).name
img_path_2 = entry[1]
img_name_2 = Path(entry[1]).name
# open image, behind the path
with open(img_path_1, 'rb') as img:
binary_data_1 = img.read()
with open(img_path_2, 'rb') as img:
binary_data_2 = img.read()
binary_data_np_1 = np.asarray(binary_data_1)
binary_data_np_2 = np.asarray(binary_data_2)
# save it in the subgroup. each eyepair_sample_group contains images from one patient.
        eyepair_sample_group.create_dataset(img_name_1, data=binary_data_np_1)
        eyepair_sample_group.create_dataset(img_name_2, data=binary_data_np_2)
# saving the targets in targets group
# h5py cannot save np.ndarrays with strings by default, costum dtype must be created
utf8_type = h5py.string_dtype('utf-8')
# metadata_info_amount = 14
counter = 0
for entry in targets_list:
entry = [str(item) for item in entry]
        target_group.create_dataset(str(counter),
                                    data=entry,
                                    dtype=utf8_type)
counter = counter + 1
'''
# paths for h5py
sample_location = os.path.join(split_name, "samples")
target_location = os.path.join(split_name, "targets")
# 6) prepare hdf datasets
# with open(samples_list[0][0], 'rb') as image:
# type_reference = image.read()
# print(f'type(image.read(): {type(image.read())}')
# data_shape = np.asarray(image.read())
# sample_dset = h5py_file.create_dataset(sample_location,
# shape=(len(clean_temp_list), [data_shape, data_shape]),
# dtype=np.void(type_reference))
# 7) enrich datasets with data
sample_dset = h5py_file.create_dataset(sample_location,
data=samples_list)
for cnt, sample in enumerate(samples_list):
tmp_sample_list = []
with open(sample[0], 'rb') as image_sample:
sample_left_bytes = image_sample.read()
tmp_sample_list.append(sample_left_bytes)
with open(sample[1], 'rb') as image_sample:
sample_right_bytes = image_sample.read()
tmp_sample_list.append(sample_right_bytes)
sample_pair_np = np.asarray(tmp_sample_list)
sample_dset[cnt, 1] = sample_pair_np
#logger.debug(f" testimage")
#sample_np = sample_dset[cnt, 1]
#sample_bytes = sample_np.tobytes()
#sample_bytes = io.BytesIO(sample_bytes)
#sample = Image.open(sample_bytes)
#sample.show()
for cnt, target in enumerate(targets_list):
target_dset[cnt] = np.array(target)
'''
class ODRPreprocessor:
def __init__(self, storage_connector: StorageConnector):
self.storage_connector = storage_connector
self.split_names = ["raw"]
def preprocess(self,
dataset_identifier: str,
samples_identifier: str,
targets_identifier: str) -> StreamedResource:
logger.debug(f"preprocess(dataset/samples/targets - identifier) starts"
f"{dataset_identifier, samples_identifier, targets_identifier}")
with tempfile.TemporaryFile() as temp_file:
with h5py.File(temp_file, 'w') as h5py_file:
for split_name in self.split_names:
_preprocess_split(h5py_file,
split_name,
samples_identifier,
targets_identifier)
h5py_file.flush()
temp_file.flush()
logger.debug(f"ResourceFactory.get_resource(dataset_identifier, temp_file) starts"
f"{dataset_identifier, samples_identifier, targets_identifier}")
streamed_resource = ResourceFactory.get_resource(dataset_identifier, temp_file)
self.storage_connector.set_resource(dataset_identifier, streamed_resource)
streamed_resource = self.storage_connector.get_resource(dataset_identifier)
return streamed_resource
| 38.844068
| 113
| 0.669517
|
794f1ea232cc9a9f0a9c4465c521331ebbf5e3a7
| 391
|
py
|
Python
|
config/components/database.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
config/components/database.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
config/components/database.py
|
HanSaloZu/drf-blog-api
|
966776e59ed7699a9e94aeb85fdd6785c2532a3a
|
[
"MIT"
] | null | null | null |
DATABASES = {
"default": {
"ENGINE": environ.get("DB_ENGINE", "django.db.backends.sqlite3"),
"NAME": environ.get("DB_DATABASE", BASE_DIR / "db.sqlite3"),
"USER": environ.get("DB_USER", "user"),
"PASSWORD": environ.get("DB_PASSWORD", "password"),
"HOST": environ.get("DB_HOST", "localhost"),
"PORT": environ.get("DB_PORT", "5432"),
}
}
| 35.545455
| 73
| 0.570332
|
794f1f4aea4cdc28e87ddcae810625d3908c2f6a
| 3,213
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/utils/jsonrpc.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
venv/lib/python2.7/site-packages/ansible/utils/jsonrpc.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
venv/lib/python2.7/site-packages/ansible/utils/jsonrpc.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import traceback
from ansible.module_utils._text import to_text
from ansible.module_utils.six import binary_type
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class JsonRpcServer(object):
_objects = set()
def handle_request(self, request):
request = json.loads(to_text(request, errors='surrogate_then_replace'))
method = request.get('method')
if method.startswith('rpc.') or method.startswith('_'):
error = self.invalid_request()
return json.dumps(error)
params = request.get('params')
setattr(self, '_identifier', request.get('id'))
args = []
kwargs = {}
if all((params, isinstance(params, list))):
args = params
elif all((params, isinstance(params, dict))):
kwargs = params
rpc_method = None
for obj in self._objects:
rpc_method = getattr(obj, method, None)
if rpc_method:
break
if not rpc_method:
error = self.method_not_found()
response = json.dumps(error)
else:
try:
result = rpc_method(*args, **kwargs)
except Exception as exc:
display.vvv(traceback.format_exc())
error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
response = json.dumps(error)
else:
if isinstance(result, dict) and 'jsonrpc' in result:
response = result
else:
response = self.response(result)
response = json.dumps(response)
delattr(self, '_identifier')
return response
def register(self, obj):
self._objects.add(obj)
def header(self):
return {'jsonrpc': '2.0', 'id': self._identifier}
def response(self, result=None):
response = self.header()
if isinstance(result, binary_type):
result = to_text(result)
response['result'] = result
return response
def error(self, code, message, data=None):
response = self.header()
error = {'code': code, 'message': message}
if data:
error['data'] = data
response['error'] = error
return response
# json-rpc standard errors (-32768 .. -32000)
def parse_error(self, data=None):
return self.error(-32700, 'Parse error', data)
def method_not_found(self, data=None):
return self.error(-32601, 'Method not found', data)
def invalid_request(self, data=None):
return self.error(-32600, 'Invalid request', data)
def invalid_params(self, data=None):
return self.error(-32602, 'Invalid params', data)
def internal_error(self, data=None):
return self.error(-32603, 'Internal error', data)
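def _example_dispatch():
    """Hypothetical usage sketch (not part of Ansible itself): registers a
    plain object on the server and dispatches one JSON-RPC request string
    through handle_request(); the Echo class is made up for illustration."""
    class Echo(object):
        def say(self, text):
            return text
    server = JsonRpcServer()
    server.register(Echo())
    return server.handle_request(
        '{"jsonrpc": "2.0", "id": 1, "method": "say", "params": ["hello"]}')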
| 29.477064
| 95
| 0.601618
|
794f1f86e43b548f1d0cc91e168dc27a48517eec
| 433
|
py
|
Python
|
rest/conference/list-get-example-1/list-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 3
|
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
rest/conference/list-get-example-1/list-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
rest/conference/list-get-example-1/list-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 1
|
2019-10-02T14:36:36.000Z
|
2019-10-02T14:36:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
# A list of conference objects with the properties described above
for conference in client.conferences.list():
print(conference.status)
| 36.083333
| 72
| 0.810624
|
794f20a73b3f7eda01019bf7d1ee57e6394b37e1
| 1,770
|
py
|
Python
|
tests/test_paths.py
|
cuihantao/Andes
|
6cdc057986c4a8382194ef440b6e92b8dfb77e25
|
[
"Apache-2.0"
] | 16
|
2017-06-16T14:21:04.000Z
|
2018-08-18T08:52:27.000Z
|
tests/test_paths.py
|
cuihantao/Andes
|
6cdc057986c4a8382194ef440b6e92b8dfb77e25
|
[
"Apache-2.0"
] | 1
|
2017-12-12T07:51:16.000Z
|
2017-12-12T07:51:16.000Z
|
tests/test_paths.py
|
cuihantao/Andes
|
6cdc057986c4a8382194ef440b6e92b8dfb77e25
|
[
"Apache-2.0"
] | 7
|
2017-12-10T07:32:36.000Z
|
2018-09-19T16:38:30.000Z
|
import os
import unittest
import andes
from andes.utils.paths import list_cases
class TestPaths(unittest.TestCase):
def setUp(self) -> None:
self.kundur = 'kundur/'
self.matpower = 'matpower/'
self.ieee14 = andes.get_case("ieee14/ieee14.raw")
def test_tree(self):
list_cases(self.kundur, no_print=True)
list_cases(self.matpower, no_print=True)
def test_addfile_path(self):
path, case = os.path.split(self.ieee14)
ss = andes.load('ieee14.raw', addfile='ieee14.dyr',
input_path=path, default_config=True,
)
self.assertNotEqual(ss, None)
ss = andes.run('ieee14.raw', addfile='ieee14.dyr',
input_path=path,
no_output=True, default_config=True,
)
self.assertNotEqual(ss, None)
def test_relative_path(self):
ss = andes.run('ieee14.raw',
input_path=andes.get_case('ieee14/', check=False),
no_output=True, default_config=True,
)
self.assertNotEqual(ss, None)
def test_pert_file(self):
"""Test path of pert file"""
path, case = os.path.split(self.ieee14)
# --- with pert file ---
ss = andes.run('ieee14.raw', pert='pert.py',
input_path=path, no_output=True, default_config=True,
)
ss.TDS.init()
self.assertIsNotNone(ss.TDS.callpert)
# --- without pert file ---
ss = andes.run('ieee14.raw',
input_path=path, no_output=True, default_config=True,
)
ss.TDS.init()
self.assertIsNone(ss.TDS.callpert)
| 32.181818
| 76
| 0.547458
|
794f20c9eb7d3ac8bbaccfb73e8096a4b349a8b6
| 1,972
|
py
|
Python
|
src/App/avt_main.py
|
schmouk/ArcheryVideoTraining
|
8c7f5fadc485e0b3a0851d0227a26bd799d3eb69
|
[
"MIT"
] | null | null | null |
src/App/avt_main.py
|
schmouk/ArcheryVideoTraining
|
8c7f5fadc485e0b3a0851d0227a26bd799d3eb69
|
[
"MIT"
] | 65
|
2021-01-25T22:27:55.000Z
|
2021-03-05T10:19:49.000Z
|
src/App/avt_main.py
|
schmouk/ArcheryVideoTraining
|
8c7f5fadc485e0b3a0851d0227a26bd799d3eb69
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2021 Philippe Schmouker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
import cv2
from src.Display.main_window import MainWindow
#=============================================================================
def avt_main():
"""This is the main function of the Archery Video Training application.
"""
#-- creates the main window
main_window = MainWindow()
#-- shows the main window
main_window.draw()
#-- starts the cameras acquisition
main_window.run_views()
#-- interactions w. mouse and keyboard
while True:
if cv2.waitKey( 20 ) == 27:
break
#-- stops cameras acquisition
main_window.stop_views()
#-- releases all allocated resources
cv2.destroyAllWindows()
print( "\n-- done!" )
#===== end of src.App.avt_main =====#
| 34.596491
| 78
| 0.647566
|
794f218820dd397b924dfdb0b326990ab382ee14
| 50,039
|
py
|
Python
|
panel/pane/vtk/synchronizable_serializer.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 1
|
2020-02-01T07:14:16.000Z
|
2020-02-01T07:14:16.000Z
|
panel/pane/vtk/synchronizable_serializer.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | null | null | null |
panel/pane/vtk/synchronizable_serializer.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | null | null | null |
import base64
import hashlib
import io
import struct
import sys
import time
import zipfile
from vtk.vtkCommonCore import vtkTypeUInt32Array, vtkTypeInt32Array
from vtk.vtkFiltersGeometry import vtkCompositeDataGeometryFilter, vtkGeometryFilter
from vtk.vtkRenderingCore import vtkColorTransferFunction
from vtk.vtkCommonDataModel import vtkDataObject
from .enums import TextPosition
# -----------------------------------------------------------------------------
# Python compatibility handling 2.6, 2.7, 3+
# -----------------------------------------------------------------------------
py3 = sys.version_info >= (3, 0)
if py3:
def iteritems(d, **kwargs):
return iter(d.items(**kwargs))
else:
def iteritems(d, **kwargs):
return d.iteritems(**kwargs)
if sys.version_info >= (2, 7):
buffer = memoryview
base64Encode = lambda x: base64.b64encode(x).decode('utf-8')
else:
buffer = buffer
base64Encode = lambda x: x.encode('base64')
# -----------------------------------------------------------------------------
# Array helpers
# -----------------------------------------------------------------------------
arrayTypesMapping = [
' ', # VTK_VOID 0
' ', # VTK_BIT 1
'b', # VTK_CHAR 2
'B', # VTK_UNSIGNED_CHAR 3
'h', # VTK_SHORT 4
'H', # VTK_UNSIGNED_SHORT 5
'i', # VTK_INT 6
'I', # VTK_UNSIGNED_INT 7
'l', # VTK_LONG 8
'L', # VTK_UNSIGNED_LONG 9
'f', # VTK_FLOAT 10
'd', # VTK_DOUBLE 11
'L', # VTK_ID_TYPE 12
' ', # VTK_STRING 13
' ', # VTK_OPAQUE 14
' ', # UNDEFINED
'l', # VTK_LONG_LONG 16
'L', # VTK_UNSIGNED_LONG_LONG 17
]
javascriptMapping = {
'b': 'Int8Array',
'B': 'Uint8Array',
'h': 'Int16Array',
    'H': 'Uint16Array',
'i': 'Int32Array',
'I': 'Uint32Array',
'l': 'Int32Array',
'L': 'Uint32Array',
'f': 'Float32Array',
'd': 'Float64Array'
}
def hashDataArray(dataArray):
return hashlib.md5(buffer(dataArray)).hexdigest()
def getJSArrayType(dataArray):
return javascriptMapping[arrayTypesMapping[dataArray.GetDataType()]]
def zipCompression(name, data):
with io.BytesIO() as in_memory:
with zipfile.ZipFile(in_memory, mode="w") as zf:
zf.writestr('data/%s' % name,
data, zipfile.ZIP_DEFLATED)
in_memory.seek(0)
return in_memory.read()
def dataTableToList(dataTable):
dataType = arrayTypesMapping[dataTable.GetDataType()]
elementSize = struct.calcsize(dataType)
nbValues = dataTable.GetNumberOfValues()
nbComponents = dataTable.GetNumberOfComponents()
nbytes = elementSize * nbValues
if dataType != ' ':
with io.BytesIO(buffer(dataTable)) as stream:
data = list(struct.unpack(dataType*nbValues ,stream.read(nbytes)))
return [data[idx*nbComponents:(idx+1)*nbComponents]
for idx in range(nbValues//nbComponents)]
def getScalars(mapper, dataset):
scalars = None
cell_flag = 0
scalar_mode = mapper.GetScalarMode()
array_access_mode = mapper.GetArrayAccessMode()
array_id = mapper.GetArrayId()
array_name = mapper.GetArrayName()
pd = dataset.GetPointData()
cd = dataset.GetCellData()
fd = dataset.GetFieldData()
if scalar_mode == 0: # VTK_SCALAR_MODE_DEFAULT
scalars = pd.GetScalars()
cell_flag = 0
if scalars is None:
scalars = cd.GetScalars()
cell_flag = 1
elif scalar_mode == 1: # VTK_SCALAR_MODE_USE_POINT_DATA
scalars = pd.GetScalars()
cell_flag = 0
elif scalar_mode == 2: # VTK_SCALAR_MODE_USE_CELL_DATA
scalars = cd.GetScalars()
cell_flag = 1
elif scalar_mode == 3: # VTK_SCALAR_MODE_USE_POINT_FIELD_DATA
if array_access_mode == 0: # VTK_GET_ARRAY_BY_ID
scalars = pd.GetAbstractArray(array_id)
else: # VTK_GET_ARRAY_BY_NAME
scalars = pd.GetAbstractArray(array_name)
cell_flag = 0
elif scalar_mode == 4: # VTK_SCALAR_MODE_USE_CELL_FIELD_DATA
if array_access_mode == 0: # VTK_GET_ARRAY_BY_ID
scalars = cd.GetAbstractArray(array_id)
else: # VTK_GET_ARRAY_BY_NAME
scalars = cd.GetAbstractArray(array_name)
cell_flag = 1
else: # VTK_SCALAR_MODE_USE_FIELD_DATA
if array_access_mode == 0: # VTK_GET_ARRAY_BY_ID
scalars = fd.GetAbstractArray(array_id)
else: # VTK_GET_ARRAY_BY_NAME
scalars = fd.GetAbstractArray(array_name)
cell_flag = 2
return scalars, cell_flag
def retrieveArrayName(mapper_instance, scalar_mode):
colorArrayName = None
try:
ds = [deps for deps in mapper_instance['dependencies'] if deps['id'].endswith('dataset')][0]
location = "pointData" if scalar_mode in (1, 3) else "cellData"
for arrayMeta in ds['properties']['fields']:
if arrayMeta["location"] == location and arrayMeta.get("registration", None) == "setScalars":
colorArrayName = arrayMeta["name"]
except Exception:
pass
return colorArrayName
def linspace(start, stop, num):
delta = (stop - start)/(num-1)
return [start + i*delta for i in range(num)]
# -----------------------------------------------------------------------------
# Convenience class for caching data arrays, storing computed sha sums, keeping
# track of valid actors, etc...
# -----------------------------------------------------------------------------
class SynchronizationContext():
def __init__(self, id_root=None, serialize_all_data_arrays=False, debug=False):
self.serializeAllDataArrays = serialize_all_data_arrays
self.dataArrayCache = {}
self.lastDependenciesMapping = {}
self.ingoreLastDependencies = False
self.idRoot = id_root
self.debugSerializers = debug
self.debugAll = debug
self.annotations = {}
def getReferenceId(self, instance):
if not self.idRoot or (hasattr(instance, 'IsA') and instance.IsA('vtkCamera')):
return getReferenceId(instance)
else:
return self.idRoot + getReferenceId(instance)
def addAnnotation(self, parent, prop, propId):
if prop.GetClassName() == "vtkCornerAnnotation":
annotation = {
"id": propId,
"viewport": parent.GetViewport(),
"fontSize": prop.GetLinearFontScaleFactor() * 2,
"fontFamily": prop.GetTextProperty().GetFontFamilyAsString(),
"color": prop.GetTextProperty().GetColor(),
**{pos.name: prop.GetText(pos.value) for pos in TextPosition}
}
if self.annotations is None:
self.annotations = {propId: annotation}
else:
self.annotations.update({propId: annotation})
def getAnnotations(self):
return list(self.annotations.values())
def setIgnoreLastDependencies(self, force):
self.ingoreLastDependencies = force
def cacheDataArray(self, pMd5, data):
self.dataArrayCache[pMd5] = data
def getCachedDataArray(self, pMd5, binary=False, compression=False):
cacheObj = self.dataArrayCache[pMd5]
array = cacheObj['array']
cacheTime = cacheObj['mTime']
if cacheTime != array.GetMTime():
            if self.debugAll:
print(' ***** ERROR: you asked for an old cache key! ***** ')
if array.GetDataType() in (12, 16, 17):
arraySize = array.GetNumberOfTuples() * array.GetNumberOfComponents()
if array.GetDataType() in (12, 17):
# IdType and unsigned long long need to be converted to Uint32
newArray = vtkTypeUInt32Array()
else:
# long long need to be converted to Int32
newArray = vtkTypeInt32Array()
newArray.SetNumberOfTuples(arraySize)
for i in range(arraySize):
newArray.SetValue(i, -1 if array.GetValue(i)
< 0 else array.GetValue(i))
pBuffer = buffer(newArray)
else:
pBuffer = buffer(array)
if binary:
# Convert the vtkUnsignedCharArray into a bytes object, required by
# Autobahn websockets
return pBuffer.tobytes() if not compression else zipCompression(pMd5, pBuffer.tobytes())
return base64Encode(pBuffer if not compression else zipCompression(pMd5, pBuffer.tobytes()))
def checkForArraysToRelease(self, timeWindow=20):
cutOffTime = time.time() - timeWindow
shasToDelete = []
for sha in self.dataArrayCache:
record = self.dataArrayCache[sha]
array = record['array']
count = array.GetReferenceCount()
if count == 1 and record['ts'] < cutOffTime:
shasToDelete.append(sha)
for sha in shasToDelete:
del self.dataArrayCache[sha]
def getLastDependencyList(self, idstr):
lastDeps = []
if idstr in self.lastDependenciesMapping and not self.ingoreLastDependencies:
lastDeps = self.lastDependenciesMapping[idstr]
return lastDeps
def setNewDependencyList(self, idstr, depList):
self.lastDependenciesMapping[idstr] = depList
def buildDependencyCallList(self, idstr, newList, addMethod, removeMethod):
oldList = self.getLastDependencyList(idstr)
calls = []
calls += [[addMethod, [wrapId(x)]]
for x in newList if x not in oldList]
calls += [[removeMethod, [wrapId(x)]]
for x in oldList if x not in newList]
self.setNewDependencyList(idstr, newList)
return calls
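def _exampleSerializeDataArray(vtk_array):
    """Hypothetical usage sketch (not part of the original module): a
    SynchronizationContext is meant to be reused across serializations so
    cached arrays can be fetched again by hash; getArrayDescription() below
    registers the array in the cache before getCachedDataArray() is asked
    for its bytes."""
    ctx = SynchronizationContext(id_root='example_')
    meta = getArrayDescription(vtk_array, ctx)
    if meta is None:
        return None, None
    payload = ctx.getCachedDataArray(meta['hash'], binary=True)
    ctx.checkForArraysToRelease(timeWindow=20)
    return meta, payload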
# -----------------------------------------------------------------------------
# Global variables
# -----------------------------------------------------------------------------
SERIALIZERS = {}
context = None
# -----------------------------------------------------------------------------
# Global API
# -----------------------------------------------------------------------------
def registerInstanceSerializer(name, method):
global SERIALIZERS
SERIALIZERS[name] = method
# -----------------------------------------------------------------------------
def serializeInstance(parent, instance, instanceId, context, depth):
instanceType = instance.GetClassName()
serializer = SERIALIZERS[
instanceType] if instanceType in SERIALIZERS else None
if serializer:
return serializer(parent, instance, instanceId, context, depth)
if context.debugSerializers:
print('%s!!!No serializer for %s with id %s' %
(pad(depth), instanceType, instanceId))
# -----------------------------------------------------------------------------
def initializeSerializers():
# Annotations
registerInstanceSerializer('vtkCornerAnnotation', annotationSerializer)
# Actors/viewProps
registerInstanceSerializer('vtkImageSlice', genericProp3DSerializer)
registerInstanceSerializer('vtkVolume', genericProp3DSerializer)
registerInstanceSerializer('vtkOpenGLActor', genericActorSerializer)
registerInstanceSerializer('vtkFollower', genericActorSerializer)
registerInstanceSerializer('vtkPVLODActor', genericActorSerializer)
# Mappers
registerInstanceSerializer(
'vtkOpenGLPolyDataMapper', genericPolyDataMapperSerializer)
registerInstanceSerializer(
'vtkCompositePolyDataMapper2', genericPolyDataMapperSerializer)
registerInstanceSerializer('vtkDataSetMapper', genericPolyDataMapperSerializer)
registerInstanceSerializer(
'vtkFixedPointVolumeRayCastMapper', genericVolumeMapperSerializer)
registerInstanceSerializer(
'vtkSmartVolumeMapper', genericVolumeMapperSerializer)
registerInstanceSerializer(
'vtkOpenGLImageSliceMapper', imageSliceMapperSerializer)
registerInstanceSerializer(
'vtkOpenGLGlyph3DMapper', glyph3DMapperSerializer)
# LookupTables/TransferFunctions
registerInstanceSerializer('vtkLookupTable', lookupTableSerializer)
registerInstanceSerializer(
'vtkPVDiscretizableColorTransferFunction', colorTransferFunctionSerializer)
registerInstanceSerializer(
'vtkColorTransferFunction', colorTransferFunctionSerializer)
# opacityFunctions
registerInstanceSerializer(
'vtkPiecewiseFunction', piecewiseFunctionSerializer)
# Textures
registerInstanceSerializer('vtkOpenGLTexture', textureSerializer)
# Property
registerInstanceSerializer('vtkOpenGLProperty', propertySerializer)
registerInstanceSerializer('vtkVolumeProperty', volumePropertySerializer)
registerInstanceSerializer('vtkImageProperty', imagePropertySerializer)
# Datasets
registerInstanceSerializer('vtkPolyData', polydataSerializer)
registerInstanceSerializer('vtkImageData', imageDataSerializer)
registerInstanceSerializer(
'vtkStructuredGrid', mergeToPolydataSerializer)
registerInstanceSerializer(
'vtkUnstructuredGrid', mergeToPolydataSerializer)
registerInstanceSerializer(
'vtkMultiBlockDataSet', mergeToPolydataSerializer)
# RenderWindows
registerInstanceSerializer('vtkCocoaRenderWindow', renderWindowSerializer)
registerInstanceSerializer(
'vtkXOpenGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer(
'vtkWin32OpenGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer('vtkEGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer('vtkOpenVRRenderWindow', renderWindowSerializer)
registerInstanceSerializer(
'vtkGenericOpenGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer(
'vtkOSOpenGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer('vtkOpenGLRenderWindow', renderWindowSerializer)
registerInstanceSerializer('vtkIOSRenderWindow', renderWindowSerializer)
registerInstanceSerializer(
'vtkExternalOpenGLRenderWindow', renderWindowSerializer)
# Renderers
registerInstanceSerializer('vtkOpenGLRenderer', rendererSerializer)
# Cameras
registerInstanceSerializer('vtkOpenGLCamera', cameraSerializer)
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
def pad(depth):
padding = ''
for _ in range(depth):
padding += ' '
return padding
# -----------------------------------------------------------------------------
def wrapId(idStr):
return 'instance:${%s}' % idStr
# -----------------------------------------------------------------------------
def getReferenceId(ref):
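    """Derive a stable id for a VTK object from its SWIG `__this__` pointer,
    falling back to a slice of its repr on failure, or '0x0' for None."""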
if ref:
try:
return ref.__this__[1:17]
except Exception:
idStr = str(ref)[-12:-1]
print('====> fallback ID %s for %s' % (idStr, ref))
return idStr
return '0x0'
# -----------------------------------------------------------------------------
dataArrayShaMapping = {}
def digest(array):
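    """Return the SHA of a data array, cached per object id and only
    recomputed when the array's MTime changes."""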
objId = getReferenceId(array)
record = None
if objId in dataArrayShaMapping:
record = dataArrayShaMapping[objId]
if record and record['mtime'] == array.GetMTime():
return record['sha']
record = {
'sha': hashDataArray(array),
'mtime': array.GetMTime()
}
dataArrayShaMapping[objId] = record
return record['sha']
# -----------------------------------------------------------------------------
def getRangeInfo(array, component):
r = array.GetRange(component)
compRange = {}
compRange['min'] = r[0]
compRange['max'] = r[1]
compRange['component'] = array.GetComponentName(component)
return compRange
# -----------------------------------------------------------------------------
def getArrayDescription(array, context):
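    """Hash the array, register it in the context cache and return its
    metadata (hash, name, JS data type, component count, size and ranges)."""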
if not array:
return None
pMd5 = digest(array)
context.cacheDataArray(pMd5, {
'array': array,
'mTime': array.GetMTime(),
'ts': time.time()
})
root = {}
root['hash'] = pMd5
root['vtkClass'] = 'vtkDataArray'
root['name'] = array.GetName()
root['dataType'] = getJSArrayType(array)
root['numberOfComponents'] = array.GetNumberOfComponents()
root['size'] = array.GetNumberOfComponents() * array.GetNumberOfTuples()
root['ranges'] = []
if root['numberOfComponents'] > 1:
for i in range(root['numberOfComponents']):
root['ranges'].append(getRangeInfo(array, i))
root['ranges'].append(getRangeInfo(array, -1))
else:
root['ranges'].append(getRangeInfo(array, 0))
return root
# -----------------------------------------------------------------------------
def extractAllDataArrays(extractedFields, dataset, context):
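    """Append metadata for every point, cell and field data array of the
    dataset to extractedFields, tagging each entry with its location."""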
pointData = dataset.GetPointData()
for id_arr in range(pointData.GetNumberOfArrays()):
arrayMeta = getArrayDescription(pointData.GetArray(id_arr), context)
if arrayMeta:
arrayMeta['location'] = 'pointData'
extractedFields.append(arrayMeta)
cellData = dataset.GetCellData()
for id_arr in range(cellData.GetNumberOfArrays()):
arrayMeta = getArrayDescription(cellData.GetArray(id_arr), context)
if arrayMeta:
arrayMeta['location'] = 'cellData'
extractedFields.append(arrayMeta)
    fieldData = dataset.GetFieldData()
for id_arr in range(fieldData.GetNumberOfArrays()):
arrayMeta = getArrayDescription(fieldData.GetArray(id_arr), context)
if arrayMeta:
arrayMeta['location'] = 'fieldData'
extractedFields.append(arrayMeta)
# -----------------------------------------------------------------------------
def extractRequiredFields(extractedFields, parent, dataset, context, requestedFields=['Normals', 'TCoords']):
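    """Collect the arrays a mapper or texture needs to render the dataset:
    the scalars used for coloring, glyph scale/orientation arrays and,
    when requested, normals and texture coordinates."""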
    # FIXME: should evolve to support funky mappers which leverage many arrays
if any(parent.IsA(cls) for cls in ['vtkMapper', 'vtkVolumeMapper', 'vtkImageSliceMapper', 'vtkTexture']):
if parent.IsA("vtkAbstractMapper"): # GetScalars method should exists
scalarVisibility = 1 if not hasattr(parent, "GetScalarVisibility") else parent.GetScalarVisibility()
scalars, cell_flag = getScalars(parent, dataset)
if context.serializeAllDataArrays:
extractAllDataArrays(extractedFields, dataset, context)
if scalars:
for arrayMeta in extractedFields:
if arrayMeta['name'] == scalars.GetName():
arrayMeta['registration'] = 'setScalars'
elif scalars and scalarVisibility and not context.serializeAllDataArrays:
arrayMeta = getArrayDescription(scalars, context)
if cell_flag == 0:
arrayMeta['location'] = 'pointData'
elif cell_flag == 1:
arrayMeta['location'] = 'cellData'
else:
raise NotImplementedError("Scalars on field data not handled")
arrayMeta['registration'] = 'setScalars'
extractedFields.append(arrayMeta)
elif dataset.GetPointData().GetScalars():
arrayMeta = getArrayDescription(dataset.GetPointData().GetScalars(), context)
arrayMeta['location'] = 'pointData'
arrayMeta['registration'] = 'setScalars'
extractedFields.append(arrayMeta)
if parent.IsA("vtkGlyph3DMapper") and not context.serializeAllDataArrays:
scaleArrayName = parent.GetInputArrayInformation(parent.SCALE).Get(vtkDataObject.FIELD_NAME())
if scaleArrayName is not None and scaleArrayName not in [field['name'] for field in extractedFields]:
arrayMeta = getArrayDescription(dataset.GetPointData().GetAbstractArray(scaleArrayName), context)
if arrayMeta is not None:
arrayMeta['location'] = 'pointData'
arrayMeta['registration'] = 'addArray'
extractedFields.append(arrayMeta)
scaleOrientationArrayName = parent.GetInputArrayInformation(parent.ORIENTATION).Get(vtkDataObject.FIELD_NAME())
if scaleOrientationArrayName is not None and scaleOrientationArrayName not in [field['name'] for field in extractedFields]:
arrayMeta = getArrayDescription(dataset.GetPointData().GetAbstractArray(scaleOrientationArrayName), context)
if arrayMeta is not None:
arrayMeta['location'] = 'pointData'
arrayMeta['registration'] = 'addArray'
extractedFields.append(arrayMeta)
# Normal handling
if 'Normals' in requestedFields:
normals = dataset.GetPointData().GetNormals()
if normals:
arrayMeta = getArrayDescription(normals, context)
if arrayMeta:
arrayMeta['location'] = 'pointData'
arrayMeta['registration'] = 'setNormals'
extractedFields.append(arrayMeta)
# TCoord handling
if 'TCoords' in requestedFields:
tcoords = dataset.GetPointData().GetTCoords()
if tcoords:
arrayMeta = getArrayDescription(tcoords, context)
if arrayMeta:
arrayMeta['location'] = 'pointData'
arrayMeta['registration'] = 'setTCoords'
extractedFields.append(arrayMeta)
# -----------------------------------------------------------------------------
# Concrete instance serializers
# -----------------------------------------------------------------------------
def annotationSerializer(parent, prop, propId, context, depth):
if context.debugSerializers:
print('%s!!!Annotations are not handled directly by vtk.js but by bokeh model' % pad(depth))
context.addAnnotation(parent, prop, propId)
return None
def genericPropSerializer(parent, prop, popId, context, depth):
# This kind of actor has two "children" of interest, a property and a
    # mapper (optionally a texture)
mapperInstance = None
propertyInstance = None
calls = []
dependencies = []
mapper = None
if not hasattr(prop, 'GetMapper'):
if context.debugAll:
print('This volume does not have a GetMapper method')
else:
mapper = prop.GetMapper()
if mapper:
mapperId = context.getReferenceId(mapper)
mapperInstance = serializeInstance(
prop, mapper, mapperId, context, depth + 1)
if mapperInstance:
dependencies.append(mapperInstance)
calls.append(['setMapper', [wrapId(mapperId)]])
properties = None
if hasattr(prop, 'GetProperty'):
properties = prop.GetProperty()
else:
if context.debugAll:
print('This image does not have a GetProperty method')
if properties:
propId = context.getReferenceId(properties)
propertyInstance = serializeInstance(
prop, properties, propId, context, depth + 1)
if propertyInstance:
dependencies.append(propertyInstance)
calls.append(['setProperty', [wrapId(propId)]])
# Handle texture if any
texture = None
if hasattr(prop, 'GetTexture'):
texture = prop.GetTexture()
if texture:
textureId = context.getReferenceId(texture)
textureInstance = serializeInstance(
prop, texture, textureId, context, depth + 1)
if textureInstance:
dependencies.append(textureInstance)
calls.append(['addTexture', [wrapId(textureId)]])
return {
'parent': context.getReferenceId(parent),
'id': popId,
'type': prop.GetClassName(),
'properties': {
# vtkProp
'visibility': prop.GetVisibility(),
'pickable': prop.GetPickable(),
'dragable': prop.GetDragable(),
'useBounds': prop.GetUseBounds(),
},
'calls': calls,
'dependencies': dependencies
}
# -----------------------------------------------------------------------------
def genericProp3DSerializer(parent, prop3D, prop3DId, context, depth):
# This kind of actor has some position properties to add
instance = genericPropSerializer(parent, prop3D, prop3DId, context, depth)
if not instance: return
instance['properties'].update({
# vtkProp3D
'origin': prop3D.GetOrigin(),
'position': prop3D.GetPosition(),
'scale': prop3D.GetScale(),
'orientation': prop3D.GetOrientation(),
})
if prop3D.GetUserMatrix():
instance['properties'].update({
'userMatrix': [prop3D.GetUserMatrix().GetElement(i%4,i//4) for i in range(16)],
})
return instance
# -----------------------------------------------------------------------------
def genericActorSerializer(parent, actor, actorId, context, depth):
    # may have a texture and a backface property
instance = genericProp3DSerializer(parent, actor, actorId, context, depth)
if not instance: return
# # actor may have a backface property instance (not used by vtkjs rendering)
# # https://github.com/Kitware/vtk-js/issues/1545
# backfaceProperties = actor.GetBackfaceProperty()
# if backfaceProperties:
# backfacePropId = context.getReferenceId(backfaceProperties)
# backPropertyInstance = serializeInstance(
# actor, backfaceProperties, backfacePropId, context, depth + 1)
# if backPropertyInstance:
# instance['dependencies'].append(backPropertyInstance)
# instance['calls'].append(['setBackfaceProperty', [wrapId(backfacePropId)]])
instance['properties'].update({
# vtkActor
'forceOpaque': actor.GetForceOpaque(),
'forceTranslucent': actor.GetForceTranslucent()
})
if actor.IsA('vtkFollower'):
camera = actor.GetCamera()
cameraId = context.getReferenceId(camera)
cameraInstance = serializeInstance(
actor, camera, cameraId, context, depth + 1)
if cameraInstance:
instance['dependencies'].append(cameraInstance)
instance['calls'].append(['setCamera', [wrapId(cameraId)]])
return instance
# -----------------------------------------------------------------------------
def genericMapperSerializer(parent, mapper, mapperId, context, depth):
# This kind of mapper requires us to get 2 items: input data and lookup
# table
dataObject = None
dataObjectInstance = None
lookupTableInstance = None
calls = []
dependencies = []
if not hasattr(mapper, 'GetInputDataObject'):
if context.debugAll:
print('This mapper does not have GetInputDataObject method')
else:
for port in range(mapper.GetNumberOfInputPorts()): # Glyph3DMapper can define input data objects on 2 ports (input, source)
dataObject = mapper.GetInputDataObject(port, 0)
if dataObject:
dataObjectId = '%s-dataset-%d' % (mapperId, port)
if parent.IsA('vtkActor') and not mapper.IsA('vtkTexture'):
                    # vtk-js actors can only render surface (polygonal) datasets,
                    # so we make sure to convert the dataset to polydata
dataObjectInstance = mergeToPolydataSerializer(
mapper, dataObject, dataObjectId, context, depth + 1)
else:
dataObjectInstance = serializeInstance(
mapper, dataObject, dataObjectId, context, depth + 1)
if dataObjectInstance:
dependencies.append(dataObjectInstance)
calls.append(['setInputData', [wrapId(dataObjectId), port]])
lookupTable = None
if hasattr(mapper, 'GetLookupTable'):
lookupTable = mapper.GetLookupTable()
elif parent.IsA('vtkActor'):
if context.debugAll:
            print('This mapper does not have a GetLookupTable method')
if lookupTable:
lookupTableId = context.getReferenceId(lookupTable)
lookupTableInstance = serializeInstance(
mapper, lookupTable, lookupTableId, context, depth + 1)
if lookupTableInstance:
dependencies.append(lookupTableInstance)
calls.append(['setLookupTable', [wrapId(lookupTableId)]])
if dataObjectInstance:
return {
'parent': context.getReferenceId(parent),
'id': mapperId,
'properties': {},
'calls': calls,
'dependencies': dependencies
}
# -----------------------------------------------------------------------------
def genericPolyDataMapperSerializer(parent, mapper, mapperId, context, depth):
instance = genericMapperSerializer(parent, mapper, mapperId, context, depth)
if not instance: return
instance['type'] = mapper.GetClassName()
instance['properties'].update({
'resolveCoincidentTopology': mapper.GetResolveCoincidentTopology(),
'renderTime': mapper.GetRenderTime(),
        'arrayAccessMode': 1,  # we can't set the mapper arrayId in vtk.js, so we force access mode by name and use the retrieved array name
'scalarRange': mapper.GetScalarRange(),
'useLookupTableScalarRange': 1 if mapper.GetUseLookupTableScalarRange() else 0,
'scalarVisibility': mapper.GetScalarVisibility(),
'colorByArrayName': retrieveArrayName(instance, mapper.GetScalarMode()),
'colorMode': mapper.GetColorMode(),
'scalarMode': mapper.GetScalarMode(),
'interpolateScalarsBeforeMapping': 1 if mapper.GetInterpolateScalarsBeforeMapping() else 0
})
return instance
# -----------------------------------------------------------------------------
def genericVolumeMapperSerializer(parent, mapper, mapperId, context, depth):
instance = genericMapperSerializer(parent, mapper, mapperId, context, depth)
if not instance: return
imageSampleDistance = (
mapper.GetImageSampleDistance()
if hasattr(mapper, 'GetImageSampleDistance')
else 1
)
instance['type'] = mapper.GetClassName()
instance['properties'].update({
'sampleDistance': mapper.GetSampleDistance(),
'imageSampleDistance': imageSampleDistance,
# 'maximumSamplesPerRay',
'autoAdjustSampleDistances': mapper.GetAutoAdjustSampleDistances(),
'blendMode': mapper.GetBlendMode(),
})
return instance
# -----------------------------------------------------------------------------
def glyph3DMapperSerializer(parent, mapper, mapperId, context, depth):
instance = genericPolyDataMapperSerializer(parent, mapper, mapperId, context, depth)
if not instance: return
instance['type'] = mapper.GetClassName()
instance['properties'].update({
'orient': mapper.GetOrient(),
'orientationMode': mapper.GetOrientationMode(),
'scaling': mapper.GetScaling(),
'scaleFactor': mapper.GetScaleFactor(),
'scaleMode': mapper.GetScaleMode(),
'scaleArray': mapper.GetInputArrayInformation(mapper.SCALE).Get(vtkDataObject.FIELD_NAME()),
'orientationArray': mapper.GetInputArrayInformation(mapper.ORIENTATION).Get(vtkDataObject.FIELD_NAME()),
})
return instance
# -----------------------------------------------------------------------------
def textureSerializer(parent, texture, textureId, context, depth):
instance = genericMapperSerializer(parent, texture, textureId, context, depth)
if not instance: return
instance['type'] = texture.GetClassName()
instance['properties'].update({
'interpolate': texture.GetInterpolate(),
'repeat': texture.GetRepeat(),
'edgeClamp': texture.GetEdgeClamp(),
})
return instance
# -----------------------------------------------------------------------------
def imageSliceMapperSerializer(parent, mapper, mapperId, context, depth):
    # On the vtk.js side: a vtkImageMapper connected to a vtkImageReslice filter
instance = genericMapperSerializer(parent, mapper, mapperId, context, depth)
if not instance: return
instance['type'] = mapper.GetClassName()
return instance
# -----------------------------------------------------------------------------
def lookupTableSerializer(parent, lookupTable, lookupTableId, context, depth):
    # No children in this case, so there are no bindings to add,
    # but we still need to register the instance itself
arrays = []
lookupTableRange = lookupTable.GetRange()
lookupTableHueRange = [0.5, 0]
if hasattr(lookupTable, 'GetHueRange'):
try:
lookupTable.GetHueRange(lookupTableHueRange)
except Exception:
pass
lutSatRange = lookupTable.GetSaturationRange()
# lutAlphaRange = lookupTable.GetAlphaRange()
if lookupTable.GetTable():
arrayMeta = getArrayDescription(lookupTable.GetTable(), context)
if arrayMeta:
arrayMeta['registration'] = 'setTable'
arrays.append(arrayMeta)
return {
'parent': context.getReferenceId(parent),
'id': lookupTableId,
'type': lookupTable.GetClassName(),
'properties': {
'numberOfColors': lookupTable.GetNumberOfColors(),
'valueRange': lookupTableRange,
'range': lookupTableRange,
'hueRange': lookupTableHueRange,
# 'alphaRange': lutAlphaRange, # Causes weird rendering artifacts on client
'saturationRange': lutSatRange,
'nanColor': lookupTable.GetNanColor(),
'belowRangeColor': lookupTable.GetBelowRangeColor(),
'aboveRangeColor': lookupTable.GetAboveRangeColor(),
'useAboveRangeColor': True if lookupTable.GetUseAboveRangeColor() else False,
'useBelowRangeColor': True if lookupTable.GetUseBelowRangeColor() else False,
'alpha': lookupTable.GetAlpha(),
'vectorSize': lookupTable.GetVectorSize(),
'vectorComponent': lookupTable.GetVectorComponent(),
'vectorMode': lookupTable.GetVectorMode(),
'indexedLookup': lookupTable.GetIndexedLookup(),
},
'arrays': arrays,
}
# -----------------------------------------------------------------------------
def lookupTableToColorTransferFunction(lookupTable):
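    """Convert a vtkLookupTable into an equivalent vtkColorTransferFunction
    by spreading the table's RGB entries linearly over the table range."""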
dataTable = lookupTable.GetTable()
table = dataTableToList(dataTable)
if table:
ctf = vtkColorTransferFunction()
tableRange = lookupTable.GetTableRange()
points = linspace(*tableRange, num=len(table))
for x, rgba in zip(points, table):
            ctf.AddRGBPoint(x, *[c / 255 for c in rgba[:3]])
return ctf
# -----------------------------------------------------------------------------
def lookupTableSerializer2(parent, lookupTable, lookupTableId, context, depth):
ctf = lookupTableToColorTransferFunction(lookupTable)
if ctf:
return colorTransferFunctionSerializer(parent, ctf, lookupTableId, context, depth)
# -----------------------------------------------------------------------------
def propertySerializer(parent, propObj, propObjId, context, depth):
representation = propObj.GetRepresentation() if hasattr(
propObj, 'GetRepresentation') else 2
colorToUse = propObj.GetDiffuseColor() if hasattr(
propObj, 'GetDiffuseColor') else [1, 1, 1]
if representation == 1 and hasattr(propObj, 'GetColor'):
colorToUse = propObj.GetColor()
return {
'parent': context.getReferenceId(parent),
'id': propObjId,
'type': propObj.GetClassName(),
'properties': {
'representation': representation,
'diffuseColor': colorToUse,
'color': propObj.GetColor(),
'ambientColor': propObj.GetAmbientColor(),
'specularColor': propObj.GetSpecularColor(),
'edgeColor': propObj.GetEdgeColor(),
'ambient': propObj.GetAmbient(),
'diffuse': propObj.GetDiffuse(),
'specular': propObj.GetSpecular(),
'specularPower': propObj.GetSpecularPower(),
'opacity': propObj.GetOpacity(),
'interpolation': propObj.GetInterpolation(),
'edgeVisibility': 1 if propObj.GetEdgeVisibility() else 0,
'backfaceCulling': 1 if propObj.GetBackfaceCulling() else 0,
'frontfaceCulling': 1 if propObj.GetFrontfaceCulling() else 0,
'pointSize': propObj.GetPointSize(),
'lineWidth': propObj.GetLineWidth(),
'lighting': 1 if propObj.GetLighting() else 0,
}
}
# -----------------------------------------------------------------------------
def volumePropertySerializer(parent, propObj, propObjId, context, depth):
dependencies = []
calls = []
    # TODO: for the moment only component 0 is handled
    # Opacity function
ofun = propObj.GetScalarOpacity()
if ofun:
ofunId = context.getReferenceId(ofun)
ofunInstance = serializeInstance(
propObj, ofun, ofunId, context, depth + 1)
if ofunInstance:
dependencies.append(ofunInstance)
calls.append(['setScalarOpacity', [0, wrapId(ofunId)]])
    # Color transfer function
ctfun = propObj.GetRGBTransferFunction()
if ctfun:
ctfunId = context.getReferenceId(ctfun)
ctfunInstance = serializeInstance(
propObj, ctfun, ctfunId, context, depth + 1)
if ctfunInstance:
dependencies.append(ctfunInstance)
calls.append(['setRGBTransferFunction', [0, wrapId(ctfunId)]])
calls += [
['setScalarOpacityUnitDistance', [0, propObj.GetScalarOpacityUnitDistance(0)]],
['setComponentWeight', [0, propObj.GetComponentWeight(0)]],
['setUseGradientOpacity', [0, int(not propObj.GetDisableGradientOpacity())]],
]
return {
'parent': context.getReferenceId(parent),
'id': propObjId,
'type': propObj.GetClassName(),
'properties': {
'independentComponents': propObj.GetIndependentComponents(),
'interpolationType': propObj.GetInterpolationType(),
'ambient': propObj.GetAmbient(),
'diffuse': propObj.GetDiffuse(),
'shade': propObj.GetShade(),
'specular': propObj.GetSpecular(0),
'specularPower': propObj.GetSpecularPower(),
},
'dependencies': dependencies,
'calls': calls,
}
# -----------------------------------------------------------------------------
def imagePropertySerializer(parent, propObj, propObjId, context, depth):
calls = []
dependencies = []
lookupTable = propObj.GetLookupTable()
if lookupTable:
ctfun = lookupTableToColorTransferFunction(lookupTable)
ctfunId = context.getReferenceId(ctfun)
ctfunInstance = serializeInstance(
propObj, ctfun, ctfunId, context, depth + 1)
if ctfunInstance:
dependencies.append(ctfunInstance)
calls.append(['setRGBTransferFunction', [wrapId(ctfunId)]])
return {
'parent': context.getReferenceId(parent),
'id': propObjId,
'type': propObj.GetClassName(),
'properties': {
'interpolationType': propObj.GetInterpolationType(),
'colorWindow': propObj.GetColorWindow(),
'colorLevel': propObj.GetColorLevel(),
'ambient': propObj.GetAmbient(),
'diffuse': propObj.GetDiffuse(),
'opacity': propObj.GetOpacity(),
},
'dependencies': dependencies,
'calls': calls,
}
# -----------------------------------------------------------------------------
def imageDataSerializer(parent, dataset, datasetId, context, depth):
datasetType = dataset.GetClassName()
if hasattr(dataset, 'GetDirectionMatrix'):
direction = [dataset.GetDirectionMatrix().GetElement(0, i)
for i in range(9)]
else:
direction = [1, 0, 0,
0, 1, 0,
0, 0, 1]
# Extract dataset fields
arrays = []
extractRequiredFields(arrays, parent, dataset, context)
return {
'parent': context.getReferenceId(parent),
'id': datasetId,
'type': datasetType,
'properties': {
'spacing': dataset.GetSpacing(),
'origin': dataset.GetOrigin(),
'dimensions': dataset.GetDimensions(),
'direction': direction,
},
'arrays': arrays
}
# -----------------------------------------------------------------------------
def polydataSerializer(parent, dataset, datasetId, context, depth):
datasetType = dataset.GetClassName()
if dataset and dataset.GetPoints():
properties = {}
# Points
points = getArrayDescription(dataset.GetPoints().GetData(), context)
points['vtkClass'] = 'vtkPoints'
properties['points'] = points
# Verts
if dataset.GetVerts() and dataset.GetVerts().GetData().GetNumberOfTuples() > 0:
_verts = getArrayDescription(dataset.GetVerts().GetData(), context)
properties['verts'] = _verts
properties['verts']['vtkClass'] = 'vtkCellArray'
# Lines
if dataset.GetLines() and dataset.GetLines().GetData().GetNumberOfTuples() > 0:
_lines = getArrayDescription(dataset.GetLines().GetData(), context)
properties['lines'] = _lines
properties['lines']['vtkClass'] = 'vtkCellArray'
# Polys
if dataset.GetPolys() and dataset.GetPolys().GetData().GetNumberOfTuples() > 0:
_polys = getArrayDescription(dataset.GetPolys().GetData(), context)
properties['polys'] = _polys
properties['polys']['vtkClass'] = 'vtkCellArray'
# Strips
if dataset.GetStrips() and dataset.GetStrips().GetData().GetNumberOfTuples() > 0:
_strips = getArrayDescription(
dataset.GetStrips().GetData(), context)
properties['strips'] = _strips
properties['strips']['vtkClass'] = 'vtkCellArray'
# Fields
properties['fields'] = []
extractRequiredFields(properties['fields'], parent, dataset, context)
return {
'parent': context.getReferenceId(parent),
'id': datasetId,
'type': datasetType,
'properties': properties
}
if context.debugAll:
print('This dataset has no points!')
# -----------------------------------------------------------------------------
def mergeToPolydataSerializer(parent, dataObject, dataObjectId, context, depth):
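    """Reduce composite, structured, unstructured or image data to polydata
    with the appropriate geometry filter, then serialize it as polydata."""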
dataset = None
if dataObject.IsA('vtkCompositeDataSet'):
gf = vtkCompositeDataGeometryFilter()
gf.SetInputData(dataObject)
gf.Update()
dataset = gf.GetOutput()
elif (dataObject.IsA('vtkUnstructuredGrid') or
dataObject.IsA('vtkStructuredGrid') or
dataObject.IsA('vtkImageData')):
gf = vtkGeometryFilter()
gf.SetInputData(dataObject)
gf.Update()
dataset = gf.GetOutput()
else:
dataset = dataObject
return polydataSerializer(parent, dataset, dataObjectId, context, depth)
# -----------------------------------------------------------------------------
def colorTransferFunctionSerializer(parent, instance, objId, context, depth):
nodes = []
for i in range(instance.GetSize()):
# x, r, g, b, midpoint, sharpness
node = [0, 0, 0, 0, 0, 0]
instance.GetNodeValue(i, node)
nodes.append(node)
return {
'parent': context.getReferenceId(parent),
'id': objId,
'type': instance.GetClassName(),
'properties': {
'clamping': 1 if instance.GetClamping() else 0,
'colorSpace': instance.GetColorSpace(),
'hSVWrap': 1 if instance.GetHSVWrap() else 0,
# 'nanColor': instance.GetNanColor(), # Breaks client
# 'belowRangeColor': instance.GetBelowRangeColor(), # Breaks client
# 'aboveRangeColor': instance.GetAboveRangeColor(), # Breaks client
# 'useAboveRangeColor': 1 if instance.GetUseAboveRangeColor() else 0,
# 'useBelowRangeColor': 1 if instance.GetUseBelowRangeColor() else 0,
'allowDuplicateScalars': 1 if instance.GetAllowDuplicateScalars() else 0,
'alpha': instance.GetAlpha(),
'vectorComponent': instance.GetVectorComponent(),
'vectorSize': instance.GetVectorSize(),
'vectorMode': instance.GetVectorMode(),
'indexedLookup': instance.GetIndexedLookup(),
'nodes': nodes
}
}
# -----------------------------------------------------------------------------
def piecewiseFunctionSerializer(parent, instance, objId, context, depth):
nodes = []
for i in range(instance.GetSize()):
# x, y, midpoint, sharpness
node = [0, 0, 0, 0]
instance.GetNodeValue(i, node)
nodes.append(node)
return {
'parent': context.getReferenceId(parent),
'id': objId,
'type': instance.GetClassName(),
'properties': {
'clamping': instance.GetClamping(),
'allowDuplicateScalars': instance.GetAllowDuplicateScalars(),
'nodes': nodes,
}
}
# -----------------------------------------------------------------------------
def rendererSerializer(parent, instance, objId, context, depth):
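    """Serialize the renderer's active camera and view props, and emit the
    addViewProp/removeViewProp calls needed to rebuild the scene."""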
dependencies = []
viewPropIds = []
calls = []
# Camera
camera = instance.GetActiveCamera()
cameraId = context.getReferenceId(camera)
cameraInstance = serializeInstance(
instance, camera, cameraId, context, depth + 1)
if cameraInstance:
dependencies.append(cameraInstance)
calls.append(['setActiveCamera', [wrapId(cameraId)]])
    # View props as representation containers
viewPropCollection = instance.GetViewProps()
for rpIdx in range(viewPropCollection.GetNumberOfItems()):
viewProp = viewPropCollection.GetItemAsObject(rpIdx)
viewPropId = context.getReferenceId(viewProp)
viewPropInstance = serializeInstance(
instance, viewProp, viewPropId, context, depth + 1)
if viewPropInstance:
dependencies.append(viewPropInstance)
viewPropIds.append(viewPropId)
calls += context.buildDependencyCallList('%s-props' %
objId, viewPropIds, 'addViewProp', 'removeViewProp')
return {
'parent': context.getReferenceId(parent),
'id': objId,
'type': instance.GetClassName(),
'properties': {
'background': instance.GetBackground(),
'background2': instance.GetBackground2(),
'viewport': instance.GetViewport(),
# These commented properties do not yet have real setters in vtk.js
# 'gradientBackground': instance.GetGradientBackground(),
# 'aspect': instance.GetAspect(),
# 'pixelAspect': instance.GetPixelAspect(),
# 'ambient': instance.GetAmbient(),
'twoSidedLighting': instance.GetTwoSidedLighting(),
'lightFollowCamera': instance.GetLightFollowCamera(),
'layer': instance.GetLayer(),
'preserveColorBuffer': instance.GetPreserveColorBuffer(),
'preserveDepthBuffer': instance.GetPreserveDepthBuffer(),
'nearClippingPlaneTolerance': instance.GetNearClippingPlaneTolerance(),
'clippingRangeExpansion': instance.GetClippingRangeExpansion(),
'useShadows': instance.GetUseShadows(),
'useDepthPeeling': instance.GetUseDepthPeeling(),
'occlusionRatio': instance.GetOcclusionRatio(),
'maximumNumberOfPeels': instance.GetMaximumNumberOfPeels(),
'interactive': instance.GetInteractive(),
},
'dependencies': dependencies,
'calls': calls
}
# -----------------------------------------------------------------------------
def cameraSerializer(parent, instance, objId, context, depth):
return {
'parent': context.getReferenceId(parent),
'id': objId,
'type': instance.GetClassName(),
'properties': {
'focalPoint': instance.GetFocalPoint(),
'position': instance.GetPosition(),
'viewUp': instance.GetViewUp(),
'clippingRange': instance.GetClippingRange(),
}
}
# -----------------------------------------------------------------------------
def renderWindowSerializer(parent, instance, objId, context, depth):
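    """Serialize every renderer attached to the render window and return the
    window state together with its addRenderer/removeRenderer calls."""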
dependencies = []
rendererIds = []
rendererCollection = instance.GetRenderers()
for rIdx in range(rendererCollection.GetNumberOfItems()):
# Grab the next vtkRenderer
renderer = rendererCollection.GetItemAsObject(rIdx)
rendererId = context.getReferenceId(renderer)
rendererInstance = serializeInstance(
instance, renderer, rendererId, context, depth + 1)
if rendererInstance:
dependencies.append(rendererInstance)
rendererIds.append(rendererId)
calls = context.buildDependencyCallList(
objId, rendererIds, 'addRenderer', 'removeRenderer')
return {
'parent': context.getReferenceId(parent),
'id': objId,
'type': instance.GetClassName(),
'properties': {
'numberOfLayers': instance.GetNumberOfLayers()
},
'dependencies': dependencies,
'calls': calls,
'mtime': instance.GetMTime(),
}
| 37.426328
| 134
| 0.597514
|
794f21beebbfd584cdf47f89d5186426ae2e226b
| 35,040
|
py
|
Python
|
tests/unit/modules/test_zpool.py
|
alexey-zhukovin/salt
|
87382072abf353f3da62ae4e2d9fe1ba14344efa
|
[
"Apache-2.0"
] | 1
|
2021-09-06T00:14:04.000Z
|
2021-09-06T00:14:04.000Z
|
tests/unit/modules/test_zpool.py
|
alexey-zhukovin/salt
|
87382072abf353f3da62ae4e2d9fe1ba14344efa
|
[
"Apache-2.0"
] | 2
|
2021-04-30T21:17:57.000Z
|
2021-12-13T20:40:23.000Z
|
tests/unit/modules/test_zpool.py
|
Kamatera/salt
|
ac960a3308617657d9d039dae9108e0045ab3929
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tests for salt.modules.zpool
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>, Jorge Schrauwen <sjorge@blackdot.be>
:maintainer: Jorge Schrauwen <sjorge@blackdot.be>
:maturity: new
:depends: salt.utils.zfs
:platform: illumos,freebsd,linux
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Utils
import salt.loader
import salt.modules.zpool as zpool
import salt.utils.decorators
import salt.utils.decorators.path
# Import Salt Execution module to test
import salt.utils.zfs
from salt.utils.odict import OrderedDict
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
# Import Salt Testing libs
from tests.support.zfs import ZFSMockData
# Skip this test case if we don't have access to mock!
class ZpoolTestCase(TestCase, LoaderModuleMockMixin):
"""
This class contains a set of functions that test salt.modules.zpool module
"""
def setup_loader_modules(self):
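        """Set up minion opts and patched zfs utils, and return the globals for the zpool module under test."""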
self.opts = opts = salt.config.DEFAULT_MINION_OPTS.copy()
self.utils_patch = ZFSMockData().get_patched_utils()
for key in ("opts", "utils_patch"):
self.addCleanup(delattr, self, key)
utils = salt.loader.utils(
opts, whitelist=["zfs", "args", "systemd", "path", "platform"]
)
zpool_obj = {zpool: {"__opts__": opts, "__utils__": utils}}
return zpool_obj
@skipIf(True, "SLOWTEST skip")
def test_exists_success(self):
"""
Tests successful return of exists function
"""
ret = {}
ret["stdout"] = (
"NAME SIZE ALLOC FREE CAP DEDUP HEALTH ALTROOT\n"
"myzpool 149G 128K 149G 0% 1.00x ONLINE -"
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertTrue(zpool.exists("myzpool"))
@skipIf(True, "SLOWTEST skip")
def test_exists_failure(self):
"""
Tests failure return of exists function
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'myzpool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertFalse(zpool.exists("myzpool"))
def test_healthy(self):
"""
Tests successful return of healthy function
"""
ret = {}
ret["stdout"] = "all pools are healthy"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
self.assertTrue(zpool.healthy())
def test_status(self):
"""
Tests successful return of status function
"""
ret = {}
ret["stdout"] = "\n".join(
[
" pool: mypool",
" state: ONLINE",
" scan: scrub repaired 0 in 0h6m with 0 errors on Mon Dec 21 02:06:17 2015",
"config:",
"",
"\tNAME STATE READ WRITE CKSUM",
"\tmypool ONLINE 0 0 0",
"\t mirror-0 ONLINE 0 0 0",
"\t c2t0d0 ONLINE 0 0 0",
"\t c2t1d0 ONLINE 0 0 0",
"",
"errors: No known data errors",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.status()
self.assertEqual("ONLINE", ret["mypool"]["state"])
@skipIf(True, "SLOWTEST skip")
def test_iostat(self):
"""
Tests successful return of iostat function
"""
ret = {}
ret["stdout"] = "\n".join(
[
" capacity operations bandwidth",
"pool alloc free read write read write",
"---------- ----- ----- ----- ----- ----- -----",
"mypool 46.7G 64.3G 4 19 113K 331K",
" mirror 46.7G 64.3G 4 19 113K 331K",
" c2t0d0 - - 1 10 114K 334K",
" c2t1d0 - - 1 10 114K 334K",
"---------- ----- ----- ----- ----- ----- -----",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.iostat("mypool", parsable=False)
self.assertEqual("46.7G", ret["mypool"]["capacity-alloc"])
def test_iostat_parsable(self):
"""
Tests successful return of iostat function
        .. note::
            The command output is the same as the non-parsable one!
There is no -p flag for zpool iostat, but our type
conversions can handle this!
"""
ret = {}
ret["stdout"] = "\n".join(
[
" capacity operations bandwidth",
"pool alloc free read write read write",
"---------- ----- ----- ----- ----- ----- -----",
"mypool 46.7G 64.3G 4 19 113K 331K",
" mirror 46.7G 64.3G 4 19 113K 331K",
" c2t0d0 - - 1 10 114K 334K",
" c2t1d0 - - 1 10 114K 334K",
"---------- ----- ----- ----- ----- ----- -----",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.iostat("mypool", parsable=True)
self.assertEqual(50143743180, ret["mypool"]["capacity-alloc"])
def test_list(self):
"""
Tests successful return of list function
"""
ret = {}
ret["stdout"] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.list_(parsable=False)
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
("size", "1.81T"),
("alloc", "661G"),
("free", "1.17T"),
("cap", "35%"),
("frag", "11%"),
("health", "ONLINE"),
]
),
)
]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_list_parsable(self):
"""
Tests successful return of list function with parsable output
"""
ret = {}
ret["stdout"] = "mypool\t1.81T\t661G\t1.17T\t35%\t11%\tONLINE"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.list_(parsable=True)
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
("size", 1990116046274),
("alloc", 709743345664),
("free", 1286428604497),
("cap", "35%"),
("frag", "11%"),
("health", "ONLINE"),
]
),
)
]
)
self.assertEqual(ret, res)
def test_get(self):
"""
Tests successful return of get function
"""
ret = {}
ret["stdout"] = "mypool\tsize\t1.81T\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.get("mypool", "size", parsable=False)
res = OrderedDict(OrderedDict([("size", "1.81T")]))
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_get_parsable(self):
"""
Tests successful return of get function with parsable output
"""
ret = {}
ret["stdout"] = "mypool\tsize\t1.81T\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.get("mypool", "size", parsable=True)
res = OrderedDict(OrderedDict([("size", 1990116046274)]))
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_get_whitespace(self):
"""
Tests successful return of get function with a string with whitespaces
"""
ret = {}
ret["stdout"] = "mypool\tcomment\tmy testing pool\t-\n"
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.get("mypool", "comment")
res = OrderedDict(OrderedDict([("comment", "my testing pool")]))
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_scrub_start(self):
"""
Tests start of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, self.utils_patch):
ret = zpool.scrub("mypool")
res = OrderedDict(OrderedDict([("scrubbing", True)]))
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_scrub_pause(self):
"""
Tests pause of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, self.utils_patch):
ret = zpool.scrub("mypool", pause=True)
res = OrderedDict(OrderedDict([("scrubbing", False)]))
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_scrub_stop(self):
"""
        Tests stop of scrub
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
mock_exists = MagicMock(return_value=True)
with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict(
zpool.__salt__, {"cmd.run_all": mock_cmd}
), patch.dict(zpool.__utils__, self.utils_patch):
ret = zpool.scrub("mypool", stop=True)
res = OrderedDict(OrderedDict([("scrubbing", False)]))
self.assertEqual(ret, res)
def test_split_success(self):
"""
Tests split on success
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict([("split", True)])
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_split_exist_new(self):
"""
        Tests split when the new pool already exists
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "Unable to split datapool: pool already exists"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[
("split", False),
("error", "Unable to split datapool: pool already exists"),
]
)
self.assertEqual(ret, res)
def test_split_missing_pool(self):
"""
Tests split on missing source pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'datapool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[("split", False), ("error", "cannot open 'datapool': no such pool")]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_split_not_mirror(self):
"""
Tests split on source pool is not a mirror
"""
ret = {}
ret["stdout"] = ""
ret[
"stderr"
] = "Unable to split datapool: Source pool must be composed only of mirrors"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.split("datapool", "backuppool")
res = OrderedDict(
[
("split", False),
(
"error",
"Unable to split datapool: Source pool must be composed only of mirrors",
),
]
)
self.assertEqual(ret, res)
def test_labelclear_success(self):
"""
        Tests labelclear on successful label removal
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict([("labelcleared", True)])
self.assertEqual(ret, res)
def test_labelclear_nodevice(self):
"""
        Tests labelclear on a non-existing device
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "failed to open /dev/rdsk/c0t0d0: No such file or directory"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
(
"error",
"failed to open /dev/rdsk/c0t0d0: No such file or directory",
),
]
)
self.assertEqual(ret, res)
def test_labelclear_cleared(self):
"""
Tests labelclear on device with no label
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "failed to read label from /dev/rdsk/c0t0d0"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
("error", "failed to read label from /dev/rdsk/c0t0d0"),
]
)
self.assertEqual(ret, res)
def test_labelclear_exported(self):
"""
        Tests labelclear on a device from an exported pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "\n".join(
[
"use '-f' to override the following error:",
'/dev/rdsk/c0t0d0 is a member of exported pool "mypool"',
]
)
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.labelclear("/dev/rdsk/c0t0d0", force=False)
res = OrderedDict(
[
("labelcleared", False),
(
"error",
"use 'force=True' to override the following error:\n/dev/rdsk/c0t0d0 is a member of exported pool \"mypool\"",
),
]
)
self.assertEqual(ret, res)
@skipIf(not salt.utils.path.which("mkfile"), "Cannot find mkfile executable")
def test_create_file_vdev_success(self):
"""
        Tests create_file_vdev on success
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.create_file_vdev("64M", "/vdisks/disk0")
res = OrderedDict([("/vdisks/disk0", "created")])
self.assertEqual(ret, res)
@skipIf(not salt.utils.path.which("mkfile"), "Cannot find mkfile executable")
def test_create_file_vdev_nospace(self):
"""
Tests create_file_vdev when out of space
"""
ret = {}
ret["stdout"] = ""
ret[
"stderr"
] = "/vdisks/disk0: initialized 10424320 of 67108864 bytes: No space left on device"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.create_file_vdev("64M", "/vdisks/disk0")
res = OrderedDict(
[
("/vdisks/disk0", "failed"),
(
"error",
OrderedDict(
[
(
"/vdisks/disk0",
" initialized 10424320 of 67108864 bytes: No space left on device",
),
]
),
),
]
)
self.assertEqual(ret, res)
def test_export_success(self):
"""
Tests export
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.export("mypool")
res = OrderedDict([("exported", True)])
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_export_nopool(self):
"""
        Tests export when the pool does not exist
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.export("mypool")
res = OrderedDict(
[("exported", False), ("error", "cannot open 'mypool': no such pool")]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_import_success(self):
"""
Tests import
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict([("imported", True)])
self.assertEqual(ret, res)
def test_import_duplicate(self):
"""
Tests import with already imported pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "\n".join(
[
"cannot import 'mypool': a pool with that name already exists",
"use the form 'zpool import <pool | id> <newpool>' to give it a new name",
]
)
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict(
[
("imported", False),
(
"error",
"cannot import 'mypool': a pool with that name already exists\nuse the form 'zpool import <pool | id> <newpool>' to give it a new name",
),
]
)
self.assertEqual(ret, res)
def test_import_nopool(self):
"""
Tests import
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot import 'mypool': no such pool available"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.import_("mypool")
res = OrderedDict(
[
("imported", False),
("error", "cannot import 'mypool': no such pool available"),
]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_online_success(self):
"""
Tests online
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.online("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict([("onlined", True)])
self.assertEqual(ret, res)
def test_online_nodevice(self):
"""
Tests online
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot online /dev/rdsk/c0t0d1: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.online("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("onlined", False),
("error", "cannot online /dev/rdsk/c0t0d1: no such device in pool"),
]
)
self.assertEqual(ret, res)
def test_offline_success(self):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict([("offlined", True)])
self.assertEqual(ret, res)
def test_offline_nodevice(self):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot offline /dev/rdsk/c0t0d1: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("offlined", False),
(
"error",
"cannot offline /dev/rdsk/c0t0d1: no such device in pool",
),
]
)
self.assertEqual(ret, res)
def test_offline_noreplica(self):
"""
Tests offline
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot offline /dev/rdsk/c0t0d1: no valid replicas"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.offline("mypool", "/dev/rdsk/c0t0d1")
res = OrderedDict(
[
("offlined", False),
("error", "cannot offline /dev/rdsk/c0t0d1: no valid replicas"),
]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_reguid_success(self):
"""
Tests reguid
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.reguid("mypool")
res = OrderedDict([("reguided", True)])
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_reguid_nopool(self):
"""
Tests reguid with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.reguid("mypool")
res = OrderedDict(
[("reguided", False), ("error", "cannot open 'mypool': no such pool")]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_reopen_success(self):
"""
Tests reopen
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.reopen("mypool")
res = OrderedDict([("reopened", True)])
self.assertEqual(ret, res)
def test_reopen_nopool(self):
"""
Tests reopen with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.reopen("mypool")
res = OrderedDict(
[("reopened", False), ("error", "cannot open 'mypool': no such pool")]
)
self.assertEqual(ret, res)
def test_upgrade_success(self):
"""
Tests upgrade
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.upgrade("mypool")
res = OrderedDict([("upgraded", True)])
self.assertEqual(ret, res)
def test_upgrade_nopool(self):
"""
Tests upgrade with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.upgrade("mypool")
res = OrderedDict(
[("upgraded", False), ("error", "cannot open 'mypool': no such pool")]
)
self.assertEqual(ret, res)
@skipIf(True, "SLOWTEST skip")
def test_history_success(self):
"""
Tests history
"""
ret = {}
ret["stdout"] = "\n".join(
[
"History for 'mypool':",
"2018-01-18.16:56:12 zpool create -f mypool /dev/rdsk/c0t0d0",
"2018-01-19.16:01:55 zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1",
]
)
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.history("mypool")
res = OrderedDict(
[
(
"mypool",
OrderedDict(
[
(
"2018-01-18.16:56:12",
"zpool create -f mypool /dev/rdsk/c0t0d0",
),
(
"2018-01-19.16:01:55",
"zpool attach -f mypool /dev/rdsk/c0t0d0 /dev/rdsk/c0t0d1",
),
]
),
),
]
)
self.assertEqual(ret, res)
def test_history_nopool(self):
"""
Tests history with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.history("mypool")
res = OrderedDict([("error", "cannot open 'mypool': no such pool")])
self.assertEqual(ret, res)
def test_clear_success(self):
"""
Tests clear
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = ""
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.clear("mypool")
res = OrderedDict([("cleared", True)])
self.assertEqual(ret, res)
def test_clear_nopool(self):
"""
Tests clear with missing pool
"""
ret = {}
ret["stdout"] = ""
ret["stderr"] = "cannot open 'mypool': no such pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.clear("mypool")
res = OrderedDict(
[("cleared", False), ("error", "cannot open 'mypool': no such pool")]
        )
        self.assertEqual(ret, res)
def test_clear_nodevice(self):
"""
        Tests clear with a non-existing device
"""
ret = {}
ret["stdout"] = ""
ret[
"stderr"
] = "cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool"
ret["retcode"] = 1
mock_cmd = MagicMock(return_value=ret)
with patch.dict(zpool.__salt__, {"cmd.run_all": mock_cmd}), patch.dict(
zpool.__utils__, self.utils_patch
):
ret = zpool.clear("mypool", "/dev/rdsk/c0t0d0")
res = OrderedDict(
[
("cleared", False),
(
"error",
"cannot clear errors for /dev/rdsk/c0t0d0: no such device in pool",
),
]
)
self.assertEqual(ret, res)
| 33.887814
| 160
| 0.486159
|
794f21de9ffe991ca10d143789420aa4397dfae3
| 5,038
|
py
|
Python
|
magenta/models/performance_rnn/performance_rnn_train.py
|
mikiec84/magenta
|
f10c6a52e22b9694542b419d20b64f2ace32ad70
|
[
"Apache-2.0"
] | 1
|
2020-03-22T09:02:26.000Z
|
2020-03-22T09:02:26.000Z
|
magenta/models/performance_rnn/performance_rnn_train.py
|
mikiec84/magenta
|
f10c6a52e22b9694542b419d20b64f2ace32ad70
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/performance_rnn/performance_rnn_train.py
|
mikiec84/magenta
|
f10c6a52e22b9694542b419d20b64f2ace32ad70
|
[
"Apache-2.0"
] | 1
|
2019-11-30T17:51:10.000Z
|
2019-11-30T17:51:10.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train and evaluate a performance RNN model."""
import os
# internal imports
import tensorflow as tf
import magenta
from magenta.models.performance_rnn import performance_model
from magenta.models.shared import events_rnn_graph
from magenta.models.shared import events_rnn_train
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('run_dir', '/tmp/performance_rnn/logdir/run1',
'Path to the directory where checkpoints and '
'summary events will be saved during training and '
'evaluation. Separate subdirectories for training '
'events and eval events will be created within '
'`run_dir`. Multiple runs can be stored within the '
'parent directory of `run_dir`. Point TensorBoard '
'to the parent directory of `run_dir` to see all '
'your runs.')
tf.app.flags.DEFINE_string('config', 'performance', 'The config to use')
tf.app.flags.DEFINE_string('sequence_example_file', '',
'Path to TFRecord file containing '
'tf.SequenceExample records for training or '
'evaluation.')
tf.app.flags.DEFINE_integer('num_training_steps', 0,
                            'The number of global training steps your '
'model should take before exiting training. '
'Leave as 0 to run until terminated manually.')
tf.app.flags.DEFINE_integer('num_eval_examples', 0,
'The number of evaluation examples your model '
                            'should process for each evaluation step. '
'Leave as 0 to use the entire evaluation set.')
tf.app.flags.DEFINE_integer('summary_frequency', 10,
'A summary statement will be logged every '
'`summary_frequency` steps during training or '
'every `summary_frequency` seconds during '
'evaluation.')
tf.app.flags.DEFINE_integer('num_checkpoints', 10,
'The number of most recent checkpoints to keep in '
'the training directory. Keeps all if 0.')
tf.app.flags.DEFINE_boolean('eval', False,
'If True, this process only evaluates the model '
'and does not update weights.')
tf.app.flags.DEFINE_string('log', 'INFO',
                           'The threshold for what messages will be logged: '
'DEBUG, INFO, WARN, ERROR, or FATAL.')
tf.app.flags.DEFINE_string(
'hparams', '',
'Comma-separated list of `name=value` pairs. For each pair, the value of '
'the hyperparameter named `name` is set to `value`. This mapping is merged '
'with the default hyperparameters.')
def main(unused_argv):
tf.logging.set_verbosity(FLAGS.log)
if not FLAGS.run_dir:
tf.logging.fatal('--run_dir required')
return
if not FLAGS.sequence_example_file:
tf.logging.fatal('--sequence_example_file required')
return
sequence_example_file_paths = tf.gfile.Glob(
os.path.expanduser(FLAGS.sequence_example_file))
run_dir = os.path.expanduser(FLAGS.run_dir)
config = performance_model.default_configs[FLAGS.config]
config.hparams.parse(FLAGS.hparams)
mode = 'eval' if FLAGS.eval else 'train'
graph = events_rnn_graph.build_graph(
mode, config, sequence_example_file_paths)
train_dir = os.path.join(run_dir, 'train')
tf.gfile.MakeDirs(train_dir)
tf.logging.info('Train dir: %s', train_dir)
if FLAGS.eval:
eval_dir = os.path.join(run_dir, 'eval')
tf.gfile.MakeDirs(eval_dir)
tf.logging.info('Eval dir: %s', eval_dir)
num_batches = (
(FLAGS.num_eval_examples if FLAGS.num_eval_examples else
magenta.common.count_records(sequence_example_file_paths)) //
config.hparams.batch_size)
events_rnn_train.run_eval(graph, train_dir, eval_dir, num_batches)
else:
events_rnn_train.run_training(graph, train_dir, FLAGS.num_training_steps,
FLAGS.summary_frequency,
checkpoints_to_keep=FLAGS.num_checkpoints)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
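# A hedged usage sketch: the flag names come from the DEFINE_* calls above, but the
# TFRecord path and step count below are placeholders, not values from this module.
#
#   python performance_rnn_train.py \
#       --run_dir=/tmp/performance_rnn/logdir/run1 \
#       --sequence_example_file=/tmp/performance_rnn/training_performances.tfrecord \
#       --num_training_steps=20000
#
# Passing --eval instead starts an evaluation loop that reads checkpoints from
# run_dir/train and writes summaries to run_dir/eval.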
| 43.059829
| 80
| 0.63557
|
794f234bc6ac828332479f3322cabff49ec56819
| 4,786
|
py
|
Python
|
v1.0.0.test/toontown/suit/DistributedSuitPlanner.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-01T15:46:43.000Z
|
2021-07-23T16:26:48.000Z
|
v1.0.0.test/toontown/suit/DistributedSuitPlanner.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 1
|
2019-06-29T03:40:05.000Z
|
2021-06-13T01:15:16.000Z
|
v1.0.0.test/toontown/suit/DistributedSuitPlanner.py
|
TTOFFLINE-LEAK/ttoffline
|
bb0e91704a755d34983e94288d50288e46b68380
|
[
"MIT"
] | 4
|
2019-07-28T21:18:46.000Z
|
2021-02-25T06:37:25.000Z
|
from panda3d.core import *
from direct.distributed import DistributedObject
import SuitPlannerBase
from toontown.toonbase import ToontownGlobals
class DistributedSuitPlanner(DistributedObject.DistributedObject, SuitPlannerBase.SuitPlannerBase):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
SuitPlannerBase.SuitPlannerBase.__init__(self)
self.suitList = []
        self.buildingList = [0, 0, 0, 0]
self.pathViz = None
return
def generate(self):
self.notify.info('DistributedSuitPlanner %d: generating' % self.getDoId())
DistributedObject.DistributedObject.generate(self)
base.cr.currSuitPlanner = self
def disable(self):
self.notify.info('DistributedSuitPlanner %d: disabling' % self.getDoId())
self.hidePaths()
DistributedObject.DistributedObject.disable(self)
base.cr.currSuitPlanner = None
return
def d_suitListQuery(self):
self.sendUpdate('suitListQuery')
def suitListResponse(self, suitList):
self.suitList = suitList
messenger.send('suitListResponse')
def d_buildingListQuery(self):
self.sendUpdate('buildingListQuery')
def buildingListResponse(self, buildingList):
self.buildingList = buildingList
messenger.send('buildingListResponse')
def hidePaths(self):
if self.pathViz:
self.pathViz.detachNode()
self.pathViz = None
return
def showPaths(self):
self.hidePaths()
vizNode = GeomNode(self.uniqueName('PathViz'))
lines = LineSegs()
self.pathViz = render.attachNewNode(vizNode)
points = self.frontdoorPointList + self.sidedoorPointList + self.cogHQDoorPointList + self.streetPointList
while len(points) > 0:
self.__doShowPoints(vizNode, lines, None, points)
cnode = CollisionNode('battleCells')
cnode.setCollideMask(BitMask32.allOff())
for zoneId, cellPos in self.battlePosDict.items():
cnode.addSolid(CollisionSphere(cellPos, 9))
text = '%s' % zoneId
            self.__makePathVizText(text, cellPos[0], cellPos[1], cellPos[2] + 9, (1, 1, 1, 1))
self.pathViz.attachNewNode(cnode).show()
return
def __doShowPoints(self, vizNode, lines, p, points):
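        # Depth-first sketch of what follows: pop a point from the shared `points` list
        # (the most recent one when no point is given), label it with its index (red for
        # front-door points, blue for side-door points, green otherwise), draw a segment
        # plus a small arrowhead to every adjacent suit point, then recurse into each
        # neighbour that has not already been removed from the list.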
if p == None:
pi = len(points) - 1
if pi < 0:
return
p = points[pi]
del points[pi]
else:
if p not in points:
return
pi = points.index(p)
del points[pi]
text = '%s' % p.getIndex()
pos = p.getPos()
if p.getPointType() == DNASuitPoint.FRONTDOORPOINT:
color = (1, 0, 0, 1)
else:
if p.getPointType() == DNASuitPoint.SIDEDOORPOINT:
color = (0, 0, 1, 1)
else:
color = (0, 1, 0, 1)
self.__makePathVizText(text, pos[0], pos[1], pos[2], color)
adjacent = self.dnaStore.getAdjacentPoints(p)
numPoints = adjacent.getNumPoints()
for i in xrange(numPoints):
qi = adjacent.getPointIndex(i)
q = self.dnaStore.getSuitPointWithIndex(qi)
pp = p.getPos()
qp = q.getPos()
v = Vec3(qp - pp)
v.normalize()
c = v.cross(Vec3.up())
p1a = pp + v * 2 + c * 0.5
p1b = pp + v * 3
p1c = pp + v * 2 - c * 0.5
lines.reset()
lines.moveTo(pp)
lines.drawTo(qp)
lines.moveTo(p1a)
lines.drawTo(p1b)
lines.drawTo(p1c)
lines.create(vizNode, 0)
self.__doShowPoints(vizNode, lines, q, points)
return
def __makePathVizText(self, text, x, y, z, color):
if not hasattr(self, 'debugTextNode'):
self.debugTextNode = TextNode('debugTextNode')
self.debugTextNode.setAlign(TextNode.ACenter)
self.debugTextNode.setFont(ToontownGlobals.getSignFont())
self.debugTextNode.setTextColor(*color)
self.debugTextNode.setText(text)
np = self.pathViz.attachNewNode(self.debugTextNode.generate())
np.setPos(x, y, z + 1)
np.setScale(1.0)
np.setBillboardPointEye(2)
np.node().setAttrib(TransparencyAttrib.make(TransparencyAttrib.MDual), 2)
| 36.815385
| 114
| 0.552027
|
794f23d8a11d09db2958a3850747d42a582a0070
| 6,242
|
py
|
Python
|
datasets.py
|
ymym3412/textcnn-conv-deconv-pytorch
|
f0101160b8bd2de0f2a0718a9053ad85dc5f695f
|
[
"Apache-2.0"
] | 60
|
2018-01-24T19:01:43.000Z
|
2021-10-09T08:44:17.000Z
|
datasets.py
|
ymym3412/textcnn-conv-deconv-pytorch
|
f0101160b8bd2de0f2a0718a9053ad85dc5f695f
|
[
"Apache-2.0"
] | 3
|
2018-02-23T15:35:07.000Z
|
2020-04-19T12:17:09.000Z
|
datasets.py
|
ymym3412/textcnn-conv-deconv-pytorch
|
f0101160b8bd2de0f2a0718a9053ad85dc5f695f
|
[
"Apache-2.0"
] | 14
|
2018-02-18T17:33:18.000Z
|
2021-07-14T04:45:58.000Z
|
from torch.utils.data import Dataset
import torch
import numpy as np
from tqdm import tqdm
from collections import Counter
from copy import deepcopy
def load_hotel_review_data(path, sentence_len):
"""
Load Hotel Reviews data from pickle distributed in https://drive.google.com/file/d/0B52eYWrYWqIpQzhBNkVxaV9mMjQ/view
This file is published in https://github.com/dreasysnail/textCNN_public
:param path: pickle path
:return:
"""
import _pickle as cPickle
with open(path, "rb") as f:
data = cPickle.load(f, encoding="latin1")
train_data, test_data = HotelReviewsDataset(data[0], deepcopy(data[2]), deepcopy(data[3]), sentence_len, transform=ToTensor()), \
HotelReviewsDataset(data[1], deepcopy(data[2]), deepcopy(data[3]), sentence_len, transform=ToTensor())
return train_data, test_data
class HotelReviewsDataset(Dataset):
"""
Hotel Reviews Dataset
"""
def __init__(self, data_list, word2index, index2word, sentence_len, transform=None):
self.word2index = word2index
self.index2word = index2word
self.n_words = len(self.word2index)
self.data = data_list
self.sentence_len = sentence_len
self.transform = transform
self.word2index["<PAD>"] = self.n_words
self.index2word[self.n_words] = "<PAD>"
self.n_words += 1
temp_list = []
for sentence in tqdm(self.data):
if len(sentence) > self.sentence_len:
# truncate sentence if sentence length is longer than `sentence_len`
temp_list.append(np.array(sentence[:self.sentence_len]))
else:
# pad sentence with '<PAD>' token if sentence length is shorter than `sentence_len`
sent_array = np.lib.pad(np.array(sentence),
(0, self.sentence_len - len(sentence)),
"constant",
constant_values=(self.n_words-1, self.n_words-1))
temp_list.append(sent_array)
self.data = np.array(temp_list, dtype=np.int32)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
data = self.data[idx]
if self.transform:
data = self.transform(data)
return data
    def vocab_length(self):
        return len(self.word2index)
class TextClassificationDataset(Dataset):
    def __init__(self, data_path, label_path, tokenized, sentence_len=60, transform=None):
self.word2index = {"<PAD>": 0, "<UNK>": 1}
self.index2word = {0: "<PAD>", 1: "<UNK>"}
self.n_words = 2
self.sentence_len = sentence_len
# Data load
with open(data_path, encoding="utf-8") as f:
data = [line.split() for line in f]
if tokenized == "mecab":
            # replace low-frequency words with the UNK token
word_bucket = []
for sentence in data:
word_bucket.extend(sentence)
cnt = Counter(word_bucket)
rare_word = []
for common in cnt.most_common():
if common[1] <= 2:
rare_word.append(common[0])
print("Rare word")
rare_word = set(rare_word)
print(len(rare_word))
for sentence in data:
for word in sentence:
if word in rare_word:
continue
elif word not in self.word2index:
self.word2index[word] = self.n_words
self.index2word[self.n_words] = word
self.n_words += 1
# Transform to idx
self.data = np.array([[self.word2index[word]
if word not in rare_word
else self.word2index["<UNK>"] for word in sentence]
for sentence in tqdm(data)])
elif tokenized == "sentencepiece":
for sentence in data:
                # remove the SentencePiece meta symbol ("▁")
                # TODO: this also removes blanks inside the sentence. Is there another method?
for word in map(lambda word: word.replace("▁", ""), sentence):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.index2word[self.n_words] = word
self.n_words += 1
self.data = np.array([[self.word2index[word] for word in map(lambda word: word.replace("▁", ""), sentence)]
for sentence in tqdm(data)])
temp_list = []
for sentence in self.data:
if len(sentence) > self.sentence_len:
# truncate sentence if sentence length is longer than `sentence_len`
temp_list.append(np.array(sentence[:self.sentence_len]))
else:
# pad sentence with '<PAD>' token if sentence length is shorter than `sentence_len`
sent_array = np.lib.pad(np.array(sentence),
(0, self.sentence_len - len(sentence)),
"constant",
constant_values=(0, 0))
temp_list.append(sent_array)
self.data = np.array(temp_list, dtype=np.int32)
with open(label_path, encoding="utf-8") as f:
self.labels = np.array([np.array([int(label)]) for label in f], dtype=np.int32)
        self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sentence = self.data[idx]
label = self.labels[idx]
sample = {"sentence": sentence, "label": label}
if self.transform:
sample = {"sentence": self.transform(sample["sentence"]),
"label": self.transform(sample["label"])}
return sample
def vocab_length(self):
return self.n_words
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, data):
return torch.from_numpy(data).type(torch.LongTensor)
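# A minimal, self-contained sketch of the Dataset/transform plumbing above. The tiny
# vocabulary and the two "reviews" are invented for illustration only; real data comes
# from the pickle handled by load_hotel_review_data.
if __name__ == "__main__":
    word2index = {"good": 0, "hotel": 1}
    index2word = {0: "good", 1: "hotel"}
    demo = HotelReviewsDataset([[0, 1], [1]], word2index, index2word,
                               sentence_len=4, transform=ToTensor())
    # Each item is a LongTensor padded with the freshly added <PAD> index (here 2).
    print(len(demo), demo[0])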
| 39.757962
| 133
| 0.559116
|
794f23df763e33e7ae39cadd8c8878b31853fa0b
| 3,854
|
py
|
Python
|
Study_5/Main.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | 1
|
2020-03-22T14:35:11.000Z
|
2020-03-22T14:35:11.000Z
|
Study_5/Main.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | null | null | null |
Study_5/Main.py
|
LeeDaeil/PyQt5_study
|
ecdd22ce2809ce6f01c8691a7ca75ef1771b7202
|
[
"MIT"
] | null | null | null |
from collections import deque
from multiprocessing import Manager
from Study_5.CNS_UDP import *
from Study_5.CNS_Fun import *
# from .CNS_GFun import *  # excluded due to the interface design
from Study_5.CNS_CFun import *
class body:
def __init__(self):
#==== Initial part for testing===========================================================#
self.a3c_test_mode = True
self.shut_up = True
#========================================================================================#
self.shared_mem = generate_mem().make_mem_structure()
self.UDP_net = [UDPSocket(self.shared_mem, IP='', Port=7001, shut_up=self.shut_up)]
if self.a3c_test_mode:
self.process_list = [
clean_mem(self.shared_mem, shut_up=self.shut_up),
interface_function(self.shared_mem),
# function1(self.shared_mem),
# function2(self.shared_mem),
# function3(self.shared_mem),
                # gfunction(self.shared_mem),   # excluded due to the interface design
                # gfunction2(self.shared_mem),  # excluded due to the interface design
]
else:
self.process_list = [
clean_mem(self.shared_mem, shut_up=self.shut_up),
]
def start(self):
print('A3C test mode : {}'.format(self.a3c_test_mode))
job_list = []
for __ in self.UDP_net:
__.start()
job_list.append(__)
time.sleep(1)
for __ in self.process_list:
__.start()
job_list.append(__)
for job in job_list:
job.join()
class generate_mem:
def make_test_mem(self):
memory_dict = {'Test': 0, 'List_Test': []}
return memory_dict
def make_test_list_mem(self):
memory_list = []
return memory_list
def make_CNS_time_mem(self):
memory_list = []
return memory_list
def make_clean_mem(self):
memory_dict = {'Clean': True}
return memory_dict
def make_main_mem_structure(self, max_len_deque=10, show_main_mem=False):
memory_dict = {}
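        # A hedged note on the input format: ./db.txt is expected to hold tab-separated
        # "<parameter name>\t<type>" lines, e.g. "KCNTOMS\tINTEGER" (the name is a
        # hypothetical example); any type other than INTEGER is flagged with sig = 1.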
with open('./db.txt', 'r') as f:
while True:
temp_ = f.readline().split('\t')
if temp_[0] == '': # if empty space -> break
break
sig = 0 if temp_[1] == 'INTEGER' else 1
memory_dict[temp_[0]] = {'V': 0, 'L': [], 'D': deque(maxlen=max_len_deque), "type": sig}
# memory_dict[temp_[0]] = {'V': 0, 'L': [], 'D': deque(maxlen=max_len_deque), "type": sig,
# 'N_V': 0, 'N_L': [], 'N_D': deque(maxlen=max_len_deque)} # Noise parameter
if show_main_mem:
print(memory_dict)
return memory_dict
def make_mem_structure(self, show_mem_list=False):
memory_list = [Manager().dict(self.make_main_mem_structure(max_len_deque=10)), # [0]
Manager().dict(self.make_test_mem()),
Manager().list(self.make_test_list_mem()),
Manager().list(self.make_CNS_time_mem()), # [-2]
Manager().dict(self.make_clean_mem()), # [-1]
]
        '''
        Add any additional memory structures you design yourself here.
        e.g.
        memory_list = [Manager().dict(self.make_main_mem_structure(max_len_deque=10)),
                       Manager().dict(<your own memory structure>),
                       ...
                       Manager().dict(self.make_clean_mem()),]
        '''
if show_mem_list:
i = 0
for __ in memory_list:
                print('memory list #{} | {}'.format(i, __))
i += 1
return memory_list
if __name__ == '__main__':
main_process = body()
main_process.start()
| 36.704762
| 118
| 0.504152
|
794f2413f52881a6abf70b233ba26dd2f9341036
| 280
|
py
|
Python
|
Mundo 1/Tratando Dados e fazendo contas/Desafio#11.py
|
kaio358/Python
|
16f55e6ff056d97b2f28e68f95eafc9ab7d4e2b1
|
[
"MIT"
] | null | null | null |
Mundo 1/Tratando Dados e fazendo contas/Desafio#11.py
|
kaio358/Python
|
16f55e6ff056d97b2f28e68f95eafc9ab7d4e2b1
|
[
"MIT"
] | null | null | null |
Mundo 1/Tratando Dados e fazendo contas/Desafio#11.py
|
kaio358/Python
|
16f55e6ff056d97b2f28e68f95eafc9ab7d4e2b1
|
[
"MIT"
] | null | null | null |
largura = float(input("Insira a largura da parede : "))
altura = float(input("Insira a altura da parede : "))
print("O tamanho da parede e de {} X {} e sua area é de {} m²".format(largura,
altura,altura*largura))
print ('A tinta gasta será de {} l'.format((altura*largura)/2))
| 56
| 78
| 0.675
|
794f25c0806df93c1da8fcda5cadc68c588aa667
| 6,777
|
py
|
Python
|
metadata-ingestion/src/datahub/emitter/mce_builder.py
|
shakti-garg-saxo/datahub
|
ed38168f54dd96b2b237f5df30a43769df4ec400
|
[
"Apache-2.0"
] | 1
|
2021-04-29T17:40:02.000Z
|
2021-04-29T17:40:02.000Z
|
metadata-ingestion/src/datahub/emitter/mce_builder.py
|
shakti-garg-saxo/datahub
|
ed38168f54dd96b2b237f5df30a43769df4ec400
|
[
"Apache-2.0"
] | 4
|
2022-03-02T03:01:24.000Z
|
2022-03-23T00:57:33.000Z
|
metadata-ingestion/src/datahub/emitter/mce_builder.py
|
Affirm/datahub
|
bd5e3b174a82b6d2b0d3fc9c036570dfd5bbadd0
|
[
"Apache-2.0"
] | null | null | null |
"""Convenience functions for creating MCEs"""
import logging
import re
import time
from typing import List, Optional, Type, TypeVar, cast, get_type_hints
import typing_inspect
from avrogen.dict_wrapper import DictWrapper
from datahub.metadata.com.linkedin.pegasus2avro.metadata.key import DatasetKey
from datahub.metadata.schema_classes import (
DatasetLineageTypeClass,
DatasetSnapshotClass,
MetadataChangeEventClass,
OwnershipTypeClass,
UpstreamClass,
UpstreamLineageClass,
)
DEFAULT_ENV = "PROD"
DEFAULT_FLOW_CLUSTER = "prod"
UNKNOWN_USER = "urn:li:corpuser:unknown"
logger = logging.getLogger(__name__)
def get_sys_time() -> int:
# TODO deprecate this
return int(time.time() * 1000)
def _check_data_platform_name(platform_name: str) -> None:
if not platform_name.isalpha():
logger.warning(f"improperly formatted data platform: {platform_name}")
def make_data_platform_urn(platform: str) -> str:
if platform.startswith("urn:li:dataPlatform:"):
return platform
_check_data_platform_name(platform)
return f"urn:li:dataPlatform:{platform}"
def make_dataset_urn(platform: str, name: str, env: str = DEFAULT_ENV) -> str:
return f"urn:li:dataset:({make_data_platform_urn(platform)},{name},{env})"
def dataset_urn_to_key(dataset_urn: str) -> Optional[DatasetKey]:
pattern = r"urn:li:dataset:\(urn:li:dataPlatform:(.*),(.*),(.*)\)"
results = re.search(pattern, dataset_urn)
if results is not None:
return DatasetKey(
platform=results.group(1), name=results.group(2), origin=results.group(3)
)
return None
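# A hedged round-trip example (the platform and table names are illustrative only):
#   make_dataset_urn("hive", "db.table") -> "urn:li:dataset:(urn:li:dataPlatform:hive,db.table,PROD)"
#   dataset_urn_to_key(...) on that string -> DatasetKey(platform="hive", name="db.table", origin="PROD")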
def make_user_urn(username: str) -> str:
return f"urn:li:corpuser:{username}"
def make_group_urn(groupname: str) -> str:
return f"urn:li:corpGroup:{groupname}"
def make_tag_urn(tag: str) -> str:
return f"urn:li:tag:{tag}"
def make_data_flow_urn(
orchestrator: str, flow_id: str, cluster: str = DEFAULT_FLOW_CLUSTER
) -> str:
return f"urn:li:dataFlow:({orchestrator},{flow_id},{cluster})"
def make_data_job_urn_with_flow(flow_urn: str, job_id: str) -> str:
return f"urn:li:dataJob:({flow_urn},{job_id})"
def make_data_job_urn(
orchestrator: str, flow_id: str, job_id: str, cluster: str = DEFAULT_FLOW_CLUSTER
) -> str:
return make_data_job_urn_with_flow(
make_data_flow_urn(orchestrator, flow_id, cluster), job_id
)
def make_dashboard_urn(platform: str, name: str) -> str:
# FIXME: dashboards don't currently include data platform urn prefixes.
_check_data_platform_name(platform)
return f"urn:li:dashboard:({platform},{name})"
def make_chart_urn(platform: str, name: str) -> str:
# FIXME: charts don't currently include data platform urn prefixes.
_check_data_platform_name(platform)
return f"urn:li:chart:({platform},{name})"
def make_ml_primary_key_urn(feature_table_name: str, primary_key_name: str) -> str:
return f"urn:li:mlPrimaryKey:({feature_table_name},{primary_key_name})"
def make_ml_feature_urn(
feature_table_name: str,
feature_name: str,
) -> str:
return f"urn:li:mlFeature:({feature_table_name},{feature_name})"
def make_ml_feature_table_urn(platform: str, feature_table_name: str) -> str:
return f"urn:li:mlFeatureTable:({make_data_platform_urn(platform)},{feature_table_name})"
def make_ml_model_urn(platform: str, model_name: str, env: str) -> str:
return f"urn:li:mlModel:({make_data_platform_urn(platform)},{model_name},{env})"
def make_ml_model_deployment_urn(platform: str, deployment_name: str, env: str) -> str:
return f"urn:li:mlModelDeployment:({make_data_platform_urn(platform)},{deployment_name},{env})"
def make_ml_model_group_urn(platform: str, group_name: str, env: str) -> str:
return (
f"urn:li:mlModelGroup:({make_data_platform_urn(platform)},{group_name},{env})"
)
def is_valid_ownership_type(ownership_type: Optional[str]) -> bool:
return ownership_type is not None and ownership_type in [
OwnershipTypeClass.DEVELOPER,
OwnershipTypeClass.DATAOWNER,
OwnershipTypeClass.DELEGATE,
OwnershipTypeClass.PRODUCER,
OwnershipTypeClass.CONSUMER,
OwnershipTypeClass.STAKEHOLDER,
]
def validate_ownership_type(ownership_type: Optional[str]) -> str:
if is_valid_ownership_type(ownership_type):
return cast(str, ownership_type)
else:
raise ValueError(f"Unexpected ownership type: {ownership_type}")
def make_lineage_mce(
upstream_urns: List[str],
downstream_urn: str,
lineage_type: str = DatasetLineageTypeClass.TRANSFORMED,
) -> MetadataChangeEventClass:
mce = MetadataChangeEventClass(
proposedSnapshot=DatasetSnapshotClass(
urn=downstream_urn,
aspects=[
UpstreamLineageClass(
upstreams=[
UpstreamClass(
dataset=upstream_urn,
type=lineage_type,
)
for upstream_urn in upstream_urns
]
)
],
)
)
return mce
# This bound isn't tight, but it's better than nothing.
Aspect = TypeVar("Aspect", bound=DictWrapper)
def can_add_aspect(mce: MetadataChangeEventClass, AspectType: Type[Aspect]) -> bool:
SnapshotType = type(mce.proposedSnapshot)
constructor_annotations = get_type_hints(SnapshotType.__init__)
aspect_list_union = typing_inspect.get_args(constructor_annotations["aspects"])[0]
if not isinstance(aspect_list_union, tuple):
supported_aspect_types = typing_inspect.get_args(aspect_list_union)
else:
# On Python 3.6, the union type is represented as a tuple, where
# the first item is typing.Union and the subsequent elements are
# the types within the union.
supported_aspect_types = aspect_list_union[1:]
return issubclass(AspectType, supported_aspect_types)
def get_aspect_if_available(
mce: MetadataChangeEventClass, AspectType: Type[Aspect]
) -> Optional[Aspect]:
assert can_add_aspect(mce, AspectType)
all_aspects = mce.proposedSnapshot.aspects
aspects: List[Aspect] = [
aspect for aspect in all_aspects if isinstance(aspect, AspectType)
]
if len(aspects) > 1:
raise ValueError(
f"MCE contains multiple aspects of type {AspectType}: {aspects}"
)
if aspects:
return aspects[0]
return None
def get_or_add_aspect(mce: MetadataChangeEventClass, default: Aspect) -> Aspect:
existing = get_aspect_if_available(mce, type(default))
if existing is not None:
return existing
mce.proposedSnapshot.aspects.append(default) # type: ignore
return default
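# A hedged, self-contained sketch of how the helpers above compose; the platform and
# dataset names are illustrative, not values taken from this module.
if __name__ == "__main__":
    upstream = make_dataset_urn("hive", "db.raw_events")
    downstream = make_dataset_urn("hive", "db.clean_events")
    mce = make_lineage_mce([upstream], downstream)
    # make_lineage_mce attaches an UpstreamLineageClass aspect, so it can be read back:
    lineage = get_aspect_if_available(mce, UpstreamLineageClass)
    assert lineage is not None and lineage.upstreams[0].dataset == upstream
    print(dataset_urn_to_key(downstream))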
| 30.945205
| 99
| 0.701343
|
794f26698d1104ff54a38dfa17617709ef4f72fe
| 218
|
py
|
Python
|
rtcclient/exception.py
|
sadikkuzu-mba/rtcclient
|
831d9eef57f0daca86728ea38743925f6db017fc
|
[
"Apache-2.0"
] | 37
|
2015-07-31T03:21:55.000Z
|
2021-08-04T10:30:13.000Z
|
rtcclient/exception.py
|
sadikkuzu-mba/rtcclient
|
831d9eef57f0daca86728ea38743925f6db017fc
|
[
"Apache-2.0"
] | 94
|
2015-07-29T14:27:13.000Z
|
2022-03-10T16:53:30.000Z
|
rtcclient/exception.py
|
sadikkuzu-mba/rtcclient
|
831d9eef57f0daca86728ea38743925f6db017fc
|
[
"Apache-2.0"
] | 37
|
2015-11-11T15:06:39.000Z
|
2022-03-01T12:21:48.000Z
|
class RTCException(Exception):
"""Base exception class for all errors
"""
pass
class BadValue(RTCException):
pass
class NotFound(RTCException):
pass
class EmptyAttrib(RTCException):
pass
| 12.111111
| 42
| 0.688073
|
794f272b6059f4e70da3e79674aa72c6390f024d
| 8,856
|
py
|
Python
|
qa/rpc-tests/rawtransactions.py
|
ericramos1980/energi
|
aadc44f714f9d52433ab3595a9f33a61433c60c9
|
[
"MIT"
] | 2
|
2021-12-28T21:47:07.000Z
|
2022-02-09T21:04:29.000Z
|
qa/rpc-tests/rawtransactions.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/rawtransactions.py
|
reddragon34/energi
|
4cc6c426d9d4b6b9053912de9b2197eba071201e
|
[
"MIT"
] | 1
|
2019-10-07T19:17:55.000Z
|
2019-10-07T19:17:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""rawtranscation RPCs QA test.
# Tests the following RPCs:
# - createrawtransaction
# - signrawtransaction
# - sendrawtransaction
# - decoderawtransaction
# - getrawtransaction
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
#connect to a local machine for debugging
#url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
#proxy = AuthServiceProxy(url)
#proxy.url = url # store URL on proxy for info
#self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransaction(rawtx)
try:
rawtx = self.nodes[2].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("Missing inputs" in e.error['message'])
else:
assert(False)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
addr3Obj = self.nodes[2].validateaddress(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])
mSigObjValid = self.nodes[2].validateaddress(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
sPK = rawTx['vout'][0]['scriptPubKey']['hex']
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
        # THIS IS AN INCOMPLETE FEATURE
        # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNTED IN THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransaction(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('500.00000000')+Decimal('2.19000000')) #block reward + tx
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["txid"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises(JSONRPCException, self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises(JSONRPCException, self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| 44.954315
| 147
| 0.647245
|
794f2833ee6d6df6702c546a201816fb89d00b11
| 1,159
|
py
|
Python
|
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | 2
|
2017-10-05T21:08:38.000Z
|
2018-10-09T23:01:23.000Z
|
nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | 1
|
2016-10-11T19:18:53.000Z
|
2016-10-11T19:18:53.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..morphology import GrayscaleFillHoleImageFilter
def test_GrayscaleFillHoleImageFilter_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='%s',
position=-2,
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
terminal_output=dict(nohash=True,
),
)
inputs = GrayscaleFillHoleImageFilter.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_GrayscaleFillHoleImageFilter_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = GrayscaleFillHoleImageFilter.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 28.268293
| 78
| 0.679896
|
794f285d9ce646d199aab7912f7713f032f5b0ce
| 4,230
|
py
|
Python
|
temboo/core/Library/RunKeeper/Nutrition/RetrieveLatestEntry.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/RunKeeper/Nutrition/RetrieveLatestEntry.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/RunKeeper/Nutrition/RetrieveLatestEntry.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# RetrieveLatestEntry
# Returns the lastest entry from a user's nutrition history.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveLatestEntry(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveLatestEntry Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveLatestEntry, self).__init__(temboo_session, '/Library/RunKeeper/Nutrition/RetrieveLatestEntry')
def new_input_set(self):
return RetrieveLatestEntryInputSet()
def _make_result_set(self, result, path):
return RetrieveLatestEntryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveLatestEntryChoreographyExecution(session, exec_id, path)
class RetrieveLatestEntryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveLatestEntry
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved after the final step in the OAuth process.)
"""
super(RetrieveLatestEntryInputSet, self)._set_input('AccessToken', value)
class RetrieveLatestEntryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveLatestEntry Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from RunKeeper.)
"""
return self._output.get('Response', None)
def get_Calories(self):
"""
Retrieve the value for the "Calories" output from this Choreo execution. ((integer) The number of calories associated with the latest nutrition entry.)
"""
return self._output.get('Calories', None)
def get_Meal(self):
"""
Retrieve the value for the "Meal" output from this Choreo execution. ((string) The meal name of the latest nutrition entry.)
"""
return self._output.get('Meal', None)
def get_Timestamp(self):
"""
Retrieve the value for the "Timestamp" output from this Choreo execution. ((date) The timestamp of the entry.)
"""
return self._output.get('Timestamp', None)
def get_URI(self):
"""
Retrieve the value for the "URI" output from this Choreo execution. ((string) TThe URI of the nutrition entry.)
"""
return self._output.get('URI', None)
def get_Water(self):
"""
Retrieve the value for the "Water" output from this Choreo execution. ((decimal) The water measurement from the latest nutrition entry.)
"""
return self._output.get('Water', None)
class RetrieveLatestEntryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveLatestEntryResultSet(response, path)
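# A hedged usage sketch. The session credentials are placeholders, and it assumes the
# standard Temboo SDK execution call (execute_with_results) -- adjust to your SDK version.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = RetrieveLatestEntry(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken("RUNKEEPER_OAUTH_TOKEN")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Calories(), results.get_Meal())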
| 39.166667
| 159
| 0.680851
|
794f28d29c760733ddb2d94c9c5345e470ce41a6
| 22,515
|
py
|
Python
|
pythran/tests/test_optimizations.py
|
sthagen/serge-sans-paille-pythran
|
fe41dcd069404a75ff56b537b7ac87a0e3c04dc2
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/test_optimizations.py
|
sthagen/serge-sans-paille-pythran
|
fe41dcd069404a75ff56b537b7ac87a0e3c04dc2
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/test_optimizations.py
|
sthagen/serge-sans-paille-pythran
|
fe41dcd069404a75ff56b537b7ac87a0e3c04dc2
|
[
"BSD-3-Clause"
] | null | null | null |
from pythran.tests import TestEnv
from pythran.typing import List
import unittest
import pythran
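# Note on the helpers used throughout: check_ast(init, ref, passes) asserts that applying
# the listed optimization passes to `init` yields the `ref` source, while run_test compiles
# the kernel with pythran and compares its result against the plain CPython execution.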
class TestOptimization(TestEnv):
def test_constant_fold_nan(self):
code = "def constant_fold_nan(a): from numpy import nan; a[0] = nan; return a"
self.run_test(code, [1., 2.], constant_fold_nan=[List[float]])
def test_constant_fold_empty_array(self):
code = "def constant_fold_empty_array(): from numpy import ones; return ones((0,0,0)).shape"
self.run_test(code, constant_fold_empty_array=[])
def test_constant_fold_divide_by_zero(self):
code = "def constant_fold_divide_by_zero(): return 1/0"
with self.assertRaises(pythran.syntax.PythranSyntaxError):
self.check_ast(code, "syntax error anyway", ["pythran.optimizations.ConstantFolding"])
def test_genexp(self):
self.run_test("def test_genexp(n): return sum((x*x for x in range(n)))", 5, test_genexp=[int])
def test_genexp_2d(self):
self.run_test("def test_genexp_2d(n1, n2): return sum((x*y for x in range(n1) for y in range(n2)))", 2, 3, test_genexp_2d=[int, int])
def test_genexp_if(self):
self.run_test("def test_genexp_if(n): return sum((x*x for x in range(n) if x < 4))", 5, test_genexp_if=[int])
def test_genexp_mixedif(self):
self.run_test("def test_genexp_mixedif(m, n): return sum((x*y for x in range(m) for y in range(n) if x < 4))", 2, 3, test_genexp_mixedif=[int, int])
def test_genexp_triangular(self):
self.run_test("def test_genexp_triangular(n): return sum((x*y for x in range(n) for y in range(x)))", 2, test_genexp_triangular=[int])
def test_aliased_readonce(self):
self.run_test("""
def foo(f,l):
return map(f,l[1:])
def alias_readonce(n):
map = foo
return list(map(lambda t: (t[0]*t[1] < 50), list(zip(range(n), range(n)))))
""", 10, alias_readonce=[int])
def test_replace_aliased_map(self):
self.run_test("""
def alias_replaced(n):
map = filter
return list(map(lambda x : x < 5, range(n)))
""", 10, alias_replaced=[int])
def test_listcomptomap_alias(self):
self.run_test("""
def foo(f,l):
return map(f,l[3:])
def listcomptomap_alias(n):
map = foo
return list([x for x in range(n)])
""", 10, listcomptomap_alias=[int])
def test_readonce_nested_calls(self):
self.run_test("""
def readonce_nested_calls(Lq):
import numpy as np
return np.prod(np.sign(Lq))
""", [-5.], readonce_nested_calls=[List[float]])
def test_readonce_return(self):
self.run_test("""
def foo(l):
return l
def readonce_return(n):
l = list(foo(range(n)))
return l[:]
""", 5, readonce_return=[int])
def test_readonce_assign(self):
self.run_test("""
def foo(l):
l[2] = 5
return list(range(10))
def readonce_assign(n):
return foo(list(range(n)))
""", 5, readonce_assign=[int])
def test_readonce_assignaug(self):
self.run_test("""
def foo(l):
l += [2,3]
return range(10)
def readonce_assignaug(n):
return list(foo(list(range(n))))
""", 5, readonce_assignaug=[int])
def test_readonce_for(self):
self.run_test("""
def foo(l):
s = []
for x in range(10):
s.extend(list(l))
return s
def readonce_for(n):
return foo(range(n))
""", 5, readonce_for=[int])
def test_readonce_2for(self):
self.run_test("""
def foo(l):
s = 0
for x in l:
s += x
for x in l:
s += x
return list(range(s))
def readonce_2for(n):
return foo(range(n))
""", 5, readonce_2for=[int])
def test_readonce_while(self):
self.run_test("""
def foo(l):
r = []
while (len(r) < 50):
r.extend(list(l))
return r
def readonce_while(n):
return foo(range(n))
""", 5, readonce_while=[int])
def test_readonce_if(self):
self.run_test("""
def h(l):
return sum(l)
def g(l):
return sum(l)
def foo(l):
if True:
return g(l)
else:
return h(l)
def readonce_if(n):
return foo(range(n))
""", 5, readonce_if=[int])
def test_readonce_if2(self):
self.run_test("""
def h(l):
return sum(l)
def g(l):
return max(l[1:])
def foo(l):
if True:
return g(l)
else:
return h(l)
def readonce_if2(n):
return foo(list(range(n)))
""", 5, readonce_if2=[int])
def test_readonce_slice(self):
self.run_test("""
def foo(l):
return list(l[:])
def readonce_slice(n):
return foo(list(range(n)))
""", 5, readonce_slice=[int])
def test_readonce_listcomp(self):
self.run_test("""
def foo(l):
return [z for x in l for y in l for z in range(x+y)]
def readonce_listcomp(n):
return foo(range(n))
""", 5, readonce_listcomp=[int])
def test_readonce_genexp(self):
self.run_test("""
def foo(l):
return (z for x in l for y in l for z in range(x+y))
def readonce_genexp(n):
return list(foo(range(n)))
""", 5, readonce_genexp=[int])
def test_readonce_recursive(self):
self.run_test("""
def foo(l,n):
if n < 5:
return foo(l,n+1)
else:
return sum(l)
def readonce_recursive(n):
return foo(range(n),0)
""", 5, readonce_recursive=[int])
def test_readonce_recursive2(self):
self.run_test("""
def foo(l,n):
if n < 5:
return foo(l,n+1)
else:
return sum(l[1:])
def readonce_recursive2(n):
return foo(list(range(n)),0)
""", 5, readonce_recursive2=[int])
def test_readonce_cycle(self):
self.run_test("""
def foo(l,n):
if n < 5:
return bar(l,n)
else:
return sum(l)
def bar(l,n):
return foo(l, n+1)
def readonce_cycle(n):
return foo(range(n),0)
""", 5, readonce_cycle=[int])
def test_readonce_cycle2(self):
self.run_test("""
def foo(l,n):
if n < 5:
return bar(l,n)
else:
return sum(l)
def bar(l,n):
return foo(l, n+1)
def readonce_cycle2(n):
return foo(range(n),0)
""", 5, readonce_cycle2=[int])
def test_readonce_list(self):
init = "def foo(l): return sum(list(l))"
ref = """def foo(l):
return builtins.sum(l)"""
self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"])
def test_readonce_tuple(self):
init = "def foo(l): return sum(tuple(l))"
ref = """def foo(l):
return builtins.sum(l)"""
self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"])
def test_readonce_array(self):
init = "def foo(l): import numpy as np; return sum(np.array(l))"
ref = """import numpy as __pythran_import_numpy
def foo(l):
return builtins.sum(l)"""
self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"])
def test_readonce_np_sum_copy(self):
init = "def foo(l): import numpy as np; return np.sum(np.copy(l))"
ref = """import numpy as __pythran_import_numpy
def foo(l):
return __pythran_import_numpy.sum(l)"""
self.check_ast(init, ref, ["pythran.optimizations.IterTransformation"])
def test_omp_forwarding(self):
init = """
def foo():
a = 2
#omp parallel
if 1:
builtins.print(a)
"""
ref = """\
def foo():
a = 2
'omp parallel'
if 1:
builtins.print(a)
return builtins.None"""
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"])
def test_omp_forwarding2(self):
init = """
def foo():
#omp parallel
if 1:
a = 2
builtins.print(a)
"""
ref = """\
def foo():
'omp parallel'
if 1:
pass
builtins.print(2)
return builtins.None"""
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"])
def test_omp_forwarding3(self):
init = """
def foo():
#omp parallel
if 1:
a = 2
builtins.print(a)
"""
ref = """\
def foo():
'omp parallel'
if 1:
a = 2
builtins.print(a)
return builtins.None"""
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"])
def test_forwarding0(self):
init = '''
def foo(x):
for i in x:
if i:
j = i
return j'''
ref = init
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"])
def test_forwarding1(self):
init = 'def f(i):\n while i:\n if i > 3: x=1; continue\n x=2\n return x'
ref = 'def f(i):\n while i:\n if (i > 3):\n x = 1\n continue\n x = 2\n return x'
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution"])
def test_full_unroll0(self):
init = """
def full_unroll0():
k = []
for i,j in zip([1,2,3],[4,5,6]): k.append((i,j))
return k"""
ref = '''def full_unroll0():
k = []
__tuple0 = (1, 4)
j = __tuple0[1]
i = __tuple0[0]
builtins.list.append(k, (i, j))
__tuple0 = (2, 5)
j = __tuple0[1]
i = __tuple0[0]
builtins.list.append(k, (i, j))
__tuple0 = (3, 6)
j = __tuple0[1]
i = __tuple0[0]
builtins.list.append(k, (i, j))
return k'''
self.check_ast(init, ref, ["pythran.optimizations.ConstantFolding", "pythran.optimizations.LoopFullUnrolling"])
def test_full_unroll1(self):
self.run_test("""
def full_unroll1():
c = 0
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
c += 1
return c""", full_unroll1=[])
def test_deadcodeelimination(self):
init = """
def bar(a):
builtins.print(a)
return 10
def foo(a):
if 1 < bar(a):
b = 2
return b"""
ref = """\
def bar(a):
builtins.print(a)
return 10
def foo(a):
(1 < bar(a))
return 2"""
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution", "pythran.optimizations.DeadCodeElimination"])
def test_deadcodeelimination2(self):
init = """
def foo(a):
if 1 < max(a, 2):
b = 2
return b"""
ref = """def foo(a):
return 2"""
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution", "pythran.optimizations.DeadCodeElimination"])
def test_deadcodeelimination3(self):
init = """
def bar(a):
return a
def foo(a):
"omp flush"
bar(a)
return 2"""
ref = """def bar(a):
return a
def foo(a):
'omp flush'
pass
return 2"""
self.check_ast(init, ref, ["pythran.optimizations.DeadCodeElimination"])
def test_deadcodeelimination4(self):
init = 'def noeffect(i): a=[];b=[a]; builtins.list.append(b[0],i); return 1'
ref = 'def noeffect(i):\n return 1'
self.check_ast(init, ref, ["pythran.optimizations.ForwardSubstitution",
"pythran.optimizations.ConstantFolding",
"pythran.optimizations.DeadCodeElimination"])
def test_patternmatching(self):
init = """
def foo(a):
return len(set(range(len(set(a)))))"""
ref = """def foo(a):
return builtins.pythran.len_set(builtins.range(builtins.pythran.len_set(a)))"""
self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"])
def test_patternmatching2(self):
init = """
def foo(a):
return reversed(range(len(set(a))))"""
ref = """def foo(a):
return builtins.range((builtins.pythran.len_set(a) - 1), (-1), (-1))"""
self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"])
def test_patternmatching3(self):
init = """
def foo(a):
return a * a"""
ref = """def foo(a):
return (a ** 2)"""
self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"])
def test_patternmatching4(self):
init = """
def foo(a):
return a ** .5"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.sqrt(a)"""
self.check_ast(init, ref, ["pythran.optimizations.PatternTransform"])
def test_patternmatching5(self):
init = """
def foo(a):
return a ** (1./3.)"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.cbrt(a)"""
self.check_ast(init, ref, ["pythran.optimizations.ConstantFolding",
"pythran.optimizations.PatternTransform"])
def test_lambda_patterns0(self):
init = """
def foo(a):
return lambda x, y: x + y"""
ref = """import operator as __pythran_import_operator
def foo(a):
return __pythran_import_operator.add"""
self.check_ast(init, ref, ["pythran.transformations.RemoveLambdas"])
def test_lambda_patterns1(self):
init = """
def foo(a):
return (lambda x, y: x + 1), (lambda z, w: z + 1)"""
ref = """def foo(a):
return (foo_lambda0, foo_lambda0)
def foo_lambda0(x, y):
return (x + 1)"""
self.check_ast(init, ref, ["pythran.transformations.RemoveLambdas"])
def test_inline_builtins_broadcasting0(self):
init = """
import numpy as np
def foo(a):
return np.array([a, 1]) == 1"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.array(((a == 1), (1 == 1)))"""
self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"])
def test_inline_builtins_broadcasting1(self):
init = """
import numpy as np
def foo(a):
return np.asarray([a, 1]) + 1"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.array(((a + 1), (1 + 1)))"""
self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"])
def test_inline_builtins_broadcasting2(self):
init = """
import numpy as np
def foo(a):
return - np.asarray([a, 1])"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.array(((- a), (- (1))))"""
self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"])
def test_inline_builtins_broadcasting3(self):
init = """
import numpy as np
def foo(a):
return np.asarray([a, 1]) + (3, 3)"""
ref = """import numpy as __pythran_import_numpy
def foo(a):
return __pythran_import_numpy.array(((a + 3), (1 + 3)))"""
self.check_ast(init, ref, ["pythran.optimizations.InlineBuiltins"])
class TestConstantUnfolding(TestEnv):
def test_constant_folding_int_literals(self):
self.run_test("def constant_folding_int_literals(): return 1+2*3.5", constant_folding_int_literals=[])
def test_constant_folding_str_literals(self):
self.run_test("def constant_folding_str_literals(): return \"1\"+'2'*3", constant_folding_str_literals=[])
def test_constant_folding_list_literals(self):
self.run_test("def constant_folding_list_literals(): return [1]+[2]*3", constant_folding_list_literals=[])
def test_constant_folding_set_literals(self):
self.run_test("def constant_folding_set_literals(): return {1,2,3,3}", constant_folding_set_literals=[])
def test_constant_folding_builtins(self):
self.run_test("def constant_folding_builtins(): return list(map(len,zip(range(2), range(2))))", constant_folding_builtins=[])
def test_constant_folding_imported_functions(self):
self.run_test("def constant_folding_imported_functions(): from math import cos ; return float(int(10*cos(1)))", constant_folding_imported_functions=[])
def test_constant_folding_list_method_calls(self):
self.run_test("def foo(n): l=[] ; l.append(n) ; return l\ndef constant_folding_list_method_calls(n): return foo(n)", 1, constant_folding_list_method_calls=[int])
def test_constant_folding_complex_calls(self):
self.run_test("def constant_folding_complex_calls(): return complex(1,1)", constant_folding_complex_calls=[])
def test_constant_folding_expansive_calls(self):
self.run_test("def constant_folding_expansive_calls(): return list(range(2**6))", constant_folding_expansive_calls=[])
def test_constant_folding_too_expansive_calls(self):
self.run_test("def constant_folding_too_expansive_calls(): return list(range(2**16))", constant_folding_too_expansive_calls=[])
def test_constant_folding_bool_array(self):
self.run_test("def constant_folding_bool_array(): import numpy as np; return np.concatenate([np.array([True]),np.array([True])])", constant_folding_bool_array=[])
class TestAnalyses(TestEnv):
def test_imported_ids_shadow_intrinsic(self):
self.run_test("def imported_ids_shadow_intrinsic(range): return [ i*range for i in [1,2,3] ]", 2, imported_ids_shadow_intrinsic=[int])
def test_shadowed_variables(self):
self.run_test("def shadowed_variables(a): b=1 ; b+=a ; a= 2 ; b+=a ; return a,b", 18, shadowed_variables=[int])
def test_decl_shadow_intrinsic(self):
self.run_test("def decl_shadow_intrinsic(l): len=lambda l:1 ; return len(l)", [1,2,3], decl_shadow_intrinsic=[List[int]])
def test_used_def_chains(self):
self.run_test("def use_def_chain(a):\n i=a\n for i in range(4):\n print(i)\n i=5.4\n print(i)\n break\n i = 4\n return i", 3, use_def_chain=[int])
def test_used_def_chains2(self):
self.run_test("def use_def_chain2(a):\n i=a\n for i in range(4):\n print(i)\n i='lala'\n print(i)\n i = 4\n return i", 3, use_def_chain2=[int])
@unittest.skip("Variable defined in a branch in loops are not accepted.")
def test_importedids(self):
self.run_test("def importedids(a):\n i=a\n for i in range(4):\n if i==0:\n b = []\n else:\n b.append(i)\n return b", 3, importedids=[int])
def test_falsepoly(self):
self.run_test("def falsepoly():\n i = 2\n if i:\n i='ok'\n else:\n i='lolo'\n return i", falsepoly=[])
def test_global_effects_unknown(self):
code = '''
def bb(x):
return x[0]()
def ooo(a):
def aa():
return a
return aa,
def global_effects_unknown(a):
return bb(ooo(a))'''
self.run_test(code,
1,
global_effects_unknown=[int])
def test_argument_effects_unknown(self):
code = '''
def int_datatype(n):
return list, str, n
def list_datatype(parent):
def parser(value):
return parent[0](value)
def formatter(value):
return parent[1](value)
return parser, formatter
def argument_effects_unknown(n):
list_datatype(int_datatype(n))'''
self.run_test(code,
1,
argument_effects_unknown=[int])
def test_inlining_globals_side_effect(self):
code = '''
import random
r = random.random()
def inlining_globals_side_effect():
return r == r == r
'''
self.run_test(code,
inlining_globals_side_effect=[])
def test_subscript_function_aliasing(self):
code = '''
SP = 0x20
STX = 0x02
ETX = 0x03
def _div_tuple(base, div):
a = base // div
b = base % div
return a, b
def number_datatype(base, dc, fs=6):
def parser(value):
if not value.isdigit():
raise ValueError("Invalid number")
value = int(value)
ret = []
while value > 0:
a, b = _div_tuple(value, len(base))
ret.insert(0, ord(base[b]))
value = a
ret = [ord('0')] * (dc - len(ret)) + ret
ret = [SP] * (fs - len(ret)) + ret
return ret
def formatter(v):
ret = 0
for a in [chr(c) for c in v][-dc:]:
ret = ret * len(base) + base.index(a)
return str(int(ret))
return parser, formatter
def int_datatype(dc, fs=6):
return number_datatype(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], dc, fs)
def hex_datatype(dc, fs=6):
return number_datatype(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'], dc, fs)
simple_commands = [('aa', 107, int_datatype(4)),
('bb', 112, int_datatype(1)),
]
str_commands = {c: (c, v, f) for c, v, f in simple_commands}
def subscript_function_aliasing(id, ai, pfc, value):
data = [0x0] * 16
_, pfc, fcts = str_commands[pfc]
data[5:9] = int_datatype(4, 4)[0](str(pfc))
data[9:15] = fcts[0](value)
return data'''
self.run_test(code, 'aa', 2, 'bb', '3', subscript_function_aliasing=[str, int, str, str])
def test_range_simplify_jl(self):
code = '''
import numpy as np
silent = 0
def B(n):
TS = 10
outSig = []
while n:
outSamps = np.zeros((10, 2))
outSig.append(outSamps.copy())
outSamps = np.zeros((10, 2))
outSig.append(outSamps.copy())
return outSig, TS
def range_simplify_jl(n):
outSignal, TS = B(n)
return (outSignal)'''
self.run_test(code, 0, range_simplify_jl=[int])
def test_range_simplify_subscript(self):
code = '''
def LooperMaster___init__():
self_userUseTempo = 1
self = [self_userUseTempo]
return self
def range_simplify_subscript(n):
ML = LooperMaster___init__()
ML[0] = n
return ML'''
self.run_test(code, 1, range_simplify_subscript=[int])
def test_insert_none0(self):
code = '''
def insert_none0(x):
for ii in range(len(x)):
if x[ii]: return x[ii]
else:
return 0'''
self.run_test(code, [], insert_none0=[List[int]])
| 30.50813
| 170
| 0.587608
|
794f2b4d157d2b7e2e2778c0c492ba5af4fa657b
| 1,712
|
py
|
Python
|
configs/swinunetv2gtv8crossattentionupsample/swinunetv2_g10_gtv8_cross_attention_upsample_patch_expand_base_patch4_window7_512x512_160k_ade20k.py
|
Myyyr/transseg2d
|
7664653dec0bf63d96ad3c76fc225d2d7f607e41
|
[
"Apache-2.0"
] | null | null | null |
configs/swinunetv2gtv8crossattentionupsample/swinunetv2_g10_gtv8_cross_attention_upsample_patch_expand_base_patch4_window7_512x512_160k_ade20k.py
|
Myyyr/transseg2d
|
7664653dec0bf63d96ad3c76fc225d2d7f607e41
|
[
"Apache-2.0"
] | null | null | null |
configs/swinunetv2gtv8crossattentionupsample/swinunetv2_g10_gtv8_cross_attention_upsample_patch_expand_base_patch4_window7_512x512_160k_ade20k.py
|
Myyyr/transseg2d
|
7664653dec0bf63d96ad3c76fc225d2d7f607e41
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/swin_unet_v2_gtv8_cross_attention_upsample.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
backbone=dict(
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
num_classes=150,
use_cross_attention_by_layer=[True, True, True, True],
residual_patch_expand=True,
gt_num=10
),
decode_head=dict(
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
num_classes=150
)
#,
# auxiliary_head=dict(
# in_channels=512,
# num_classes=150
# )
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
| 31.127273
| 101
| 0.568925
|
794f2b5d089da1680cfa6520c43a30396b24a03a
| 15,879
|
py
|
Python
|
pybind/slxos/v16r_1_00b/interface/port_channel/spanning_tree/vlan/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/interface/port_channel/spanning_tree/vlan/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/interface/port_channel/spanning_tree/vlan/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import guard
class vlan(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/port-channel/spanning-tree/vlan. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__id','__cost','__priority','__guard',)
_yang_name = 'vlan'
_rest_name = 'vlan'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..240']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Port priority for a bridge', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
self.__guard = YANGDynClass(base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard \nmode", u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
self.__cost = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..200000000']}), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Path cost for a port', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
self.__id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}), is_leaf=True, yang_name="id", rest_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-suppress-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'port-channel', u'spanning-tree', u'vlan']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Port-channel', u'spanning-tree', u'vlan']
def _get_id(self):
"""
Getter method for id, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/id (uint32)
"""
return self.__id
def _set_id(self, v, load=False):
"""
Setter method for id, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_id() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}), is_leaf=True, yang_name="id", rest_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-suppress-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}), is_leaf=True, yang_name="id", rest_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-suppress-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__id = t
if hasattr(self, '_set'):
self._set()
def _unset_id(self):
self.__id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..4090']}), is_leaf=True, yang_name="id", rest_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-range': None, u'cli-suppress-no': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
def _get_cost(self):
"""
Getter method for cost, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/cost (uint32)
"""
return self.__cost
def _set_cost(self, v, load=False):
"""
Setter method for cost, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/cost (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_cost is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cost() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..200000000']}), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Path cost for a port', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cost must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..200000000']}), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Path cost for a port', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__cost = t
if hasattr(self, '_set'):
self._set()
def _unset_cost(self):
self.__cost = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..200000000']}), is_leaf=True, yang_name="cost", rest_name="cost", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Path cost for a port', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/priority (uint32)
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/priority (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..240']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Port priority for a bridge', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..240']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Port priority for a bridge', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..240']}), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Port priority for a bridge', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='uint32', is_config=True)
def _get_guard(self):
"""
Getter method for guard, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/guard (container)
"""
return self.__guard
def _set_guard(self, v, load=False):
"""
Setter method for guard, mapped from YANG variable /interface/port_channel/spanning_tree/vlan/guard (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_guard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_guard() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard \nmode", u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """guard must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard \nmode", u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
})
self.__guard = t
if hasattr(self, '_set'):
self._set()
def _unset_guard(self):
self.__guard = YANGDynClass(base=guard.guard, is_container='container', presence=False, yang_name="guard", rest_name="guard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Change an interface's spanning tree guard \nmode", u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
id = __builtin__.property(_get_id, _set_id)
cost = __builtin__.property(_get_cost, _set_cost)
priority = __builtin__.property(_get_priority, _set_priority)
guard = __builtin__.property(_get_guard, _set_guard)
_pyangbind_elements = {'id': id, 'cost': cost, 'priority': priority, 'guard': guard, }
| 67.858974
| 582
| 0.720071
|
794f2b621831af6296c973bddca62e641eaa072e
| 4,421
|
py
|
Python
|
xarray/core/dtypes.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | 1
|
2020-09-14T02:32:54.000Z
|
2020-09-14T02:32:54.000Z
|
xarray/core/dtypes.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | null | null | null |
xarray/core/dtypes.py
|
DocOtak/xarray
|
01a9baa01b1378cbf3f324ea3c27150a3860d3d1
|
[
"Apache-2.0"
] | 2
|
2019-08-22T21:07:03.000Z
|
2020-03-30T10:25:00.000Z
|
import functools
import numpy as np
from . import utils
# Use as a sentinel value to indicate a dtype appropriate NA value.
NA = utils.ReprObject('<NA>')
@functools.total_ordering
class AlwaysGreaterThan:
def __gt__(self, other):
return True
def __eq__(self, other):
return isinstance(other, type(self))
@functools.total_ordering
class AlwaysLessThan:
def __lt__(self, other):
return True
def __eq__(self, other):
return isinstance(other, type(self))
# Equivalence to np.inf (-np.inf) for object-type
INF = AlwaysGreaterThan()
NINF = AlwaysLessThan()
# Pairs of types that, if both found, should be promoted to object dtype
# instead of following NumPy's own type-promotion rules. These type promotion
# rules match pandas instead. For reference, see the NumPy type hierarchy:
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.scalars.html
PROMOTE_TO_OBJECT = [
{np.number, np.character}, # numpy promotes to character
{np.bool_, np.character}, # numpy promotes to character
{np.bytes_, np.unicode_}, # numpy promotes to unicode
]
def maybe_promote(dtype):
"""Simpler equivalent of pandas.core.common._maybe_promote
Parameters
----------
dtype : np.dtype
Returns
-------
dtype : Promoted dtype that can hold missing values.
fill_value : Valid missing value for the promoted dtype.
"""
# N.B. these casting rules should match pandas
if np.issubdtype(dtype, np.floating):
fill_value = np.nan
elif np.issubdtype(dtype, np.timedelta64):
# See https://github.com/numpy/numpy/issues/10685
# np.timedelta64 is a subclass of np.integer
# Check np.timedelta64 before np.integer
fill_value = np.timedelta64('NaT')
elif np.issubdtype(dtype, np.integer):
if dtype.itemsize <= 2:
dtype = np.float32
else:
dtype = np.float64
fill_value = np.nan
elif np.issubdtype(dtype, np.complexfloating):
fill_value = np.nan + np.nan * 1j
elif np.issubdtype(dtype, np.datetime64):
fill_value = np.datetime64('NaT')
else:
dtype = object
fill_value = np.nan
return np.dtype(dtype), fill_value
NAT_TYPES = (np.datetime64('NaT'), np.timedelta64('NaT'))
def get_fill_value(dtype):
"""Return an appropriate fill value for this dtype.
Parameters
----------
dtype : np.dtype
Returns
-------
fill_value : Missing value corresponding to this dtype.
"""
_, fill_value = maybe_promote(dtype)
return fill_value
def get_pos_infinity(dtype):
"""Return an appropriate positive infinity for this dtype.
Parameters
----------
dtype : np.dtype
Returns
-------
fill_value : positive infinity value corresponding to this dtype.
"""
if issubclass(dtype.type, (np.floating, np.integer)):
return np.inf
if issubclass(dtype.type, np.complexfloating):
return np.inf + 1j * np.inf
return INF
def get_neg_infinity(dtype):
"""Return an appropriate negative infinity for this dtype.
Parameters
----------
dtype : np.dtype
Returns
-------
fill_value : negative infinity value corresponding to this dtype.
"""
if issubclass(dtype.type, (np.floating, np.integer)):
return -np.inf
if issubclass(dtype.type, np.complexfloating):
return -np.inf - 1j * np.inf
return NINF
def is_datetime_like(dtype):
"""Check if a dtype is a subclass of the numpy datetime types
"""
return (np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64))
def result_type(*arrays_and_dtypes):
"""Like np.result_type, but with type promotion rules matching pandas.
Examples of changed behavior:
number + string -> object (not string)
bytes + unicode -> object (not unicode)
Parameters
----------
*arrays_and_dtypes : list of arrays and dtypes
The dtype is extracted from both numpy and dask arrays.
Returns
-------
numpy.dtype for the result.
"""
types = {np.result_type(t).type for t in arrays_and_dtypes}
for left, right in PROMOTE_TO_OBJECT:
if (any(issubclass(t, left) for t in types) and
any(issubclass(t, right) for t in types)):
return np.dtype(object)
return np.result_type(*arrays_and_dtypes)
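# A minimal usage sketch (not part of the original module): exercising the
# pandas-style promotion rules documented in result_type above.
if __name__ == "__main__":
    # number + string promotes to object instead of a NumPy string dtype
    print(result_type(np.array([1, 2]), np.array(["a"])))
    # purely numeric inputs fall through to np.result_type (float64 here)
    print(result_type(np.array([1.0]), np.array([2], dtype="int32")))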
| 26.005882
| 77
| 0.654377
|
794f2b66c200b853e36fd5fbb5b5ea8eb9c2a6b4
| 179
|
py
|
Python
|
tests/utils.py
|
black-cape/cast-iron-worker
|
023df792ade1813ff2fe851d39cba76d3a12e770
|
[
"MIT"
] | 1
|
2021-04-28T21:32:49.000Z
|
2021-04-28T21:32:49.000Z
|
tests/utils.py
|
black-cape/cast-iron-worker
|
023df792ade1813ff2fe851d39cba76d3a12e770
|
[
"MIT"
] | 5
|
2021-04-29T14:20:53.000Z
|
2022-01-06T17:09:13.000Z
|
tests/utils.py
|
black-cape/cast-iron-worker
|
023df792ade1813ff2fe851d39cba76d3a12e770
|
[
"MIT"
] | null | null | null |
"""Common test module"""
class DummyClass:
def setup(self, **kwargs):
pass
def run(self, data, **kwargs):
pass
def test_run(data, **kwargs):
pass
| 12.785714
| 34
| 0.569832
|
794f2dd075b8a4570c986f0a28f5b8541b381398
| 1,350
|
py
|
Python
|
candle.py
|
damonthecricket/py-stock
|
469ba6a53e091f36b336e39c5b6b6934adfab1cd
|
[
"MIT"
] | null | null | null |
candle.py
|
damonthecricket/py-stock
|
469ba6a53e091f36b336e39c5b6b6934adfab1cd
|
[
"MIT"
] | 8
|
2019-08-10T20:57:43.000Z
|
2019-08-15T15:06:14.000Z
|
candle.py
|
damonthecricket/pystock
|
469ba6a53e091f36b336e39c5b6b6934adfab1cd
|
[
"MIT"
] | null | null | null |
import csv
# Candle
def create(date, open, high, low, close, volume):
return Candle(date, open, high, low, close, volume)
class Candle:
def __init__(self, date, open_, high, low, close, volume):
self._date = date
self._open = open_
self._high = high
self._low = low
self._close = close
self._volume = volume
def date(self):
return self._date
def open(self):
return self._open
def high(self):
return self._high
def low(self):
return self._low
def close(self):
return self._close
def volume(self):
return self._volume
def is_bullish(self):
return self._low < self._high
def is_bearish(self):
return self._low > self._high
def is_unit(self):
return self._low == self._high
def current(self):
if self.is_bullish():
return self._high
elif self.is_bearish():
return self._low
else:
return self._high
def __eq__(self, other):
return self._date == other.date() and self._open == other.open() and self._high == other.high() and \
    self._low == other.low() and self._close == other.close() and self._volume == other.volume()
def __str__(self):
return "Japanese candle, date: %s, open: %s, high: %s, low: %s, close: %s, volume: %s" % \
(self._date, self._open, self._high, self._low, self._close, self._volume)
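# A minimal usage sketch (not part of the original module), assuming plain
# numeric OHLCV values and a string date.
if __name__ == "__main__":
    c = create("2019-08-10", 10.0, 12.5, 9.8, 12.1, 150000)
    print(c)                # formatted summary via __str__
    print(c.is_bullish())   # True here: the stored low is below the high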
| 16.875
| 103
| 0.660741
|
794f2dec6a07b2e30f788fb6de0f58d4220e2d83
| 918
|
py
|
Python
|
tests/test_optimizers/test_parameter/test_stochastic_hill_climbing_para_init.py
|
gtr8/Gradient-Free-Optimizers
|
19dcde35d93f048721bac3600f33696ca21ec669
|
[
"MIT"
] | 860
|
2020-06-10T08:53:41.000Z
|
2022-03-30T14:22:20.000Z
|
tests/test_optimizers/test_parameter/test_stochastic_hill_climbing_para_init.py
|
gtr8/Gradient-Free-Optimizers
|
19dcde35d93f048721bac3600f33696ca21ec669
|
[
"MIT"
] | 24
|
2021-01-25T08:06:54.000Z
|
2022-01-24T13:46:48.000Z
|
tests/test_optimizers/test_parameter/test_stochastic_hill_climbing_para_init.py
|
gtr8/Gradient-Free-Optimizers
|
19dcde35d93f048721bac3600f33696ca21ec669
|
[
"MIT"
] | 52
|
2020-06-25T09:36:15.000Z
|
2022-03-18T18:11:52.000Z
|
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License
import pytest
import numpy as np
from gradient_free_optimizers import StochasticHillClimbingOptimizer
from .test_hill_climbing_para_init import hill_climbing_para
from ._base_para_test import _base_para_test_func
def objective_function(para):
score = -para["x1"] * para["x1"]
return score
search_space = {"x1": np.arange(-100, 101, 1)}
stochastic_hill_climbing_para = hill_climbing_para + [
({"p_accept": 0.01}),
({"p_accept": 0.5}),
({"p_accept": 1}),
({"p_accept": 10}),
({"norm_factor": 0.1}),
({"norm_factor": 0.5}),
({"norm_factor": 0.9}),
({"norm_factor": "adaptive"}),
]
pytest_wrapper = ("opt_para", stochastic_hill_climbing_para)
@pytest.mark.parametrize(*pytest_wrapper)
def test_hill_climbing_para(opt_para):
_base_para_test_func(opt_para, StochasticHillClimbingOptimizer)
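# A minimal standalone sketch (not part of the test module): running the
# optimizer directly with one of the parameter sets exercised above. The
# best_para/best_score attribute names are assumed from the library's
# public API.
if __name__ == "__main__":
    opt = StochasticHillClimbingOptimizer(search_space, p_accept=0.5)
    opt.search(objective_function, n_iter=30)
    print(opt.best_para, opt.best_score)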
| 23.538462
| 68
| 0.715686
|
794f2f69b9b7678cacd02f5655016bfc47b2d4a6
| 1,230
|
py
|
Python
|
redlure-client.py
|
redlure/redlure-client
|
4441950ac41c16830d1bfea7b9fc98025429c38a
|
[
"BSD-3-Clause"
] | 5
|
2020-08-09T16:09:58.000Z
|
2020-11-01T13:45:03.000Z
|
redlure-client.py
|
redlure/redlure-client
|
4441950ac41c16830d1bfea7b9fc98025429c38a
|
[
"BSD-3-Clause"
] | 19
|
2021-04-01T12:25:50.000Z
|
2022-03-02T13:25:32.000Z
|
redlure-client.py
|
redlure/redlure-client
|
4441950ac41c16830d1bfea7b9fc98025429c38a
|
[
"BSD-3-Clause"
] | 2
|
2020-12-09T05:00:54.000Z
|
2022-01-13T15:48:50.000Z
|
#!/usr/bin/env python3
from config import Config
import subprocess
import os
import shlex
def gen_certs():
proc = subprocess.Popen(shlex.split('openssl req -x509 -newkey rsa:4096 -nodes -subj "/" -out redlure-cert.pem -keyout redlure-key.pem -days 365'))
proc.wait()
def main():
# base ng command that will start the client
cmd = f'ng serve --disable-host-check --host {Config.HOST} --port {Config.PORT}'
# if SSL add ssl flag
if Config.SSL:
cmd += f' --ssl --ssl-cert {Config.CERT_PATH} --ssl-key {Config.KEY_PATH}'
if Config.CERT_PATH == 'redlure-cert.pem' and Config.KEY_PATH == 'redlure-key.pem':
# if not generated, run OpenSSL
if not os.path.isfile('redlure-cert.pem') or not os.path.isfile('redlure-key.pem'):
gen_certs()
else:
if not os.path.isfile(Config.CERT_PATH) or not os.path.isfile(Config.KEY_PATH):
print('[!] CERT_PATH or KEY_PATH file does not exist')
exit()
try:
# start the webserver
client = subprocess.Popen(shlex.split(cmd))
client.wait()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| 29.285714
| 151
| 0.611382
|
794f2f8791b2b8b3850c1eae1b3db050aa76e192
| 2,799
|
py
|
Python
|
tests/unit/test_rdf_utils.py
|
gouttegd/kgx
|
1efa0bfaf62113855ffc1d002903236e4ff8706a
|
[
"BSD-3-Clause"
] | 32
|
2020-10-21T17:35:27.000Z
|
2022-03-17T02:40:08.000Z
|
tests/unit/test_rdf_utils.py
|
gouttegd/kgx
|
1efa0bfaf62113855ffc1d002903236e4ff8706a
|
[
"BSD-3-Clause"
] | 136
|
2018-04-24T02:15:39.000Z
|
2020-10-02T00:14:13.000Z
|
tests/unit/test_rdf_utils.py
|
gouttegd/kgx
|
1efa0bfaf62113855ffc1d002903236e4ff8706a
|
[
"BSD-3-Clause"
] | 19
|
2018-05-03T17:03:08.000Z
|
2020-07-15T22:12:40.000Z
|
import os
import pytest
from rdflib import URIRef, Graph
from kgx.prefix_manager import PrefixManager
from kgx.utils.rdf_utils import infer_category, process_predicate
from tests import RESOURCE_DIR
@pytest.mark.parametrize(
"query",
[
(URIRef("http://purl.obolibrary.org/obo/GO_0007267"), "biological_process"),
(URIRef("http://purl.obolibrary.org/obo/GO_0019899"), "molecular_function"),
(URIRef("http://purl.obolibrary.org/obo/GO_0005739"), "cellular_component"),
],
)
def test_infer_category(query):
"""
Test inferring of biolink category for a given IRI.
"""
graph = Graph()
graph.parse(os.path.join(RESOURCE_DIR, "goslim_generic.owl"))
[c] = infer_category(query[0], graph)
assert c == query[1]
@pytest.mark.parametrize(
"query",
[
(
"http://purl.org/oban/association_has_object",
"biolink:object",
"rdf:object",
"OBAN:association_has_object",
"association_has_object",
),
(
"http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"biolink:type",
"rdf:type",
"rdf:type",
"type",
),
(
"https://monarchinitiative.org/frequencyOfPhenotype",
None,
None,
"MONARCH:frequencyOfPhenotype",
"frequencyOfPhenotype",
),
(
"http://purl.obolibrary.org/obo/RO_0002200",
"biolink:has_phenotype",
"biolink:has_phenotype",
"RO:0002200",
"0002200",
),
(
"http://www.w3.org/2002/07/owl#equivalentClass",
"biolink:same_as",
"biolink:same_as",
"owl:equivalentClass",
"equivalentClass",
),
(
"https://www.example.org/UNKNOWN/new_prop",
None,
None,
":new_prop",
"new_prop",
),
(
"http://purl.obolibrary.org/obo/RO_0000091",
None,
None,
"RO:0000091",
"0000091",
),
("RO:0000091", None, None, "RO:0000091", "0000091"),
("category", "biolink:category", "biolink:category", ":category", "category"),
("predicate", "biolink:predicate", "rdf:predicate", ":predicate", "predicate"),
("type", "biolink:type", "rdf:type", ":type", "type"),
("name", "biolink:name", "rdfs:label", ":name", "name"),
],
)
def test_process_predicate(query):
"""
Test behavior of process_predicate method.
"""
pm = PrefixManager()
x = process_predicate(pm, query[0])
assert x[0] == query[1]
assert x[1] == query[2]
assert x[2] == query[3]
assert x[3] == query[4]
| 28.85567
| 87
| 0.538049
|
794f2f884908b33449a379effe4ce4343f0dd322
| 5,830
|
py
|
Python
|
random-forests.py
|
saksham-mittal/CS6510-Kaggle-Challenge
|
01cf220a826649fc7341c057a2175c98acf025ba
|
[
"MIT"
] | null | null | null |
random-forests.py
|
saksham-mittal/CS6510-Kaggle-Challenge
|
01cf220a826649fc7341c057a2175c98acf025ba
|
[
"MIT"
] | null | null | null |
random-forests.py
|
saksham-mittal/CS6510-Kaggle-Challenge
|
01cf220a826649fc7341c057a2175c98acf025ba
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
training_set = pd.read_csv("train.csv")
# Extracting labels from training set
training_labels = training_set['pricing_category']
print(training_labels)
# Dropping the last column and id from training set
training_set = training_set.drop(labels='pricing_category', axis=1)
training_set = training_set.drop(labels='id', axis=1)
# print(training_set)
training_set['taxi_type'].fillna('O', inplace=True)
training_set['customer_score'].fillna(training_set['customer_score'].mean(), inplace=True)
training_set['customer_score_confidence'].fillna('O', inplace=True)
training_set['months_of_activity'].fillna(0.0, inplace=True)
labelEnc = LabelEncoder()
male = labelEnc.fit_transform(training_set['sex'])
oneHotEnc = OneHotEncoder(categorical_features=[0])
male = oneHotEnc.fit_transform(male.reshape(-1, 1)).toarray()
# print(male)
training_temp = {}
for i in range(len(training_set.taxi_type.unique())):
training_temp["taxi_type_{}".format(sorted(training_set.taxi_type.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['taxi_type']):
training_temp['taxi_type_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='taxi_type', axis=1)
training_temp = {}
for i in range(len(training_set.customer_score_confidence.unique())):
training_temp["customer_score_confidence_{}".format(sorted(training_set.customer_score_confidence.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['customer_score_confidence']):
training_temp['customer_score_confidence_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='customer_score_confidence', axis=1)
training_temp = {}
for i in range(len(training_set.drop_location_type.unique())):
training_temp["drop_location_type_{}".format(sorted(training_set.drop_location_type.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['drop_location_type']):
training_temp['drop_location_type_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='drop_location_type', axis=1)
training_set = training_set.drop(labels='sex', axis=1)
training_set.insert(training_set.shape[1], "male", male[:, 0], True)
training_set.insert(training_set.shape[1], "female", male[:, 1], True)
print(training_set)
training_set1 = training_set
training_set['anon_var_1'].fillna(training_set['anon_var_1'].mean(), inplace=True)
# print(training_set)
training_set1 = training_set1.drop(labels='anon_var_1', axis=1)
# print(training_set1)
test_set = pd.read_csv("test.csv")
# Dropping the id column
test_id = test_set['id']
test_id = np.asarray(test_id)
test_set = test_set.drop(labels='id', axis=1)
test_set['taxi_type'].fillna('O', inplace=True)
test_set['customer_score'].fillna(training_set['customer_score'].mean(), inplace=True)
test_set['customer_score_confidence'].fillna('O', inplace=True)
test_set['months_of_activity'].fillna(0.0, inplace=True)
labelEnc = LabelEncoder()
male = labelEnc.fit_transform(test_set['sex'])
oneHotEnc = OneHotEncoder(categorical_features=[0])
male = oneHotEnc.fit_transform(male.reshape(-1, 1)).toarray()
# print(male)
test_temp = {}
for i in range(len(test_set.taxi_type.unique())):
test_temp["taxi_type_{}".format(sorted(test_set.taxi_type.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['taxi_type']):
test_temp['taxi_type_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='taxi_type', axis=1)
test_temp = {}
for i in range(len(test_set.customer_score_confidence.unique())):
test_temp["customer_score_confidence_{}".format(sorted(test_set.customer_score_confidence.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['customer_score_confidence']):
test_temp['customer_score_confidence_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='customer_score_confidence', axis=1)
test_temp = {}
for i in range(len(test_set.drop_location_type.unique())):
test_temp["drop_location_type_{}".format(sorted(test_set.drop_location_type.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['drop_location_type']):
test_temp['drop_location_type_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='drop_location_type', axis=1)
test_set = test_set.drop(labels='sex', axis=1)
test_set.insert(test_set.shape[1], "male", male[:, 0], True)
test_set.insert(test_set.shape[1], "female", male[:, 1], True)
test_set1 = test_set
print(test_set)
test_set['anon_var_1'].fillna(test_set['anon_var_1'].mean(), inplace=True)
test_set1 = test_set1.drop(labels='anon_var_1', axis=1)
print(training_labels.sum(axis=0))
# Using sklearn random forest classifier
clf = RandomForestClassifier(n_estimators=1000)
# Fitting the training data
clf.fit(training_set, training_labels)
print("Data fitting completed")
ans = clf.predict(test_set)
print("Data prediction completed")
print(test_id.shape)
print(ans.shape)
print(ans)
with open("output-random-forests.csv", "w") as fp:
fp.write("id,pricing_category\n")
for i in range(test_id.shape[0]):
fp.write("{},{}.0\n".format(test_id[i], ans[i]))
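# A minimal alternative sketch (not part of the original script): the manual
# one-hot blocks above can be written with pandas.get_dummies, which builds
# the same "<column>_<value>" indicator columns.
def one_hot(df, column):
    dummies = pd.get_dummies(df[column], prefix=column).astype("float32")
    return pd.concat([df.drop(columns=[column]), dummies], axis=1)
# e.g. training_set = one_hot(training_set, 'taxi_type')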
| 34.294118
| 167
| 0.750943
|
794f2fd2dd74d546dd881e0abc0fca6f405f1a94
| 6,707
|
py
|
Python
|
utilities/get_ifmtu.py
|
xod442/hpe-composable-fabric-sidekick
|
55c20b082c2ecacd8633bf3d085159100802ec2b
|
[
"Apache-2.0"
] | null | null | null |
utilities/get_ifmtu.py
|
xod442/hpe-composable-fabric-sidekick
|
55c20b082c2ecacd8633bf3d085159100802ec2b
|
[
"Apache-2.0"
] | 3
|
2019-06-27T18:29:44.000Z
|
2020-02-04T23:40:14.000Z
|
utilities/get_ifmtu.py
|
xod442/hpe-composable-fabric-sidekick
|
55c20b082c2ecacd8633bf3d085159100802ec2b
|
[
"Apache-2.0"
] | 2
|
2019-06-02T23:04:39.000Z
|
2019-08-07T17:20:08.000Z
|
# -*- coding: utf-8 -*-
# (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __version__ = "1.0.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "rick.a.kauffman@hpe.com"
# SNMPv2c
# Send an SNMP GET request using the following options:
# * with SNMPv2c, community 'public'
# * over IPv4/UDP, port 161
# * to the switch address passed to get_ifMtu_oids()
# * for the ifMtu OIDs (1.3.6.1.2.1.2.2.1.4.<ifIndex>) of interfaces 4 through 75
# Functionally similar to:
# $ snmpget -v2c -c public <switch> 1.3.6.1.2.1.2.2.1.4.4 ... 1.3.6.1.2.1.2.2.1.4.75
from pysnmp.hlapi import *
from pysnmp.smi import *
from pysnmp.proto import *
from pysnmp.entity import *
from pysnmp.carrier import *
def get_ifMtu_oids(switch_ipaddress):
counter=0
ifmtu_list=[]
errorIndication, errorStatus, errorIndex, varBinds = next(
getCmd(SnmpEngine(),
CommunityData('public'),
UdpTransportTarget((switch_ipaddress, 161)),
ContextData(),
# ifMtu (IF-MIB: 1.3.6.1.2.1.2.2.1.4.<ifIndex>) for interface indexes 4 through 75
*[ObjectType(ObjectIdentity('1.3.6.1.2.1.2.2.1.4.{0}'.format(i))) for i in range(4, 76)]
)
)
if errorIndication:
print(errorIndication)
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
for varBind in varBinds:
ifmtu_list.append(str(varBinds[counter][1]))
counter=counter+1
return ifmtu_list
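# A minimal usage sketch (not part of the original module). The address below
# is a hypothetical placeholder for a reachable switch exposing SNMP with
# community 'public'.
if __name__ == '__main__':
    print(get_ifMtu_oids('192.0.2.10'))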
| 49.316176
| 110
| 0.586104
|
794f2fe458f7a6d4ce34c809346d6c32c6818ddf
| 899
|
py
|
Python
|
torchbenchmark/util/framework/timm/timm_config.py
|
LaudateCorpus1/benchmark
|
2a8528f91dfbbd880e514b4deefa692a48c32af8
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/util/framework/timm/timm_config.py
|
LaudateCorpus1/benchmark
|
2a8528f91dfbbd880e514b4deefa692a48c32af8
|
[
"BSD-3-Clause"
] | null | null | null |
torchbenchmark/util/framework/timm/timm_config.py
|
LaudateCorpus1/benchmark
|
2a8528f91dfbbd880e514b4deefa692a48c32af8
|
[
"BSD-3-Clause"
] | null | null | null |
import torch.nn as nn
import dataclasses
from timm.optim import create_optimizer
@dataclasses.dataclass
class OptimizerOption:
lr: float
opt: str
weight_decay: float
momentum: float
class TimmConfig:
def __init__(self, model, device):
self.model = model
self.device = device
# Configurations
self.num_classes = self.model.num_classes
self.loss = nn.CrossEntropyLoss().to(self.device)
self.target_shape = tuple()
self.input_size = self.model.default_cfg["input_size"]
# Default optimizer configurations borrowed from:
# https://github.com/rwightman/pytorch-image-models/blob/779107b693010934ac87c8cecbeb65796e218488/timm/optim/optim_factory.py#L78
opt_args = OptimizerOption(lr=1e-4, opt="sgd", weight_decay = 0.0001, momentum = 0.9)
self.optimizer = create_optimizer(opt_args, self.model)
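# A minimal usage sketch (not part of the original module), assuming timm is
# installed and 'resnet18' exists in its model registry.
if __name__ == "__main__":
    import timm
    model = timm.create_model("resnet18", pretrained=False)
    cfg = TimmConfig(model, device="cpu")
    print(cfg.input_size, cfg.num_classes)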
| 35.96
| 137
| 0.707453
|
794f31047c5e87cabab1108511ed44da753531e1
| 14,383
|
py
|
Python
|
django/utils/functional.py
|
Dragon2zhao/Study
|
397b3705c5f762b359cdb494f9447c8a60685adf
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/utils/functional.py
|
Dragon2zhao/Study
|
397b3705c5f762b359cdb494f9447c8a60685adf
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/utils/functional.py
|
Dragon2zhao/Study
|
397b3705c5f762b359cdb494f9447c8a60685adf
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import copy
import operator
from functools import total_ordering, wraps
from django.utils import six
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))
return _curried
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __repr__(self):
return repr(self.__cast())
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__.keys():
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
cls.__str__ = cls.__bytes_cast_encoded
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode('utf-8')
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
return hash(self.__cast())
def __mod__(self, rhs):
if self._delegate_bytes and six.PY2:
return bytes(self) % rhs
elif self._delegate_text:
return six.text_type(self) % rhs
return self.__cast() % rhs
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def lazystr(text):
"""
Shortcut for the common case of a lazy callable that returns str.
"""
from django.utils.encoding import force_text # Avoid circular import
return lazy(force_text, six.text_type)(text)
def keep_lazy(*resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
if not resultclasses:
raise TypeError("You must pass at least one argument to keep_lazy().")
def decorator(func):
lazy_func = lazy(func, *resultclasses)
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + list(six.itervalues(kwargs)):
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy_func(*args, **kwargs)
return wrapper
return decorator
def keep_lazy_text(func):
"""
A decorator for functions that accept lazy arguments and return text.
"""
return keep_lazy(six.text_type)(func)
empty = object()
def new_method_proxy(func):
def inner(self, *args):
if self._wrapped is empty:
self._setup()
return func(self._wrapped, *args)
return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
# Note: if a subclass overrides __init__(), it will likely need to
# override __copy__() and __deepcopy__() as well.
self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is empty:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is empty:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. We're going to have to initialize the wrapped
# object to successfully pickle it, so we might as well just pickle the
# wrapped object since they're supposed to act the same way.
#
# Unfortunately, if we try to simply act like the wrapped object, the ruse
# will break down when pickle gets our id(). Thus we end up with pickle
# thinking, in effect, that we are a distinct object from the wrapped
# object, but with the same __dict__. This can cause problems (see #25389).
#
# So instead, we define our own __reduce__ method and custom unpickler. We
# pickle the wrapped object as the unpickler's argument, so that pickle
# will pickle it normally, and then the unpickler simply returns its
# argument.
def __reduce__(self):
if self._wrapped is empty:
self._setup()
return (unpickle_lazyobject, (self._wrapped,))
def __getstate__(self):
"""
Prevent older versions of pickle from trying to pickle the __dict__
(which in the case of a SimpleLazyObject may contain a lambda). The
value will be ignored by __reduce__() and the custom unpickler.
"""
return {}
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use type(self), not
# self.__class__, because the latter is proxied.
return type(self)()
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode) # NOQA: unicode undefined on PY3
__nonzero__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# List/Tuple/Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__iter__ = new_method_proxy(iter)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
"""
Used to unpickle lazy objects. Just return its argument, which will be the
wrapped object.
"""
return wrapped
class SimpleLazyObject(LazyObject):
"""
A lazy object initialized from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
super(SimpleLazyObject, self).__init__()
def _setup(self):
self._wrapped = self._setupfunc()
# Return a meaningful representation of the lazy object for debugging
# without evaluating the wrapped object.
def __repr__(self):
if self._wrapped is empty:
repr_attr = self._setupfunc
else:
repr_attr = self._wrapped
return '<%s: %r>' % (type(self).__name__, repr_attr)
def __copy__(self):
if self._wrapped is empty:
# If uninitialized, copy the wrapper. Use SimpleLazyObject, not
# self.__class__, because the latter is proxied.
return SimpleLazyObject(self._setupfunc)
else:
# If initialized, return a copy of the wrapped object.
return copy.copy(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
def partition(predicate, values):
"""
Splits the values into two sets, based on the return value of the function
(True/False). e.g.:
>>> partition(lambda x: x > 3, range(5))
[0, 1, 2, 3], [4]
"""
results = ([], [])
for item in values:
results[predicate(item)].append(item)
return results
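# A minimal usage sketch (not part of the original module), assuming Django is
# importable so that the `six` import at the top of this file succeeds.
if __name__ == "__main__":
    make_list = lazy(lambda n: list(range(n)), list)
    promise = make_list(3)          # nothing is evaluated yet
    print(promise == [0, 1, 2])     # comparison forces evaluation -> True
    obj = SimpleLazyObject(lambda: {"answer": 42})
    print(obj["answer"])            # _setup() runs on first item access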
| 34.491607
| 92
| 0.619203
|
794f3112b8c60e538c72f065c7bcf110f84a45b5
| 1,151
|
py
|
Python
|
cappa/private/pip3.py
|
Captricity/cappa
|
6fa541e145c7fac7c499a63f7625be0dc8d3ea5d
|
[
"MIT"
] | 3
|
2016-12-10T23:10:01.000Z
|
2017-12-28T19:38:49.000Z
|
cappa/private/pip3.py
|
Captricity/cappa
|
6fa541e145c7fac7c499a63f7625be0dc8d3ea5d
|
[
"MIT"
] | 33
|
2015-01-08T01:32:39.000Z
|
2020-04-01T21:46:17.000Z
|
cappa/private/pip3.py
|
Captricity/cappa
|
6fa541e145c7fac7c499a63f7625be0dc8d3ea5d
|
[
"MIT"
] | 1
|
2019-12-13T19:53:32.000Z
|
2019-12-13T19:53:32.000Z
|
from __future__ import print_function, absolute_import
import os
from ..pip3 import Pip3
class PrivatePip3(Pip3):
def __init__(self, org, *flags):
super(PrivatePip3, self).__init__(*flags)
self.org = org
def install(self, packages):
packages = self._private_package_dict(packages)
super(PrivatePip3, self).install(packages)
def _private_package_dict(self, packages):
def repo_url(repo_string):
repo_split = repo_string.split('@')
if len(repo_split) > 1:
repo, version = repo_split
else:
repo = repo_split[0]
version = 'master'
if self.private_https_oauth:
# Use https with oauth. Pulls token from env
token = os.environ['GITHUB_TOKEN']
return 'git+https://{}@github.com/{}/{}.git@{}'.format(token, self.org, repo, version)
else:
return 'git+ssh://git@github.com/{}/{}.git@{}'.format(self.org, repo, version)
private_package_dict = {repo_url(repo): None for repo in packages}
return private_package_dict
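# --- Hedged usage sketch (editor's addition; org and package names are hypothetical) ---
# Illustrates the URL forms _private_package_dict builds for a package pinned to
# a tag versus one left on the default 'master' ref. Whether the https/oauth or
# ssh form is used depends on the private_https_oauth flag inherited from Pip3.
def _example_install_private_packages():
    pip = PrivatePip3("my-org")  # assumes Pip3 accepts zero extra flags
    # With ssh (the non-oauth branch) the packages below expand to:
    #   git+ssh://git@github.com/my-org/my-lib.git@v1.2.0
    #   git+ssh://git@github.com/my-org/other-lib.git@master
    pip.install(["my-lib@v1.2.0", "other-lib"])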
| 32.885714
| 102
| 0.596003
|
794f321c650bb51e2ee8ee6b0a927d3afe0ca254
| 937
|
py
|
Python
|
submission/migrations/0028_auto_20181108_2301.py
|
jxtxzzw/eoj3
|
468c16ed6de8b9b542972d0e83b02fd2cfa35e4f
|
[
"MIT"
] | 1
|
2020-11-17T13:08:07.000Z
|
2020-11-17T13:08:07.000Z
|
submission/migrations/0028_auto_20181108_2301.py
|
zerolfx/eoj3
|
156060399d1c3e5f7bcdbf34eaffbe2be66e1b20
|
[
"MIT"
] | 2
|
2020-09-23T21:27:55.000Z
|
2021-06-25T15:24:46.000Z
|
submission/migrations/0028_auto_20181108_2301.py
|
zerolfx/eoj3
|
156060399d1c3e5f7bcdbf34eaffbe2be66e1b20
|
[
"MIT"
] | 1
|
2019-07-13T00:44:39.000Z
|
2019-07-13T00:44:39.000Z
|
# Generated by Django 2.1.3 on 2018-11-08 23:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submission', '0027_auto_20181106_1212'),
]
operations = [
migrations.AddField(
model_name='submission',
name='contest_time',
field=models.DurationField(blank=True, null=True),
),
migrations.AlterField(
model_name='submission',
name='status',
field=models.IntegerField(choices=[(-4, 'Submitted'), (-3, 'In queue'), (-2, 'Running'), (-1, 'Wrong answer'), (0, 'Accepted'), (1, 'Time limit exceeded'), (2, 'Idleness limit exceeded'), (3, 'Memory limit exceeded'), (4, 'Runtime error'), (5, 'Denial of judgement'), (6, 'Compilation error'), (7, 'Partial score'), (10, 'Rejected'), (11, 'Checker error'), (12, 'Pretest passed')], db_index=True, default=-4),
),
]
| 39.041667
| 421
| 0.595518
|
794f3282f5414773851a73482ee0b1b5bca5b17d
| 305
|
py
|
Python
|
examples/example_1.py
|
tgsmith61591/gh_automation
|
2c2168019571e4f8c6f71bc00ee092cf2240ccfd
|
[
"MIT"
] | null | null | null |
examples/example_1.py
|
tgsmith61591/gh_automation
|
2c2168019571e4f8c6f71bc00ee092cf2240ccfd
|
[
"MIT"
] | null | null | null |
examples/example_1.py
|
tgsmith61591/gh_automation
|
2c2168019571e4f8c6f71bc00ee092cf2240ccfd
|
[
"MIT"
] | null | null | null |
"""
=======
Example
=======
Here is an example "example" script (so meta!). You'd create examples like this
and the documentation Makefile will run them during the doc build.
.. raw:: html
<br/>
"""
print(__doc__)
# Author: Taylor Smith <taylor.smith@alkaline-ml.com>
print("Here's an example!")
| 16.052632
| 79
| 0.665574
|
794f33150646f19bef216142286d69c2e3a66798
| 547
|
py
|
Python
|
buzzer.py
|
stijojoseph/SMART-AI-BLIND-STICK-WITH-OBJECT-RECOGNITION-VOICEFEEDBACK-USING-RASPBERRY-PI
|
a41e4c3fb88edafbae98649af09471485e9bcdd7
|
[
"MIT"
] | 3
|
2020-06-05T07:57:42.000Z
|
2021-11-11T19:00:10.000Z
|
buzzer.py
|
stijojoseph/SMART-AI-BLIND-STICK-WITH-OBJECT-RECOGNITION-VOICEFEEDBACK-USING-RASPBERRY-PI
|
a41e4c3fb88edafbae98649af09471485e9bcdd7
|
[
"MIT"
] | 1
|
2022-02-19T08:44:56.000Z
|
2022-02-19T13:42:15.000Z
|
buzzer.py
|
stijojoseph/SMART-AI-BLIND-STICK-WITH-OBJECT-RECOGNITION-VOICEFEEDBACK-USING-RASPBERRY-PI
|
a41e4c3fb88edafbae98649af09471485e9bcdd7
|
[
"MIT"
] | 3
|
2020-11-04T15:02:06.000Z
|
2022-02-19T13:44:00.000Z
|
import RPi.GPIO as GPIO
from time import sleep
import time
# GPIO.setwarnings(False) can be called here to disable warnings (optional)
GPIO.setmode(GPIO.BCM) # choose BCM or BOARD numbering schemes. I use BCM
GPIO.setup(25, GPIO.OUT)# set GPIO 25 as an output. You can use any GPIO port
p = GPIO.PWM(25, 50) # create an object p for PWM on port 25 at 50 Hertz
def buz():
p.start(70) # start the PWM on 70 percent duty cycle
    p.ChangeFrequency(800)  # change the frequency to 800 Hz
    time.sleep(1)  # sound the buzzer for about one second
def buztop():
p.stop()
# stop the PWM output
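# --- Hedged usage sketch (editor's addition) ---
# Sounds the buzzer once and then releases the GPIO pin.
def example_beep_once():
    buz()            # 70% duty cycle at 800 Hz; buz() sleeps for ~1 s
    buztop()         # stop the PWM output
    GPIO.cleanup()   # free GPIO 25 when finished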
| 30.388889
| 79
| 0.661792
|
794f33c2c4a771c15046b94c39103212954cb617
| 36
|
py
|
Python
|
Hello_World_NFT.py
|
tbotskina/Hello_World_NFT
|
c4f06f17a776c605c7eb1649464929d9fe6a34fe
|
[
"Apache-2.0"
] | null | null | null |
Hello_World_NFT.py
|
tbotskina/Hello_World_NFT
|
c4f06f17a776c605c7eb1649464929d9fe6a34fe
|
[
"Apache-2.0"
] | null | null | null |
Hello_World_NFT.py
|
tbotskina/Hello_World_NFT
|
c4f06f17a776c605c7eb1649464929d9fe6a34fe
|
[
"Apache-2.0"
] | null | null | null |
print("Hello Decentralised World!")
| 18
| 35
| 0.777778
|
794f34866d1852bae90774cf16ba4a69e4eaac77
| 5,968
|
py
|
Python
|
src/ingest-pipeline/airflow/dags/validate_upload.py
|
AustinHartman/ingest-pipeline
|
788d9310792c9396a38650deda3dad11483b368c
|
[
"MIT"
] | 6
|
2020-02-18T19:09:59.000Z
|
2021-10-07T20:38:46.000Z
|
src/ingest-pipeline/airflow/dags/validate_upload.py
|
AustinHartman/ingest-pipeline
|
788d9310792c9396a38650deda3dad11483b368c
|
[
"MIT"
] | 324
|
2020-02-06T22:08:50.000Z
|
2022-03-24T20:44:33.000Z
|
src/ingest-pipeline/airflow/dags/validate_upload.py
|
AustinHartman/ingest-pipeline
|
788d9310792c9396a38650deda3dad11483b368c
|
[
"MIT"
] | 2
|
2020-07-20T14:43:49.000Z
|
2021-10-29T18:24:36.000Z
|
import sys
import os
import ast
import json
from pathlib import Path
from pprint import pprint
from datetime import datetime, timedelta
from airflow import DAG
from airflow.configuration import conf as airflow_conf
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
import utils
from utils import (
get_tmp_dir_path, get_auth_tok,
map_queue_name, pythonop_get_dataset_state,
localized_assert_json_matches_schema as assert_json_matches_schema
)
sys.path.append(airflow_conf.as_dict()['connections']['SRC_PATH']
.strip("'").strip('"'))
from submodules import (ingest_validation_tools_upload, # noqa E402
ingest_validation_tools_error_report,
ingest_validation_tests)
sys.path.pop()
# Following are defaults which can be overridden later on
default_args = {
'owner': 'hubmap',
'depends_on_past': False,
'start_date': datetime(2019, 1, 1),
'email': ['joel.welling@gmail.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=1),
'xcom_push': True,
'queue': map_queue_name('general')
}
with DAG('validate_upload',
schedule_interval=None,
is_paused_upon_creation=False,
user_defined_macros={'tmp_dir_path' : get_tmp_dir_path},
default_args=default_args,
) as dag:
def find_uuid(**kwargs):
uuid = kwargs['dag_run'].conf['uuid']
def my_callable(**kwargs):
return uuid
ds_rslt = pythonop_get_dataset_state(
dataset_uuid_callable=my_callable,
http_conn_id='ingest_api_connection',
**kwargs
)
if not ds_rslt:
raise AirflowException(f'Invalid uuid/doi for group: {uuid}')
print('ds_rslt:')
pprint(ds_rslt)
for key in ['status', 'uuid', 'data_types',
'local_directory_full_path']:
assert key in ds_rslt, f"Dataset status for {uuid} has no {key}"
if False: # not ds_rslt['status'] in ['Processing']:
raise AirflowException(f'Dataset {uuid} is not Processing')
lz_path = ds_rslt['local_directory_full_path']
uuid = ds_rslt['uuid'] # 'uuid' may actually be a DOI
print(f'Finished uuid {uuid}')
print(f'lz path: {lz_path}')
kwargs['ti'].xcom_push(key='lz_path', value=lz_path)
kwargs['ti'].xcom_push(key='uuid', value=uuid)
t_find_uuid = PythonOperator(
task_id='find_uuid',
python_callable=find_uuid,
provide_context=True,
op_kwargs={
}
)
def run_validation(**kwargs):
lz_path = kwargs['ti'].xcom_pull(key='lz_path')
uuid = kwargs['ti'].xcom_pull(key='uuid')
plugin_path = [path for path in ingest_validation_tests.__path__][0]
ignore_globs = [uuid, 'extras', '*metadata.tsv',
'validation_report.txt']
#
        # Uncomment offline=True below to avoid validating orcid_id URLs etc.
#
upload = ingest_validation_tools_upload.Upload(
directory_path=Path(lz_path),
dataset_ignore_globs=ignore_globs,
upload_ignore_globs='*',
plugin_directory=plugin_path,
#offline=True, # noqa E265
add_notes=False
)
# Scan reports an error result
report = ingest_validation_tools_error_report.ErrorReport(
upload.get_errors(plugin_kwargs=kwargs)
)
validation_file_path = Path(get_tmp_dir_path(kwargs['run_id'])) / 'validation_report.txt'
with open(validation_file_path, 'w') as f:
f.write(report.as_text())
kwargs['ti'].xcom_push(key='validation_file_path', value=str(validation_file_path))
t_run_validation = PythonOperator(
task_id='run_validation',
python_callable=run_validation,
provide_context=True,
queue=utils.map_queue_name('validate'),
op_kwargs={
}
)
def send_status_msg(**kwargs):
validation_file_path = Path(kwargs['ti'].xcom_pull(key='validation_file_path'))
uuid = kwargs['ti'].xcom_pull(key='uuid')
conn_id = ''
endpoint = f'/entities/{uuid}'
headers = {
'authorization': 'Bearer ' + get_auth_tok(**kwargs),
            'X-Hubmap-Application': 'ingest-pipeline',
'content-type': 'application/json',
}
extra_options = []
        http_conn_id = 'entity_api_connection'
http_hook = HttpHook('PUT', http_conn_id=http_conn_id)
with open(validation_file_path) as f:
report_txt = f.read()
if report_txt.startswith('No errors!'):
data = {
"status":"Valid",
}
else:
data = {
"status":"Invalid",
"validation_message" : report_txt
}
print('data: ')
pprint(data)
response = http_hook.run(
endpoint,
json.dumps(data),
headers,
extra_options,
)
print('response: ')
pprint(response.json())
t_send_status = PythonOperator(
task_id='send_status',
python_callable=send_status_msg,
provide_context=True,
op_kwargs={
}
)
t_create_tmpdir = BashOperator(
task_id='create_temp_dir',
bash_command='mkdir {{tmp_dir_path(run_id)}}'
)
t_cleanup_tmpdir = BashOperator(
task_id='cleanup_temp_dir',
bash_command='rm -r {{tmp_dir_path(run_id)}}',
trigger_rule='all_success'
)
(dag >> t_create_tmpdir >> t_find_uuid >> t_run_validation
>> t_send_status >> t_cleanup_tmpdir)
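# --- Hedged usage note (editor's addition) ---
# The DAG expects the triggering dag_run's conf to carry the upload UUID; a
# payload equivalent to the dict below (value illustrative only) is what
# find_uuid() reads via kwargs['dag_run'].conf['uuid'] before xcom-pushing
# 'uuid' and 'lz_path' for the downstream validation and status tasks.
EXAMPLE_TRIGGER_CONF = {'uuid': '<upload-uuid>'}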
| 32.434783
| 97
| 0.614779
|
794f34c2cc2fd73295d7684e3dc2a1b416e6fc8d
| 5,028
|
py
|
Python
|
augmentation/proxy_rep_augmentation.py
|
akashsengupta1997/STRAPS-3DHumanShapePose
|
f649a5790cd1ace15d2fa2e06908f0633ee24097
|
[
"MIT"
] | 118
|
2020-08-08T06:35:12.000Z
|
2022-03-27T09:55:45.000Z
|
augmentation/proxy_rep_augmentation.py
|
akashsengupta1997/STRAPS-3DHumanShapePose
|
f649a5790cd1ace15d2fa2e06908f0633ee24097
|
[
"MIT"
] | 23
|
2020-09-25T09:29:00.000Z
|
2022-02-16T07:51:33.000Z
|
augmentation/proxy_rep_augmentation.py
|
akashsengupta1997/STRAPS-3DHumanShapePose
|
f649a5790cd1ace15d2fa2e06908f0633ee24097
|
[
"MIT"
] | 17
|
2020-08-16T16:47:41.000Z
|
2022-03-27T09:35:07.000Z
|
import torch
import numpy as np
def random_verts2D_deviation(vertices, delta_verts2d_dev_range=[-0.01, 0.01]):
"""
Randomly add 2D uniform noise to vertices to create silhouettes/part segmentations with
corrupted edges.
:param vertices: (bs, 6890, 3)
:param delta_verts2d_dev_range: range of uniform noise.
"""
batch_size = vertices.shape[0]
num_verts = vertices.shape[1]
device = vertices.device
noisy_vertices = vertices.clone()
l, h = delta_verts2d_dev_range
delta_verts2d_dev = (h - l) * torch.rand(batch_size, num_verts, 2, device=device) + l
noisy_vertices[:, :, :2] = noisy_vertices[:, :, :2] + delta_verts2d_dev
return noisy_vertices
def random_joints2D_deviation(joints2D,
delta_j2d_dev_range=[-5, 5],
delta_j2d_hip_dev_range=[-15, 15]):
"""
Deviate 2D joint locations with uniform random noise.
    :param joints2D: (bs, num joints, 2)
:param delta_j2d_dev_range: uniform noise range.
:param delta_j2d_hip_dev_range: uniform noise range for hip joints. You may wish to make
this bigger than for other joints since hip joints are semantically hard to localise and
can be predicted inaccurately by joint detectors.
"""
hip_joints = [11, 12]
    other_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 14, 15, 16]
batch_size = joints2D.shape[0]
device = joints2D.device
l, h = delta_j2d_dev_range
delta_j2d_dev = (h - l) * torch.rand(batch_size, len(other_joints), 2, device=device) + l
joints2D[:, other_joints, :] = joints2D[:, other_joints, :] + delta_j2d_dev
l, h = delta_j2d_hip_dev_range
    delta_j2d_hip_dev = (h - l) * torch.rand(batch_size, len(hip_joints), 2, device=device) + l
    joints2D[:, hip_joints, :] = joints2D[:, hip_joints, :] + delta_j2d_hip_dev
return joints2D
def random_remove_bodyparts(seg, classes_to_remove, probabilities_to_remove):
"""
Randomly remove bodyparts from silhouette/segmentation (i.e. set pixels to background
class).
:param seg: (bs, wh, wh)
:param classes_to_remove: list of classes to remove. Classes are integers (as defined in
nmr_renderer.py).
:param probabilities_to_remove: probability of removal for each class.
"""
assert len(classes_to_remove) == len(probabilities_to_remove)
batch_size = seg.shape[0]
for i in range(len(classes_to_remove)):
class_to_remove = classes_to_remove[i]
prob_to_remove = probabilities_to_remove[i]
# Determine which samples to augment in the batch
rand_vec = np.random.rand(batch_size) < prob_to_remove
samples_to_augment = seg[rand_vec].clone()
samples_to_augment[samples_to_augment == class_to_remove] = 0
seg[rand_vec] = samples_to_augment
return seg
def random_occlude(seg, occlude_probability=0.5, occlude_box_dim=48):
"""
Randomly occlude silhouette/part segmentation with boxes.
:param seg: (bs, wh, wh)
"""
batch_size = seg.shape[0]
seg_wh = seg.shape[-1]
seg_centre = seg_wh/2
x_h, x_l = seg_centre - 0.3*seg_wh/2, seg_centre + 0.3*seg_wh/2
y_h, y_l = seg_centre - 0.3*seg_wh/2, seg_centre + 0.3*seg_wh/2
x = (x_h - x_l) * np.random.rand(batch_size) + x_l
y = (y_h - y_l) * np.random.rand(batch_size) + y_l
box_x1 = (x - occlude_box_dim / 2).astype(np.int16)
box_x2 = (x + occlude_box_dim / 2).astype(np.int16)
box_y1 = (y - occlude_box_dim / 2).astype(np.int16)
box_y2 = (y + occlude_box_dim / 2).astype(np.int16)
rand_vec = np.random.rand(batch_size)
for i in range(batch_size):
if rand_vec[i] < occlude_probability:
seg[i, box_x1[i]:box_x2[i], box_y1[i]:box_y2[i]] = 0
return seg
def augment_proxy_representation(orig_segs, orig_joints2D,
proxy_rep_augment_params):
new_segs = orig_segs.clone()
new_joints2D = orig_joints2D.clone()
if proxy_rep_augment_params['remove_appendages']:
new_segs = random_remove_bodyparts(new_segs,
classes_to_remove=proxy_rep_augment_params['remove_appendages_classes'],
probabilities_to_remove=proxy_rep_augment_params['remove_appendages_probabilities'])
if proxy_rep_augment_params['occlude_seg']:
new_segs = random_occlude(new_segs,
occlude_probability=proxy_rep_augment_params['occlude_probability'],
occlude_box_dim=proxy_rep_augment_params['occlude_box_dim'])
if proxy_rep_augment_params['deviate_joints2D']:
new_joints2D = random_joints2D_deviation(new_joints2D,
delta_j2d_dev_range=proxy_rep_augment_params['delta_j2d_dev_range'],
delta_j2d_hip_dev_range=proxy_rep_augment_params['delta_j2d_hip_dev_range'])
return new_segs, new_joints2D
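# --- Hedged usage sketch (editor's addition) ---
# Applies all three augmentations to a random batch. The class indices,
# probabilities and ranges below are illustrative, not values from any
# particular training configuration.
def _example_augment_batch():
    segs = torch.randint(0, 7, (4, 64, 64))      # (bs, wh, wh) part-segmentation labels
    joints2D = torch.rand(4, 17, 2) * 64         # (bs, 17 joints, xy) detector output
    params = {'remove_appendages': True,
              'remove_appendages_classes': [1, 2],
              'remove_appendages_probabilities': [0.5, 0.5],
              'occlude_seg': True,
              'occlude_probability': 0.5,
              'occlude_box_dim': 16,
              'deviate_joints2D': True,
              'delta_j2d_dev_range': [-5, 5],
              'delta_j2d_hip_dev_range': [-15, 15]}
    return augment_proxy_representation(segs, joints2D, params)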
| 39.590551
| 127
| 0.665672
|
794f35999545c0ac62c9e932e8c994b38ccb923d
| 1,883
|
py
|
Python
|
test.py
|
russlamb/file_convert
|
bcae13390da4728515304288951e9bd3ada8ce06
|
[
"MIT"
] | null | null | null |
test.py
|
russlamb/file_convert
|
bcae13390da4728515304288951e9bd3ada8ce06
|
[
"MIT"
] | null | null | null |
test.py
|
russlamb/file_convert
|
bcae13390da4728515304288951e9bd3ada8ce06
|
[
"MIT"
] | null | null | null |
import os
import unittest
from csv_to_xlsx.convert import FileConvert
def delete_files_if_exists(path_list):
for path in path_list:
if os.path.exists(path):
os.remove(path)
class TestConvert(unittest.TestCase):
def setUp(self):
delete_files_if_exists(["samples/FL_insurance_sample.csv.tsv", "samples/FL_insurance_sample.csv.tsv.xlsx",
"samples/FL_insurance_sample.csv.xlsx",
"samples/my_tsv.tsv","samples/my_output.xlsx"])
def tearDown(self):
delete_files_if_exists(["samples/FL_insurance_sample.csv.tsv", "samples/FL_insurance_sample.csv.tsv.xlsx",
"samples/FL_insurance_sample.csv.xlsx",
"samples/my_tsv.tsv","samples/my_output.xlsx"])
    def test_convert_csv_xl(self):
        csv = "samples/FL_insurance_sample.csv"
        converter = FileConvert(csv)
        tsv_file = converter.save_csv_as_tsv()
        self.assertTrue(os.path.exists(tsv_file))
        self.assertTrue(tsv_file == (csv + ".tsv"))
        converter = FileConvert(tsv_file)
        xlsx_file = converter.save_tsv_as_xlsx()
        self.assertTrue(os.path.exists(xlsx_file))
        self.assertTrue(xlsx_file == (tsv_file + ".xlsx"))
    def test_with_output_path(self):
        csv = "samples/FL_insurance_sample.csv"
        converter = FileConvert(csv)
        tsv_file = converter.save_csv_as_tsv(output_path="samples/my_tsv.tsv")
        self.assertTrue(os.path.exists(tsv_file))
        self.assertFalse(tsv_file == (csv + ".tsv"))
        converter = FileConvert(tsv_file)
        xlsx_file = converter.save_tsv_as_xlsx(output_path="samples/my_output.xlsx")
        self.assertTrue(os.path.exists(xlsx_file))
        self.assertFalse(xlsx_file == (tsv_file + ".xlsx"))
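# --- Hedged usage sketch (editor's addition) ---
# The conversion chain exercised by the tests above, outside of unittest:
# csv -> tsv -> xlsx, each step returning the path it wrote.
def example_convert_chain(csv_path="samples/FL_insurance_sample.csv"):
    tsv_path = FileConvert(csv_path).save_csv_as_tsv()
    xlsx_path = FileConvert(tsv_path).save_tsv_as_xlsx()
    return xlsx_path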
if __name__ == "__main__":
unittest.main()
| 35.528302
| 114
| 0.651089
|
794f35a7f8dbf9bdcfd57830c3d34e365fbe607f
| 3,307
|
py
|
Python
|
personality/personality_classifiers.py
|
Datalab-AUTH/MSc---Lampridis---MANIFEST
|
9c13018313f2681dd27ef56ac0eb8470319a1749
|
[
"Apache-2.0"
] | 3
|
2021-03-28T20:13:11.000Z
|
2021-08-23T05:52:27.000Z
|
personality/personality_classifiers.py
|
Datalab-AUTH/MSc---Lampridis---MANIFEST
|
9c13018313f2681dd27ef56ac0eb8470319a1749
|
[
"Apache-2.0"
] | null | null | null |
personality/personality_classifiers.py
|
Datalab-AUTH/MSc---Lampridis---MANIFEST
|
9c13018313f2681dd27ef56ac0eb8470319a1749
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from skmultilearn.problem_transform import ClassifierChain
from utils.preprocessing import clean_text
def list2string(tokens):
    return ','.join(map(str, tokens))
file_tweets = "new_personality_combined.csv"
file_personalities = "personality-data.txt"
data_tweets = pd.read_csv(file_tweets, sep=",", encoding="utf8", index_col=0)
data_personalities = pd.read_csv(file_personalities, sep="\t", encoding="utf8", index_col=10)
print(data_tweets)
# Join the two dataframes together
merged_df = pd.merge(data_tweets, data_personalities, on='twitter_uid', how='inner')
merged_df.reset_index(drop=True, inplace=True)
# The personality categories are the remaining columns (after the text columns)
personality_categories = list(merged_df.columns.values)[2:]
# Print dataset statistics
print("Final number of data in personality dataset =", merged_df.shape[0])
print("Number of personality categories =", len(personality_categories))
print("Personality categories =", ', '.join(personality_categories))
print(merged_df['statuses'])
merged_df['statuses'] = merged_df.statuses.apply(clean_text)
print(merged_df['statuses'])
merged_df['statuses'] = [list2string(tokens) for tokens in merged_df['statuses']]
# Split the personality categories into 3 quantiles to convert the problem to classification
bins = 3
labels = [0, 1, 2]
merged_df['ADMIRATION'] = pd.cut(merged_df['ADMIRATION'], bins, labels=labels)
merged_df['AGRE'] = pd.cut(merged_df['AGRE'], bins, labels=labels)
merged_df['ANXIETY'] = pd.cut(merged_df['ANXIETY'], bins, labels=labels)
merged_df['AVOIDANCE'] = pd.cut(merged_df['AVOIDANCE'], bins, labels=labels)
merged_df['CONS'] = pd.cut(merged_df['CONS'], bins, labels=labels)
merged_df['EXTR'] = pd.cut(merged_df['EXTR'], bins, labels=labels)
merged_df['NARCISSISM'] = pd.cut(merged_df['NARCISSISM'], bins, labels=labels)
merged_df['NEUR'] = pd.cut(merged_df['NEUR'], bins, labels=labels)
merged_df['OPEN'] = pd.cut(merged_df['OPEN'], bins, labels=labels)
merged_df['RIVALRY'] = pd.cut(merged_df['RIVALRY'], bins, labels=labels)
print(merged_df)
# Split the data to train and test
train, test = train_test_split(merged_df, random_state=42, test_size=0.25)
x_train = train['statuses']
x_test = test['statuses']
y_train = train.drop(labels=['statuses', 'tweets'], axis=1).to_numpy(dtype=int)
y_test = test.drop(labels=['statuses', 'tweets'], axis=1).to_numpy(dtype=int)
print(y_train)
print(type(x_train))
print(type(y_train))
# Classifier Chains approach
print("---Classifier Chains---")
classifier_chains = Pipeline([
('tfidf', TfidfVectorizer(encoding='utf-8', ngram_range=(1, 3))),
('clf', ClassifierChain(LinearSVC())),
])
classifier_chains.fit(x_train, y_train)
predictions_chains = classifier_chains.predict(x_test)
predictions_chains = predictions_chains.toarray().astype(dtype=int)
print("predictions: ", predictions_chains)
print(y_test)
# print("accuracy_score:", accuracy_score(y_test, predictions_chains))
# print("Hamming_loss:", hamming_loss(y_test, predictions_chains))
directory = '../classifiers/personality'
pickle.dump(classifier_chains, open(directory + '/classifier_chains_SVC', 'wb'))
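# --- Hedged usage sketch (editor's addition) ---
# Reload the persisted pipeline and score a new (illustrative) status string.
# The pickle path matches the dump above; predictions come back as a sparse
# matrix with one column per personality category.
def example_load_and_predict():
    with open(directory + '/classifier_chains_SVC', 'rb') as f:
        clf = pickle.load(f)
    preds = clf.predict(["i love hiking and meeting new people"])
    return preds.toarray().astype(int)  # quantile labels in {0, 1, 2}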
| 35.180851
| 93
| 0.761415
|
794f36490fce422761e56c03de5937f389891839
| 2,713
|
py
|
Python
|
unported scripts/magnification.py
|
KirovskiXVI/dicom-sr-qi
|
810f367d0845f4f47c3ee914502cf973b4f9b336
|
[
"BSD-2-Clause"
] | null | null | null |
unported scripts/magnification.py
|
KirovskiXVI/dicom-sr-qi
|
810f367d0845f4f47c3ee914502cf973b4f9b336
|
[
"BSD-2-Clause"
] | null | null | null |
unported scripts/magnification.py
|
KirovskiXVI/dicom-sr-qi
|
810f367d0845f4f47c3ee914502cf973b4f9b336
|
[
"BSD-2-Clause"
] | null | null | null |
"""Makes box plots for DAP and exposure
vs magnification. Can change some things
in line to change the data source (bjh vs. slch)
and to decide between graphing DAP and Exposure
"""
import my_utils
import srdata
import csv
import matplotlib
def build_table():
procs = srdata.process_file(my_utils.BJH_XML_FILE, my_utils.BJH_SYNGO_FILES)
procs = procs + srdata.process_file(my_utils.SLCH_XML_FILE, my_utils.SLCH_SYNGO_FILES)
#procs = srdata.process_file(my_utils.SLCH_XML_FILE, my_utils.SLCH_SYNGO_FILES)
dose_lookup = {}
exposure_lookup = {}
DAP_lookup = {}
for proc in procs:
for e in proc.events:
if e.is_valid() and e.Irradiation_Event_Type == "Fluoroscopy":
if not e.iiDiameter in dose_lookup:
dose_lookup[e.iiDiameter] = []
exposure_lookup[e.iiDiameter] = []
DAP_lookup[e.iiDiameter] = []
dose_lookup[e.iiDiameter].append(e.Dose_RP/e.Number_of_Pulses)
exposure_lookup[e.iiDiameter].append(e.Exposure/e.Number_of_Pulses)
DAP_lookup[e.iiDiameter].append(e.Dose_Area_Product/e.Number_of_Pulses)
return (dose_lookup, exposure_lookup, DAP_lookup)
def write_csv(lookup):
table = []
for diameter, exposures in lookup.iteritems():
row = [str(diameter)]
row = row + [e for e in exposures]
table.append(row)
table = my_utils.transposed(table)
with open("temp.csv",'wb') as f:
w = csv.writer(f)
w.writerows(table)
import matplotlib.pyplot as plt
def plot(lookup):
data = []
for iiDiameter in sorted(lookup.keys()):
data.append(lookup[iiDiameter])
plt.boxplot(data, sym='')
plt.setp(plt.gca(),'xticklabels',sorted(lookup.keys()))
plt.show()
def setup_DAP_axes():
plt.title("DAP vs. Magnification")
plt.xlabel("iiDiameter")
plt.ylabel("DAP (Gy*m^2)")
def setup_exposure_axes():
plt.title("Exposure vs. Magnification")
plt.xlabel("iiDiameter")
plt.ylabel("Exposure (uAs)")
def main():
dose_lookup,exposure_lookup,DAP_lookup = build_table()
plt.figure(1)
#setup_DAP_axes()
#plot(DAP_lookup)
setup_exposure_axes()
plot(exposure_lookup)
#write_csv(DAP_lookup)
if __name__ == "__main__":
main()
| 36.662162
| 104
| 0.563583
|
794f36736e145f45ce11d87afd1e4629f3557275
| 1,562
|
py
|
Python
|
ossim/mat/views.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | 8
|
2019-03-09T14:53:33.000Z
|
2021-06-06T11:22:59.000Z
|
ossim/mat/views.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | 1
|
2019-12-01T23:14:27.000Z
|
2019-12-01T23:14:27.000Z
|
ossim/mat/views.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | 10
|
2017-05-11T13:45:40.000Z
|
2020-03-28T06:25:50.000Z
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
import json
def home_page(request):
return render(request, 'mat/index.html')
def mat_mft_get_data(request):
variables = ['total_memory', 'no_of_blocks']
submitted = True
context = {}
block_size = [0,1,2,3,4,5,6,7,8,0,1,1,1,1,1]
context['invalid_data'] = False
for x in variables:
submitted = request.POST.get(x,False)
if submitted is False:
print("Hello")
break
else:
try:
context[x] = int(request.POST.get(x))
except ValueError:
context['invalid_data'] = True
break
if context['invalid_data'] is False and submitted:
for i in range(1, context['no_of_blocks']+1):
block_size[i] = request.POST.get('block_size_' + str(i))
context['block_size'] = json.dumps(block_size)
if submitted is False or context['invalid_data'] is True:
return render(request, 'mat/mft/get_data.html', context)
else:
return render(request, 'mat/mft/show_demo.html', context)
def mat_mvt_get_data(request):
if request.POST:
context = {
'totalMemory' : request.POST['total_memory'],
'inputMemory' : request.POST['input_memory']
}
return render(request, 'mat/mvt/show_demo.html', context)
return render(request, 'mat/mvt/get_data.html')
| 31.877551
| 68
| 0.629962
|
794f36dd47d275d417360dca6fd88f55ddf2f57f
| 729
|
py
|
Python
|
account/migrations/0003_auto_20190816_1426.py
|
lilianwaweru/Bank
|
05ef2b86beec98d1cf31f3da168bbf32efaa1e3f
|
[
"MIT"
] | null | null | null |
account/migrations/0003_auto_20190816_1426.py
|
lilianwaweru/Bank
|
05ef2b86beec98d1cf31f3da168bbf32efaa1e3f
|
[
"MIT"
] | 7
|
2020-02-12T02:30:17.000Z
|
2021-10-06T02:49:08.000Z
|
account/migrations/0003_auto_20190816_1426.py
|
lilianwaweru/Bank
|
05ef2b86beec98d1cf31f3da168bbf32efaa1e3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-16 14:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_banks'),
]
operations = [
migrations.CreateModel(
name='Access',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transaction_access', models.CharField(max_length=70)),
],
),
migrations.AlterField(
model_name='history',
name='Name',
field=models.CharField(max_length=70),
),
]
| 26.035714
| 114
| 0.577503
|
794f378662d023a782bb07fe1d16cb6df422dc5e
| 35,835
|
py
|
Python
|
quantum_heom/quantum_system.py
|
jwa7/h.e.o.m-quantum
|
e8835ba56f59a7e70aacc988cdba541fa085cf7e
|
[
"MIT"
] | 8
|
2020-03-19T23:37:37.000Z
|
2021-11-17T11:35:20.000Z
|
quantum_heom/quantum_system.py
|
jwa7/quantum-HEOM
|
e8835ba56f59a7e70aacc988cdba541fa085cf7e
|
[
"MIT"
] | 3
|
2019-12-17T09:48:34.000Z
|
2021-05-17T05:17:27.000Z
|
quantum_heom/quantum_system.py
|
jwa7/quantum-HEOM
|
e8835ba56f59a7e70aacc988cdba541fa085cf7e
|
[
"MIT"
] | 3
|
2020-03-19T23:37:29.000Z
|
2021-07-11T06:33:34.000Z
|
"""Module for setting up a quantum system. Contains
the QuantumSystem class."""
from scipy import constants
import numpy as np
from quantum_heom import evolution as evo
from quantum_heom import hamiltonian as ham
from quantum_heom import heom
from quantum_heom import lindbladian as lind
from quantum_heom.bath import SPECTRAL_DENSITIES
from quantum_heom.evolution import (TEMP_DEP_MODELS,
DYNAMICS_MODELS)
from quantum_heom.hamiltonian import INTERACTION_MODELS
from quantum_heom.lindbladian import LINDBLAD_MODELS
class QuantumSystem:
"""
Class where the properties of the quantum system are defined.
Parameters
----------
sites : int (required)
The number of sites in the system.
interaction_model : str (required)
How to model the interactions between sites. Must be
one of ['nearest neighbour linear',
'nearest neighbour cyclic', 'FMO', 'spin-boson']. FMO is
only valid for 7-site systems and spin-boson only for 2.
dynamics_model : str (required)
The model used to describe the system dynamics. Must
be one of ['local dephasing lindblad', 'local thermalising
lindblad', 'global thermalising lindblad', 'HEOM'].
**settings
init_site_pop : list of int
The sites in which to place initial population. For
example, to place equal population in sites 1 and 6
(in a 7-site system), the user should pass [1, 6]. To
place twice as much initial population in 3 as in 4,
pass [3, 3, 4]. Default value is [1], which populates
only site 1.
alpha_beta : tuple of float
The values of alpha and beta (respectively) to use
in Hamiltonian construction. Alpha sets the value of
the site energies (diagonals), while beta sets the
strength of the interaction between sites. Default
            value is (20., -15.5) in units of rad ps^-1.
epsi_delta : tuple of float
The (epsi, delta) values used to construct the system
Hamiltonian for the 'spin-boson' model. Must be passed in
units of rad ps^-1. epsi corresponds to the total energy of
            the 2-site system, whilst delta corresponds to the strength of
            tunnelling between sites. Default is (20., 40.).
time_interval : float
The time interval between timesteps at which the system
density matrix is evaluated, in units of femtoseconds.
Default time interval is 5 fs.
timesteps : int
The number of timesteps for which the time evolution
of the system is evaluated. Default value is 500.
temperature : float
The temperature of the thermal bath, in Kelvin. Default
value is 300 K.
spectral_density : str
            The spectral density used to describe the interaction
of the system with the bath modes. Must be either
'debye', 'ohmic', or 'renger-marcus'. Systems with HEOM
dynamics can currently only be described by Debye
spectral densities.
deph_rate : float
The dephasing rate constant of the system, in units of
rad ps^-1, used in the local dephasing lindblad model.
reorg_energy : float
The scale factor used to match thermalisation rates
between dynamics models in units of rad ps^-1. Default
value is 11.87 rad ps^-1. Used in thermalising Lindblad
models.
cutoff_freq : float
The cutoff frequency used in calculating the spectral
density, in rad ps^-1. Default value is 6.024 rad ps^-1.
        matsubara_terms : int
            The number of Matsubara terms, i.e. the number of
            exponentials to include in evaluation of the
            correlation function. Default value is 2.
matsubara_coeffs : np.ndarray
The matsubara coefficients c_k used in calculating the
spectral density for the HEOM approach. Must be in
order (largest -> smallest), where the nth coefficient
corresponds to the nth matsubara term. Default is None;
QuTiP's HEOMSolver automatically generates them.
matsubara_freqs : np.ndarray
The matsubara frequencies v_k used in calculating the
spectral density for the HEOM approach, in units of rad
ps^-1. Must be in order (smallest -> largest), where
the kth frequency corresponds to the kth matsubara term.
Default is None; QuTiP's HEOMSolver automatically
generates them.
bath_cutoff : int
The number of bath terms to include in the HEOM
evaluation of the system dynamics. Default value is 20.
"""
def __init__(self, sites, interaction_model, dynamics_model, **settings):
# SITES SETTINGS
self.sites = sites
if settings.get('init_site_pop') is not None:
self._init_site_pop = settings.get('init_site_pop')
else:
self._init_site_pop = [1] # initial excitation on site 1
# INTERACTIONS SETTINGS
self.interaction_model = interaction_model
if self.interaction_model.startswith('nearest'):
if settings.get('alpha_beta') is not None:
self.alpha_beta = settings.get('alpha_beta')
else:
self.alpha_beta = (20., -15.5) # rad ps^-1
if self.interaction_model == 'spin-boson':
if settings.get('epsi_delta') is not None:
self.epsi_delta = settings.get('epsi_delta')
else:
self.epsi_delta = (20., 40.) # rad ps^-1
# DYNAMICS SETTINGS
self.dynamics_model = dynamics_model
if settings.get('time_interval'):
self.time_interval = settings.get('time_interval') # seconds
else:
self.time_interval = 5. # 5 fs
if settings.get('timesteps'):
self.timesteps = settings.get('timesteps')
else:
self.timesteps = 500
# SETTINGS FOR LINDBLAD MODELS
if self.dynamics_model in LINDBLAD_MODELS:
if settings.get('deph_rate') is not None:
self.deph_rate = settings.get('deph_rate')
else:
self.deph_rate = 11 # rad ps^-1
# SETTINGS FOR TEMPERATURE DEPENDENT MODELS
if self.dynamics_model in TEMP_DEP_MODELS:
if settings.get('temperature') is not None:
self.temperature = settings.get('temperature')
else:
self.temperature = 300. # Kelvin
if settings.get('cutoff_freq') is not None:
self.cutoff_freq = settings.get('cutoff_freq')
else:
self.cutoff_freq = 6.024 # rad ps^-1
if settings.get('reorg_energy') is not None:
self.reorg_energy = settings.get('reorg_energy')
else:
self.reorg_energy = 1.391 # rad ps^-1
if settings.get('spectral_density') is not None:
self.spectral_density = settings.get('spectral_density')
else:
self.spectral_density = 'debye'
if self.spectral_density == 'ohmic':
if settings.get('ohmic_exponent') is not None:
self.ohmic_exponent = settings.get('ohmic_exponent')
else:
# Default to normal Ohmic, rather than sub- or super-Ohmic.
self.ohmic_exponent = 1.
# SETTINGS FOR HEOM (TEMP DEPENDENT)
if self.dynamics_model == 'HEOM':
if settings.get('matsubara_terms') is not None:
self.matsubara_terms = settings.get('matsubara_terms')
else:
self.matsubara_terms = 2
if settings.get('matsubara_coeffs') is not None:
self.matsubara_coeffs = settings.get('matsubara_coeffs')
else:
self.matsubara_coeffs = None
if settings.get('matsubara_freqs') is not None:
self.matsubara_freqs = settings.get('matsubara_freqs')
else:
self.matsubara_freqs = None
if settings.get('bath_cutoff') is not None:
self.bath_cutoff = settings.get('bath_cutoff')
else:
self.bath_cutoff = 20
# -------------------------------------------------------------------
# SITES + INITIAL DENSITY MATRIX FUNCTIONS
# -------------------------------------------------------------------
@property
def sites(self) -> int:
"""
Gets or sets the number of sites in the QuantumSystem
Raises
------
ValueError
If the number of sites set to a non-positive integer.
Returns
-------
int
The number of sites in the QuantumSystem
"""
return self._sites
@sites.setter
def sites(self, sites: int):
if sites < 1:
raise ValueError('Number of sites must be a positive integer')
self._sites = sites
@property
def init_site_pop(self) -> list:
"""
        Get or set the site populations in the initial density
matrix. Must be passed as a list of integers which indicate
the sites of the system that should be equally populated.
Raises
------
ValueError
If invalid site numbers (i.e. less than 1 or greater
than the number of sites) are passed.
Returns
-------
list of int
The site numbers that will be initially and equally
populated.
"""
return self._init_site_pop
@init_site_pop.setter
def init_site_pop(self, init_site_pop: list):
for site in init_site_pop:
if site < 1 or site > self.sites:
raise ValueError('Invalid site number.')
self._init_site_pop = init_site_pop
@property
def initial_density_matrix(self) -> np.ndarray:
"""
Returns an N x N 2D array corresponding to the density
matrix of the system at time t=0, where N is the number
of sites. Site populations are split equally between the
sites specified in 'QuantumSystem.init_site_pop' setting.
Returns
-------
np.ndarray
N x N 2D array (where N is the number of sites)
for the initial density matrix.
"""
return evo.initial_density_matrix(self.sites, self.init_site_pop)
# -------------------------------------------------------------------
# INTERACTIONS FUNCTIONS
# -------------------------------------------------------------------
@property
def interaction_model(self) -> str:
"""
Gets or sets the model used for interaction between sites.
Raises
------
ValueError
If attempting to set to an invalid model.
Returns
-------
str
The interaction model being used.
"""
return self._interaction_model
@interaction_model.setter
def interaction_model(self, model: str):
if model not in INTERACTION_MODELS:
raise ValueError('Must choose an interaction model from '
+ str(INTERACTION_MODELS))
if model == 'FMO':
assert self.sites == 7, (
'The FMO model is only valid for 7-site systems.')
if model == 'spin-boson':
assert self.sites == 2, (
'The spin-boson model is only valid for 2-site systems.')
self._interaction_model = model
@property
def hamiltonian(self) -> np.ndarray:
"""
Builds an interaction Hamiltonian for the QuantumSystem,
in units of rad ps^-1. The FMO Hamiltonian is for 7-site
systems only, and has a form constructed using parameters
from Adolphs, J.; Renger, T. Biophysical Journal 2006, 91,
2778–2797. The spin-boson model is only applicable to 2-
site systems, and has the form:
.. math::
H_{sys} = \\frac{\\epsilon}{2} \\sigma_z
+ \\frac{\\Delta}{2} \\sigma_x
as shown in J. Chem. Phys. 144, 044110 (2016);
https://doi.org/10.1063/1.4940218. The nearest neighbour models
are applicable to any number of sites and are given by:
.. math ::
H_{sys} = \\alpha I + \\beta A
where A is the adjacency matrix as built in the method
adjacency_matrix().
Returns
-------
np.ndarray
An N x N 2D array that represents the interactions
between sites in the quantum system, where N is the
number of sites. In units of rad ps^-1.
"""
return ham.system_hamiltonian(self.sites, self.interaction_model,
self.alpha_beta, self.epsi_delta)
@property
def hamiltonian_superop(self) -> np.ndarray:
"""
Builds the Hamiltonian superoperator in rad ps^-1,
given by:
.. math::
H_{sup} = -i(H \\otimes I - I \\otimes H^{\\dagger})
Returns
-------
np.ndarray
The (N^2) x (N^2) 2D array representing the Hamiltonian
superoperator, in units of rad ps^-1.
"""
return ham.hamiltonian_superop(self.hamiltonian)
@property
def alpha_beta(self) -> tuple:
"""
Get or set the values of alpha and beta used to construct
the system Hamiltonian for 'nearest neighbour' interaction
models. Alpha sets the value of the site energies
(diagonals), assuming all site energies are equal, while
beta sets the strength of the interaction between sites
(off-diagonals), also assumed to be of equal strength for
sites that interact (according to the adjacency matrix used
in the nearest neighbour model).
Returns
-------
tuple of float
The values of alpha and beta (respectively) to use
in Hamiltonian construction.
"""
if self.interaction_model.startswith('nearest'):
return self._alpha_beta
@alpha_beta.setter
def alpha_beta(self, alpha_beta: tuple):
assert isinstance(alpha_beta, tuple), ('alpha_beta must be passed as'
' a tuple.')
assert len(alpha_beta) == 2, 'Must pass as 2 float values in a tuple.'
self._alpha_beta = alpha_beta
@property
def epsi_delta(self) -> tuple:
"""
Get/set the values of epsilon and delta used to construct
the 'spin-boson' system Hamiltonian for a 2-site system.
Epsilon sets the value of the total system energy (i.e. the
sum of the energies of site 1 and 2), while delta sets the
tunnelling strength between the 2 sites.
Returns
-------
tuple of float
The values of alpha and beta (respectively) to use
in Hamiltonian construction.
"""
if self.interaction_model == 'spin-boson':
return self._epsi_delta
@epsi_delta.setter
def epsi_delta(self, epsi_delta: tuple):
assert isinstance(epsi_delta, tuple), ('epsi_delta must be passed as'
' a tuple.')
assert len(epsi_delta) == 2, 'Must pass as 2 float values in a tuple.'
assert self.sites == 2, 'spin-boson model only valid for 2-site systems'
self._epsi_delta = epsi_delta
# -------------------------------------------------------------------
# DYNAMICS PROPERTIES
# -------------------------------------------------------------------
@property
def dynamics_model(self) -> str:
"""
Gets or sets the type of model used to describe the
dynamics of the quantum system. Currently only 'local
dephasing lindblad', 'global thermalising lindblad', 'local
thermalising lindblad' and 'HEOM' are implemented in
quantum_HEOM.
Raises
-----
ValueError
If trying to set the dynamics model to an invalid
option.
Returns
-------
str
The dynamics model being used.
"""
return self._dynamics_model
@dynamics_model.setter
def dynamics_model(self, model: str):
if model not in DYNAMICS_MODELS:
raise ValueError('Must choose an dynamics model from '
+ str(DYNAMICS_MODELS))
if model == 'HEOM':
assert self.sites == 2, (
'Currently HEOM can only be run for 2-site systems.')
assert self.interaction_model == 'spin-boson', (
'Currently only a spin-boson interaction model can be used in'
' in conjunction with HEOM dynamics.')
self._dynamics_model = model
@property
def equilibrium_state(self) -> np.ndarray:
"""
        Returns the equilibrium density matrix of the system. For
        systems described by thermalising models, this is the
        thermal equilibrium state, given by:
.. math::
\\rho^{(eq)}
= \\frac{e^{- H / k_B T}}{tr(e^{- H / k_B T})}
however for the 'local dephasing lindblad' model, this is
the maximally mixed state:
.. math::
\\rho_{mm}^{eq}
= \\frac{1}{N} \\sum_{i=1}^N \\ket{i} \\bra{i}
where N is the dimension (i.e. number of sites) of the
system. This also corresponds to the thermal equilibrium
state in the infinite temperature limit.
Returns
-------
np.ndarray
A 2D square density matrix for the system's equilibrium
state.
"""
return evo.equilibrium_state(self.dynamics_model, self.sites,
self.hamiltonian, self.temperature)
# -------------------------------------------------------------------
# TIME-EVOLUTION FUNCTIONS
# -------------------------------------------------------------------
@property
def time_interval(self) -> float:
"""
Gets or sets the time interval value used in evaluating
the density matrix evolution, in femtoseconds.
Returns
-------
float
The time interval being used, in femtoseconds.
"""
return self._time_interval # fs
@time_interval.setter
def time_interval(self, time_interval: float):
if isinstance(time_interval, int):
time_interval = float(time_interval)
assert isinstance(time_interval, float), ('time_interval must be'
' passed as a float.')
assert time_interval > 0., 'time_interval must be positive.'
self._time_interval = time_interval
@property
def timesteps(self) -> int:
"""
Gets or sets the number of timesteps over which the
evolution of the QuantumSystem's density matrix should
be evaluated.
Raises
------
ValueError
If the number of timesteps is being set to a non-
positive integer.
Returns
-------
int
The number of timesteps used in evaluation of the
QuantumSystem's evolution.
"""
return self._timesteps
@timesteps.setter
def timesteps(self, timesteps: int):
if timesteps:
assert isinstance(timesteps, int), 'Must pass timesteps as an int'
if timesteps <= 0:
raise ValueError('Number of timesteps must be a positive'
' integer')
self._timesteps = timesteps
@property
def time_evolution(self) -> np.ndarray:
"""
Evaluates the density operator of the system at n_steps
forward in time, spaced by time_interval.
Raises
------
AttributeError
If trying to access this property without having set
values for time_interval, timesteps, and deph_rate.
Returns
-------
evolution : np.ndarray
An array of length corresponding to the number of
timesteps the evolution is evaluated for. Each element
is a tuple of the form (time, matrix, squared, distance),
where 'time' is the time at which the density matrix
            - 'matrix' - is evaluated, 'squared' is the trace of
'matrix' squared, and 'distance' is the trace distance
of 'matrix' from the system's equilibrium state.
"""
# LINDBLAD DYNAMICS
if self.dynamics_model in LINDBLAD_MODELS:
superop = self.hamiltonian_superop + self.lindbladian_superop
return evo.time_evo_lindblad(self.initial_density_matrix,
superop, # rad ps^-1
self.timesteps,
self.time_interval, # fs
self.dynamics_model,
self.hamiltonian, # rad ps^-1
self.temperature # Kelvin
)
# HEOM DYNAMICS
if self.dynamics_model == 'HEOM':
# Units of temperature must match units of Hamiltonian
# Quantity quantum_HEOM ----> QuTiP Conversion
# -------------------------------------------------------------
# hamiltonian: rad ps^-1 rad ps^-1 -
# time: fs ps * 1e-3
# temperature: K rad ps^-1 * k / (hbar * 1e12)
# coup_strength: rad ps^-1 rad ps^-1 / 2pi
# cutoff_freq: rad ps^-1 rad ps^-1 / 2pi
# planck: = 1.0
# boltzmann: = 1.0
# matsu coeffs: unitless unitless -
# matsu freqs: rad ps^-1 ps^-1 / 2pi
# Perform conversions
temperature = (self.temperature * 1e-12
* (constants.k / constants.hbar)) # K ---> rad ps^-1
tmp = evo.time_evo_heom(self.initial_density_matrix, # dimensionless
self.timesteps, # dimensionless
self.time_interval * 1e-3, # fs --> ps
self.hamiltonian, # rad ps^-1
self.coupling_op, # dimensionless
self.reorg_energy, # rad ps^-1
temperature, # rad ps^-1
self.bath_cutoff, # dimensionless
self.matsubara_terms, # dimensionless
self.cutoff_freq, # rad ps^-1
self.matsubara_coeffs, # dimensionless
self.matsubara_freqs # rad ps^-1
)
# Unpack the data, retrieving the evolution data, and setting
# the QuantumSystem's matsubara coefficients and frequencies
# to those returned by the function (as set by QuTiP's HEOMSolver).
evolution, self.matsubara_coeffs, self.matsubara_freqs = tmp
return evolution
# -------------------------------------------------------------------
# LINDBLAD-SPECIFIC PROPERTIES
# -------------------------------------------------------------------
@property
def deph_rate(self) -> float:
"""
Gets or sets the dephasing rate of the quantum system,
in units of rad ps^-1. Used in the local dephasing lindblad
model.
Returns
-------
float
The decay rate of the density matrix elements, in
units of rad ps^-1.
"""
if self.dynamics_model in LINDBLAD_MODELS:
return self._deph_rate
@deph_rate.setter
def deph_rate(self, deph_rate: float):
assert isinstance(deph_rate, (int, float)), (
'deph_rate must be passed as either an int or float')
if deph_rate < 0.:
            raise ValueError('Dephasing rate must be a non-negative float'
                             ' in units of rad ps^-1.')
self._deph_rate = deph_rate
@property
def lindbladian_superop(self) -> np.ndarray:
"""
Builds the Lindbladian superoperator for the system, either
using the local dephasing, local thermalising, or global
thermalising lindblad description of the dynamics.
Returns
-------
np.ndarray
The (N^2) x (N^2) 2D array representing the Lindbladian
superoperator, in rad ps^-1.
"""
# Assumes any deph_rate, cutoff_freq, reorg_energy in rad ps^-1
if self.dynamics_model in LINDBLAD_MODELS:
return lind.lindbladian_superop(self.sites,
self.dynamics_model,
self.hamiltonian, # rad ps^-1
self.deph_rate, # rad ps^-1
self.cutoff_freq, # rad ps^-1
self.reorg_energy, # rad ps^-1
self.temperature, # Kelvin
self.spectral_density,
self.ohmic_exponent) # rad ps^-1
raise ValueError(
'Can only build a Lindbladian superoperator for systems defined'
' with Lindblad dynamics. Choose from ' + str(LINDBLAD_MODELS))
# -------------------------------------------------------------------
# HEOM-SPECIFIC PROPERTIES
# -------------------------------------------------------------------
@property
def matsubara_terms(self) -> int:
"""
Get or set the number of Matsubara terms to include in the
HEOM approach to solving the system dynamics.
Raises
------
ValueError
If being set to a non-positive integer.
Returns
-------
int
The number of Matsubara terms HEOM is evaluated for.
"""
if self.dynamics_model == 'HEOM':
return self._matsubara_terms
@matsubara_terms.setter
def matsubara_terms(self, terms: int):
if terms < 0:
raise ValueError('The number of Matsubara terms must be a positive'
' integer.')
self._matsubara_terms = terms
@property
def matsubara_coeffs(self) -> np.ndarray:
"""
Get or set the matsubara coefficients used in HEOM dynamics
Raises
------
ValueError
If the number of coefficients being set exceeds the
number of matsubara terms set.
Returns
-------
np.ndarray
An array of matsubara coefficients, in order,
corresponding to the first n matsubara terms.
"""
if self.dynamics_model == 'HEOM':
return self._matsubara_coeffs
@matsubara_coeffs.setter
def matsubara_coeffs(self, coeffs: np.ndarray):
try:
if len(coeffs) > self.matsubara_terms:
raise ValueError('The number of coefficients being set exceeds'
' the number of matsubara terms')
if isinstance(coeffs, list):
coeffs = np.array(coeffs)
check = [isinstance(i, (float, complex)) for i in coeffs]
assert (isinstance(coeffs, np.ndarray)
and check.count(True) == len(check)), (
'matsubara_coeffs must be passed as a np.ndarray'
' with all elements as floats.')
self._matsubara_coeffs = coeffs
except TypeError:
self._matsubara_coeffs = None
@property
def matsubara_freqs(self) -> np.ndarray:
"""
Get or set the matsubara frequencies used in HEOM dynamics,
in units of s^-1.
Raises
------
ValueError
If the number of frequencies being set exceeds the
number of matsubara terms set.
Returns
-------
np.ndarray
An array of matsubara frequencies, in order,
corresponding to the first n matsubara terms, in units
of s^-1.
"""
if self.dynamics_model == 'HEOM':
return self._matsubara_freqs
@matsubara_freqs.setter
def matsubara_freqs(self, freqs: np.ndarray):
try:
if len(freqs) > self.matsubara_terms:
raise ValueError('The number of frequencies being set exceeds'
' the number of matsubara terms')
if isinstance(freqs, list):
freqs = np.array(freqs)
check = [isinstance(i, (float, complex)) for i in freqs]
assert (isinstance(freqs, np.ndarray)
and check.count(True) == len(check)), (
'matsubara_freqs must be passed as a np.ndarray'
' with all elements as floats.')
self._matsubara_freqs = freqs
except TypeError:
self._matsubara_freqs = None
@property
def bath_cutoff(self) -> int:
"""
Get or set the cutoff for the number of bath terms included
in the HEOM evaluation of the system dynamics.
Raises
------
ValueError
If being set to a non-positive integer.
Returns
-------
int
The number of bath terms HEOM is evaluated for.
"""
if self.dynamics_model == 'HEOM':
return self._bath_cutoff
@bath_cutoff.setter
def bath_cutoff(self, bath_cutoff: int):
if bath_cutoff < 0:
raise ValueError('The number of bath terms must be a positive'
' integer.')
self._bath_cutoff = bath_cutoff
@property
def coupling_op(self) -> np.ndarray:
"""
Get the operator describing the coupling between the system
and bath modes, used in the HEOM model of the dynamics.
Returns
-------
np.ndarray of complex
2D square array of size N x N (where N is the number
of sites) that represents the coupling operator.
"""
return heom.system_bath_coupling_op(self.sites)
# -------------------------------------------------------------------
# BATH + THERMAL PROPERTIES
# -------------------------------------------------------------------
@property
def temperature(self) -> float:
"""
Get or set the temperature of the thermal bath in Kelvin.
Raises
------
ValueError
If the temperature is being set to a negative value.
Returns
-------
float
The temperature of the system, in Kelvin.
"""
if self.dynamics_model in TEMP_DEP_MODELS:
return self._temperature
@temperature.setter
def temperature(self, temperature: float):
if isinstance(temperature, int):
temperature = float(temperature)
if temperature <= 0. or not isinstance(temperature, float):
raise ValueError('Temperature must be a positive float value'
' in Kelvin.')
self._temperature = temperature
@property
def cutoff_freq(self) -> float:
"""
Get or set the cutoff frequency used in calculating the
spectral density, in units of rad ps^-1.
Raises
------
ValueError
If the cutoff frequency is being set to a non-positive
value.
Returns
-------
float
The cutoff frequency being used, in rad ps^-1.
"""
if self.dynamics_model in TEMP_DEP_MODELS:
return self._cutoff_freq
@cutoff_freq.setter
def cutoff_freq(self, cutoff_freq: float):
if isinstance(cutoff_freq, int):
cutoff_freq = float(cutoff_freq)
if cutoff_freq <= 0.:
raise ValueError('Cutoff frequency must be a positive float.')
self._cutoff_freq = cutoff_freq
@property
def reorg_energy(self) -> float:
"""
Get or set the scale factor used in scaling the spectral
density, in units of rad ps^-1.
Raises
------
ValueError
If the value being set is non-positive.
Returns
-------
float
The thermalisation scale factor being used, in units
of rad ps^-1.
"""
if self.dynamics_model in TEMP_DEP_MODELS:
return self._reorg_energy
@reorg_energy.setter
def reorg_energy(self, reorg_energy: float):
if isinstance(reorg_energy, int):
reorg_energy = float(reorg_energy)
assert isinstance(reorg_energy, float), (
'reorg_energy must be passed as a float')
if reorg_energy < 0.:
            raise ValueError('Scale factor must be a non-negative float in rad'
                             ' ps^-1.')
self._reorg_energy = reorg_energy
@property
def spectral_density(self) -> str:
"""
Get or set the spectral density used to describe the
interaction of the system with bath modes.
Returns
-------
str
            The spectral density being used. Either 'debye',
'ohmic', or 'renger-marcus'
"""
if self.dynamics_model in TEMP_DEP_MODELS:
return self._spectral_density
@spectral_density.setter
def spectral_density(self, spectral_density: str):
assert spectral_density in SPECTRAL_DENSITIES, (
'Must choose a spectral density from ' + str(SPECTRAL_DENSITIES)
+ '. Other spectral densities not yet implemented in quantum_HEOM.')
if self.dynamics_model == 'HEOM' and spectral_density != 'debye':
raise NotImplementedError(
'Currently systems described by HEOM dynamics can only be'
' evaluated for Debye spectral densities.')
self._spectral_density = spectral_density
@property
def ohmic_exponent(self) -> float:
"""
Get or set the exponent used in calculation of the Ohmic
spectral density. Spectral density is described as Ohmic
if the exponent is equal to 1, sub-Ohmic if < 1, and
        super-Ohmic if > 1.
Returns
-------
float
The exponent used in calculation of the Ohmic spectral
density.
Raises
------
ValueError
If trying to set the exponent to a non-postive float.
"""
if (self.spectral_density == 'ohmic'
and self.dynamics_model in TEMP_DEP_MODELS):
return self._ohmic_exponent
@ohmic_exponent.setter
def ohmic_exponent(self, exponent):
if exponent > 0.:
self._ohmic_exponent = exponent
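# --- Hedged usage sketch (editor's addition) ---
# Builds a 2-site spin-boson QuantumSystem evolved with HEOM dynamics. Every
# keyword below is a documented setting of the class; the numeric values simply
# restate the defaults and are illustrative rather than recommended.
def _example_spin_boson_heom():
    qsys = QuantumSystem(sites=2,
                         interaction_model='spin-boson',
                         dynamics_model='HEOM',
                         time_interval=5.,        # fs
                         timesteps=500,
                         temperature=300.,        # Kelvin
                         cutoff_freq=6.024,       # rad ps^-1
                         reorg_energy=1.391,      # rad ps^-1
                         matsubara_terms=2,
                         bath_cutoff=20)
    # Each element of the returned array is (time, density matrix, trace of the
    # matrix squared, trace distance from the equilibrium state).
    return qsys.time_evolution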
| 35.656716
| 81
| 0.54804
|
794f378bfcc2685e0745c6a72bb3ad65073023ad
| 25,621
|
py
|
Python
|
quant/platform/okex_swap.py
|
yfjelley/thenextquant
|
5a2c4324ea390b513632ed2cc64d53314624e4ba
|
[
"MIT"
] | 2
|
2021-09-22T08:41:55.000Z
|
2021-11-05T01:45:27.000Z
|
quant/platform/okex_swap.py
|
mrganer/thenextquant
|
52fb22f5df20d43cb275a08adad81dc97f25a712
|
[
"MIT"
] | 1
|
2019-10-25T05:25:28.000Z
|
2019-10-25T05:25:28.000Z
|
quant/platform/okex_swap.py
|
mrganer/thenextquant
|
52fb22f5df20d43cb275a08adad81dc97f25a712
|
[
"MIT"
] | 4
|
2019-11-29T03:12:34.000Z
|
2021-09-19T02:59:29.000Z
|
# -*- coding:utf-8 -*-
"""
OKEx Swap Trade module
https://www.okex.me/docs/zh/
NOTE: Only Cross Margin Mode is supported in Trade module currently. Please change Margin Mode to `Cross`, not `Fixed`!
Author: HuangTao
Date: 2019/01/19
Email: huangtao@ifclover.com
"""
import time
import zlib
import json
import copy
import hmac
import base64
from urllib.parse import urljoin
from quant.error import Error
from quant.order import Order
from quant.utils import tools
from quant.utils import logger
from quant.tasks import SingleTask
from quant.position import Position
from quant.const import OKEX_SWAP
from quant.utils.websocket import Websocket
from quant.asset import Asset, AssetSubscribe
from quant.utils.http_client import AsyncHttpRequests
from quant.utils.decorator import async_method_locker
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL, ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, \
ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED
__all__ = ("OKExSwapRestAPI", "OKExSwapTrade", )
class OKExSwapRestAPI:
""" OKEx Swap REST API client.
Attributes:
host: HTTP request host.
access_key: Account's ACCESS KEY.
secret_key: Account's SECRET KEY.
passphrase: API KEY Passphrase.
"""
def __init__(self, host, access_key, secret_key, passphrase):
"""initialize REST API client."""
self._host = host
self._access_key = access_key
self._secret_key = secret_key
self._passphrase = passphrase
async def get_user_account(self):
""" Get the perpetual swap account info of all tokens. Margin ratio set as 10,000 when users have no open
position.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
success, error = await self.request("GET", "/api/swap/v3/accounts", auth=True)
return success, error
async def get_position(self, instrument_id):
""" Get the information of holding positions of a contract.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/swap/v3/{instrument_id}/position".format(instrument_id=instrument_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def create_order(self, instrument_id, trade_type, price, size, match_price=0, order_type=0):
""" Create an order.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
trade_type: 1: open long, 2: open short, 3: close long, 4: close short.
price: Price of each contract.
size: The buying or selling quantity.
match_price: Order at best counter party price? (0: no, 1: yes), When posting orders at best bid price,
order_type can only be 0 (regular order).
order_type: Fill in number for parameter, 0: Normal limit order (Unfilled and 0 represent normal limit
order) 1: Post only, 2: Fill Or Kill, 3: Immediately Or Cancel.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/swap/v3/order"
body = {
"instrument_id": instrument_id,
"type": str(trade_type),
"price": price,
"size": size,
"match_price": match_price,
"order_type": order_type
}
success, error = await self.request("POST", uri, body=body, auth=True)
return success, error
async def revoke_order(self, instrument_id, order_id):
""" Cancelling an unfilled order.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
order_id: order ID.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/swap/v3/cancel_order/{instrument_id}/{order_id}".format(instrument_id=instrument_id,
order_id=order_id)
success, error = await self.request("POST", uri, auth=True)
if error:
return None, error
if not success["result"]:
return None, success
return success, None
async def revoke_orders(self, instrument_id, order_ids):
""" Cancelling multiple open orders with order_id,Maximum 10 orders can be cancelled at a time for each
trading pair.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
order_ids: order ID list.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
assert isinstance(order_ids, list)
if len(order_ids) > 10:
logger.warn("order id list too long! no more than 10!", caller=self)
uri = "/api/swap/v3/cancel_batch_orders/{instrument_id}".format(instrument_id=instrument_id)
body = {
"ids": order_ids
}
success, error = await self.request("POST", uri, body=body, auth=True)
if error:
return None, error
if not success["result"]:
return None, success
return success, None
async def get_order_info(self, instrument_id, order_id):
""" Get order details by order ID. Canceled unfilled orders will be kept in record for 2 hours only.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
order_id: order ID.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
uri = "/api/swap/v3/orders/{instrument_id}/{order_id}".format(instrument_id=instrument_id, order_id=order_id)
success, error = await self.request("GET", uri, auth=True)
return success, error
async def get_order_list(self, instrument_id, state, limit=100):
""" List your orders. This API can retrieve the latest 20000 entries of data this week.
Args:
instrument_id: Contract ID, e.g. BTC-USD-SWAP.
state: Order state for filter. ("-2": Failed, "-1": Cancelled, "0": Open , "1": Partially Filled,
"2": Fully Filled, "3": Submitting, "4": Cancelling, "6": Incomplete(open + partially filled),
"7": Complete(cancelled + fully filled)).
limit: Number of results per request. Maximum 100. (default 100)
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
TODO: Add args `from` & `to`.
"""
uri = "/api/swap/v3/orders/{instrument_id}".format(instrument_id=instrument_id)
params = {
"state": state,
"limit": limit
}
success, error = await self.request("GET", uri, params=params, auth=True)
return success, error
async def request(self, method, uri, params=None, body=None, headers=None, auth=False):
""" Do HTTP request.
Args:
method: HTTP request method. GET, POST, DELETE, PUT.
uri: HTTP request uri.
params: HTTP query params.
body: HTTP request body.
headers: HTTP request headers.
auth: If this request requires authentication.
Returns:
success: Success results, otherwise it's None.
error: Error information, otherwise it's None.
"""
if params:
query = "&".join(["{}={}".format(k, params[k]) for k in sorted(params.keys())])
uri += "?" + query
url = urljoin(self._host, uri)
# Add signature for authentication.
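        # The prehash string built below is timestamp + uppercase HTTP method +
        # request path (including any query string) + JSON body; it is signed
        # with HMAC-SHA256 using the account's SECRET KEY and the digest is
        # base64-encoded into the OK-ACCESS-SIGN header.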
if auth:
timestamp = str(time.time()).split(".")[0] + "." + str(time.time()).split(".")[1][:3]
if body:
body = json.dumps(body)
else:
body = ""
message = str(timestamp) + str.upper(method) + uri + str(body)
mac = hmac.new(bytes(self._secret_key, encoding="utf8"), bytes(message, encoding="utf-8"),
digestmod="sha256")
d = mac.digest()
sign = base64.b64encode(d)
if not headers:
headers = {}
headers["Content-Type"] = "application/json"
headers["OK-ACCESS-KEY"] = self._access_key.encode().decode()
headers["OK-ACCESS-SIGN"] = sign.decode()
headers["OK-ACCESS-TIMESTAMP"] = str(timestamp)
headers["OK-ACCESS-PASSPHRASE"] = self._passphrase
_, success, error = await AsyncHttpRequests.fetch(method, url, body=body, headers=headers, timeout=10)
if error:
return None, error
if not isinstance(success, dict): # If response data is not dict format, convert it to json format.
success = json.loads(success)
return success, None
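# --- Hedged usage sketch (not part of the original module) ---
# Shows one way to drive OKExSwapRestAPI directly inside an async context.
# The host, credentials and instrument id are placeholders, and only methods
# defined above are used; scheduling the coroutine is left to the caller.
async def _example_rest_api_usage():
    rest_api = OKExSwapRestAPI("https://www.okex.com", "YOUR_ACCESS_KEY",
                               "YOUR_SECRET_KEY", "YOUR_PASSPHRASE")
    accounts, error = await rest_api.get_user_account()
    if error:
        return None, error
    # Open long 1 contract at a hypothetical limit price of 8000.
    order, error = await rest_api.create_order("BTC-USD-SWAP", trade_type=1,
                                               price="8000", size="1")
    return order, error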
class OKExSwapTrade(Websocket):
""" OKEx Swap Trade module. You can initialize trade object with some attributes in kwargs.
Attributes:
account: Account name for this trade exchange.
        strategy: The name you want to give to your strategy.
symbol: Symbol name for your trade.
host: HTTP request host. (default "https://www.okex.com")
wss: Websocket address. (default "wss://real.okex.com:8443")
access_key: Account's ACCESS KEY.
        secret_key: Account's SECRET KEY.
        passphrase: API KEY Passphrase.
        asset_update_callback: An optional async callback you can pass when initializing the Trade object.
            `asset_update_callback` looks like `async def on_asset_update_callback(asset: Asset): pass` and is
            executed asynchronously whenever an AssetEvent is received.
        order_update_callback: An optional async callback you can pass when initializing the Trade object.
            `order_update_callback` looks like `async def on_order_update_callback(order: Order): pass` and is
            executed asynchronously whenever an order's state is updated.
        position_update_callback: An optional async callback you can pass when initializing the Trade object.
            `position_update_callback` looks like `async def on_position_update_callback(position: Position): pass` and is
            executed asynchronously whenever the position's state is updated.
        init_success_callback: An optional async callback you can pass when initializing the Trade object.
            `init_success_callback` looks like `async def on_init_success_callback(success: bool, error: Error, **kwargs): pass`
            and is executed asynchronously after the Trade module object has been initialized.
"""
def __init__(self, **kwargs):
"""Initialize."""
e = None
if not kwargs.get("account"):
e = Error("param account miss")
if not kwargs.get("strategy"):
e = Error("param strategy miss")
if not kwargs.get("symbol"):
e = Error("param symbol miss")
if not kwargs.get("host"):
kwargs["host"] = "https://www.okex.com"
if not kwargs.get("wss"):
kwargs["wss"] = "wss://real.okex.com:8443"
if not kwargs.get("access_key"):
e = Error("param access_key miss")
if not kwargs.get("secret_key"):
e = Error("param secret_key miss")
if not kwargs.get("passphrase"):
e = Error("param passphrase miss")
if e:
logger.error(e, caller=self)
if kwargs.get("init_success_callback"):
SingleTask.run(kwargs["init_success_callback"], False, e)
return
self._account = kwargs["account"]
self._strategy = kwargs["strategy"]
self._platform = OKEX_SWAP
self._symbol = kwargs["symbol"]
self._host = kwargs["host"]
self._wss = kwargs["wss"]
self._access_key = kwargs["access_key"]
self._secret_key = kwargs["secret_key"]
self._passphrase = kwargs["passphrase"]
self._asset_update_callback = kwargs.get("asset_update_callback")
self._order_update_callback = kwargs.get("order_update_callback")
self._position_update_callback = kwargs.get("position_update_callback")
self._init_success_callback = kwargs.get("init_success_callback")
url = self._wss + "/ws/v3"
super(OKExSwapTrade, self).__init__(url, send_hb_interval=5)
self.heartbeat_msg = "ping"
self._assets = {} # Asset object. e.g. {"BTC": {"free": "1.1", "locked": "2.2", "total": "3.3"}, ... }
self._orders = {} # Order objects. e.g. {"order_no": Order, ... }
self._position = Position(self._platform, self._account, self._strategy, self._symbol)
# Subscribing our channels.
self._order_channel = "swap/order:{symbol}".format(symbol=self._symbol)
self._position_channel = "swap/position:{symbol}".format(symbol=self._symbol)
        # Whether our channels have been subscribed successfully.
self._subscribe_order_ok = False
self._subscribe_position_ok = False
# Initializing our REST API client.
self._rest_api = OKExSwapRestAPI(self._host, self._access_key, self._secret_key, self._passphrase)
# Subscribing our asset event.
if self._asset_update_callback:
AssetSubscribe(self._platform, self._account, self.on_event_asset_update)
self.initialize()
@property
def assets(self):
return copy.copy(self._assets)
@property
def orders(self):
return copy.copy(self._orders)
@property
def position(self):
return copy.copy(self._position)
@property
def rest_api(self):
return self._rest_api
async def connected_callback(self):
"""After websocket connection created successfully, we will send a message to server for authentication."""
timestamp = str(time.time()).split(".")[0] + "." + str(time.time()).split(".")[1][:3]
message = str(timestamp) + "GET" + "/users/self/verify"
mac = hmac.new(bytes(self._secret_key, encoding="utf8"), bytes(message, encoding="utf8"), digestmod="sha256")
d = mac.digest()
signature = base64.b64encode(d).decode()
data = {
"op": "login",
"args": [self._access_key, self._passphrase, timestamp, signature]
}
await self.ws.send_json(data)
@async_method_locker("OKExSwapTrade.process_binary.locker")
async def process_binary(self, raw):
""" Process binary message that received from websocket.
Args:
raw: Binary message received from websocket.
Returns:
None.
"""
decompress = zlib.decompressobj(-zlib.MAX_WBITS)
msg = decompress.decompress(raw)
msg += decompress.flush()
msg = msg.decode()
# Heartbeat message received.
if msg == "pong":
return
logger.debug("msg:", msg, caller=self)
msg = json.loads(msg)
# Authorization message received.
if msg.get("event") == "login":
if not msg.get("success"):
e = Error("Websocket connection authorized failed: {}".format(msg))
logger.error(e, caller=self)
SingleTask.run(self._init_success_callback, False, e)
return
logger.info("Websocket connection authorized successfully.", caller=self)
# Fetch orders from server. (open + partially filled)
result, error = await self._rest_api.get_order_list(self._symbol, 6)
if error:
e = Error("get open orders error: {}".format(error))
SingleTask.run(self._init_success_callback, False, e)
return
if len(result) > 100:
logger.warn("order length too long! (more than 100)", caller=self)
for order_info in result["order_info"]:
self._update_order(order_info)
# Fetch positions from server.
position, error = await self._rest_api.get_position(self._symbol)
if error:
e = Error("get position error: {}".format(error))
SingleTask.run(self._init_success_callback, False, e)
return
self._update_position(position)
# Subscribe order channel and position channel.
data = {
"op": "subscribe",
"args": [self._order_channel, self._position_channel]
}
await self.ws.send_json(data)
return
# Subscribe response message received.
if msg.get("event") == "subscribe":
if msg.get("channel") == self._order_channel:
self._subscribe_order_ok = True
if msg.get("channel") == self._position_channel:
self._subscribe_position_ok = True
if self._subscribe_order_ok and self._subscribe_position_ok:
SingleTask.run(self._init_success_callback, True, None)
return
# Order update message received.
if msg.get("table") == "swap/order":
for data in msg["data"]:
self._update_order(data)
return
        # Position update message received.
if msg.get("table") == "swap/position":
for data in msg["data"]:
self._update_position(data)
async def create_order(self, action, price, quantity, order_type=ORDER_TYPE_LIMIT, match_price=0, *args, **kwargs):
""" Create an order.
Args:
action: Trade direction, `BUY` or `SELL`.
price: Price of each contract.
quantity: The buying or selling quantity.
order_type: Order type, `MARKET` or `LIMIT`.
match_price: Order at best counter party price? (0: no, 1: yes), When posting orders at best bid price,
order_type can only be 0 (regular order).
order_type: Fill in number for parameter, 0: Normal limit order (Unfilled and 0 represent normal limit
order) 1: Post only, 2: Fill Or Kill, 3: Immediately Or Cancel.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
if int(quantity) > 0:
if action == ORDER_ACTION_BUY:
trade_type = "1"
else:
trade_type = "3"
else:
if action == ORDER_ACTION_BUY:
trade_type = "4"
else:
trade_type = "2"
quantity = abs(int(quantity))
if order_type == ORDER_TYPE_LIMIT:
order_type_2 = 0
elif order_type == ORDER_TYPE_MARKET:
order_type_2 = 2
else:
return None, "order type error"
result, error = await self._rest_api.create_order(self._symbol, trade_type, price, quantity, match_price,
order_type_2)
if error:
return None, error
order_no = result["order_id"]
return order_no, None
async def revoke_order(self, *order_nos):
""" Revoke (an) order(s).
Args:
            order_nos: Order id list; you can pass zero, one or multiple ids. With no ids, all open orders for this
                symbol (initialized in the Trade object) are cancelled. With a single id, that order is cancelled.
                With multiple ids, each of those orders is cancelled. Do not pass more than 100 ids.
Returns:
            Success or error, see below.
"""
# If len(order_nos) == 0, you will cancel all orders for this symbol(initialized in Trade object).
if len(order_nos) == 0:
result, error = await self._rest_api.get_order_list(self._symbol, 6)
if error:
return False, error
if len(result) > 100:
logger.warn("order length too long! (more than 100)", caller=self)
for order_info in result["order_info"]:
order_no = order_info["order_id"]
_, error = await self._rest_api.revoke_order(self._symbol, order_no)
if error:
return False, error
return True, None
# If len(order_nos) == 1, you will cancel an order.
if len(order_nos) == 1:
success, error = await self._rest_api.revoke_order(self._symbol, order_nos[0])
if error:
return order_nos[0], error
else:
return order_nos[0], None
# If len(order_nos) > 1, you will cancel multiple orders.
if len(order_nos) > 1:
success, error = [], []
for order_no in order_nos:
_, e = await self._rest_api.revoke_order(self._symbol, order_no)
if e:
error.append((order_no, e))
else:
success.append(order_no)
return success, error
async def get_open_order_nos(self):
""" Get open order id list.
Args:
None.
Returns:
order_nos: Open order id list, otherwise it's None.
error: Error information, otherwise it's None.
"""
success, error = await self._rest_api.get_order_list(self._symbol, 6)
if error:
return None, error
else:
if len(success) > 100:
logger.warn("order length too long! (more than 100)", caller=self)
order_nos = []
for order_info in success["order_info"]:
order_nos.append(order_info["order_id"])
return order_nos, None
def _update_order(self, order_info):
""" Order update.
Args:
order_info: Order information.
Returns:
None.
"""
order_no = str(order_info["order_id"])
state = order_info["state"]
remain = int(order_info["size"]) - int(order_info["filled_qty"])
ctime = tools.utctime_str_to_mts(order_info["timestamp"])
if state == "-2":
status = ORDER_STATUS_FAILED
elif state == "-1":
status = ORDER_STATUS_CANCELED
elif state == "0":
status = ORDER_STATUS_SUBMITTED
elif state == "1":
status = ORDER_STATUS_PARTIAL_FILLED
elif state == "2":
status = ORDER_STATUS_FILLED
else:
return None
order = self._orders.get(order_no)
if not order:
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": ORDER_ACTION_BUY if order_info["type"] in ["1", "4"] else ORDER_ACTION_SELL,
"symbol": self._symbol,
"price": order_info["price"],
"quantity": order_info["size"],
"trade_type": int(order_info["type"])
}
order = Order(**info)
order.remain = remain
order.status = status
order.avg_price = order_info["price_avg"]
order.ctime = ctime
order.utime = ctime
self._orders[order_no] = order
SingleTask.run(self._order_update_callback, copy.copy(order))
if status in [ORDER_STATUS_FAILED, ORDER_STATUS_CANCELED, ORDER_STATUS_FILLED]:
self._orders.pop(order_no)
def _update_position(self, position_info):
""" Position update.
Args:
position_info: Position information.
Returns:
None.
"""
        if len(position_info["holding"]) == 0:  # An empty `holding` list means all positions have been closed
self._position.update(0, 0, 0, 0, 0)
return
for item in position_info["holding"]:
if item["side"] == "long":
self._position.liquid_price = item["liquidation_price"]
self._position.long_quantity = int(item["position"])
self._position.long_avg_price = item["avg_cost"]
elif item["side"] == "short":
self._position.short_quantity = int(item["position"])
self._position.short_avg_price = item["avg_cost"]
else:
continue
self._position.utime = tools.utctime_str_to_mts(item["timestamp"])
SingleTask.run(self._position_update_callback, copy.copy(self.position))
async def on_event_asset_update(self, asset: Asset):
""" Asset event data callback.
Args:
asset: Asset object callback from EventCenter.
Returns:
None.
"""
self._assets = asset
SingleTask.run(self._asset_update_callback, asset)
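# --- Hedged usage sketch (not part of the original module) ---
# Wires OKExSwapTrade up with the async callbacks described in its docstring.
# All account/key values are placeholders, and the quant framework's event
# loop is assumed to be started elsewhere.
async def _example_on_order_update(order: Order):
    pass  # react to order state changes here

async def _example_on_position_update(position: Position):
    pass  # react to position changes here

async def _example_on_init_success(success: bool, error: Error, **kwargs):
    pass  # called after the Trade module finishes initializing

def _example_create_trade():
    return OKExSwapTrade(
        account="demo_account",              # hypothetical values
        strategy="demo_strategy",
        symbol="BTC-USD-SWAP",
        access_key="YOUR_ACCESS_KEY",
        secret_key="YOUR_SECRET_KEY",
        passphrase="YOUR_PASSPHRASE",
        order_update_callback=_example_on_order_update,
        position_update_callback=_example_on_position_update,
        init_success_callback=_example_on_init_success,
    )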
| 40.348031
| 133
| 0.595878
|
794f3907a50c729a817f794d56f53e2b2e413047
| 97
|
py
|
Python
|
admin_object_actions/__init__.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
admin_object_actions/__init__.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
admin_object_actions/__init__.py
|
bjuretko/django-admin-object-actions
|
2c80ffcbb53b3d585f191d1fe662daf36fa7e204
|
[
"BSD-3-Clause"
] | null | null | null |
__version__ = '0.1.5'
default_app_config = 'admin_object_actions.apps.AdminObjectActionsConfig'
| 24.25
| 73
| 0.824742
|
794f398ae8272c803d342f2febc1e780945d475e
| 14,297
|
py
|
Python
|
scripts/imapServer.py
|
idiotic-geek/roundup-mirror-via-github-import
|
8b6956ff319092a18202882be41cf31860cfb787
|
[
"MIT"
] | 20
|
2016-06-16T15:34:21.000Z
|
2021-11-17T10:02:09.000Z
|
scripts/imapServer.py
|
idiotic-geek/roundup-mirror-via-github-import
|
8b6956ff319092a18202882be41cf31860cfb787
|
[
"MIT"
] | 2
|
2018-12-02T01:32:22.000Z
|
2021-12-09T20:30:04.000Z
|
scripts/imapServer.py
|
idiotic-geek/roundup-mirror-via-github-import
|
8b6956ff319092a18202882be41cf31860cfb787
|
[
"MIT"
] | 8
|
2016-12-08T09:21:05.000Z
|
2021-02-04T05:13:14.000Z
|
#!/usr/bin/env python
"""\
This script is a wrapper around the mailgw.py script that exists in roundup.
It runs as service instead of running as a one-time shot.
It also connects to a secure IMAP server. The main reasons for this script are:
1) The roundup-mailgw script isn't designed to run as a server. It
expects that you either run it by hand, and enter the password each
time, or you supply the password on the command line. I prefer to
run a server that I initialize with the password, and then it just
runs. I don't want to have to pass it on the command line, so
running through crontab isn't a possibility. (This wouldn't be a
problem on a local machine running through a mailspool.)
2) mailgw.py somehow screws up SSL support so IMAP4_SSL doesn't work. So
   hopefully running that part outside of the mailgw will allow it to work.
3) I wanted to be able to check multiple projects at the same time.
roundup-mailgw is only for 1 mailbox and 1 project.
*TODO*:
For the first round, the program spawns a new roundup-mailgw for
each imap message that it finds and pipes the result in. In the
future it might be more practical to actually include the roundup
files and run the appropriate commands using python.
*TODO*:
Look into supporting a logfile instead of using 2>/logfile
*TODO*:
Add an option for changing the uid/gid of the running process.
"""
from __future__ import print_function
import getpass
import logging
import imaplib
import optparse
import os
import re
import time
from roundup.anypy.my_input import my_input
logging.basicConfig()
log = logging.getLogger('roundup.IMAPServer')
version = '0.1.2'
class RoundupMailbox:
"""This contains all the info about each mailbox.
Username, Password, server, security, roundup database
"""
def __init__(self, dbhome='', username=None, password=None, mailbox=None
, server=None, protocol='imaps'):
self.username = username
self.password = password
self.mailbox = mailbox
self.server = server
self.protocol = protocol
self.dbhome = dbhome
try:
if not self.dbhome:
self.dbhome = my_input('Tracker home: ')
if not os.path.exists(self.dbhome):
raise ValueError('Invalid home address: ' \
'directory "%s" does not exist.' % self.dbhome)
if not self.server:
self.server = my_input('Server: ')
if not self.server:
raise ValueError('No Servername supplied')
protocol = my_input('protocol [imaps]? ')
self.protocol = protocol
if not self.username:
self.username = my_input('Username: ')
if not self.username:
raise ValueError('Invalid Username')
if not self.password:
print('For server %s, user %s' % (self.server, self.username))
self.password = getpass.getpass()
            # password can be empty because it could be superseded
# by a later entry
#if self.mailbox is None:
# self.mailbox = my_input('Mailbox [INBOX]: ')
# # We allow an empty mailbox because that will
# # select the INBOX, whatever it is called
except (KeyboardInterrupt, EOFError):
raise ValueError('Canceled by User')
def __str__(self):
return 'Mailbox{ server:%(server)s, protocol:%(protocol)s, ' \
'username:%(username)s, mailbox:%(mailbox)s, ' \
'dbhome:%(dbhome)s }' % self.__dict__
# [als] class name is misleading. this is imap client, not imap server
class IMAPServer:
"""IMAP mail gatherer.
This class runs as a server process. It is configured with a list of
mailboxes to connect to, along with the roundup database directories
that correspond with each email address. It then connects to each
mailbox at a specified interval, and if there are new messages it
reads them, and sends the result to the roundup.mailgw.
*TODO*:
Try to be smart about how you access the mailboxes so that you can
connect once, and access multiple mailboxes and possibly multiple
usernames.
*NOTE*:
This assumes that if you are using the same user on the same
server, you are using the same password. (the last one supplied is
used.) Empty passwords are ignored. Only the last protocol
supplied is used.
"""
def __init__(self, pidfile=None, delay=5, daemon=False):
#This is sorted by servername, then username, then mailboxes
self.mailboxes = {}
self.delay = float(delay)
self.pidfile = pidfile
self.daemon = daemon
def setDelay(self, delay):
self.delay = delay
def addMailbox(self, mailbox):
""" The linkage is as follows:
servers -- users - mailbox:dbhome
So there can be multiple servers, each with multiple users.
Each username can be associated with multiple mailboxes.
each mailbox is associated with 1 database home
"""
log.info('Adding mailbox %s', mailbox)
if mailbox.server not in self.mailboxes:
self.mailboxes[mailbox.server] = {'protocol':'imaps', 'users':{}}
server = self.mailboxes[mailbox.server]
if mailbox.protocol:
server['protocol'] = mailbox.protocol
if mailbox.username not in server['users']:
server['users'][mailbox.username] = {'password':'', 'mailboxes':{}}
user = server['users'][mailbox.username]
if mailbox.password:
user['password'] = mailbox.password
if mailbox.mailbox in user['mailboxes']:
raise ValueError('Mailbox is already defined')
user['mailboxes'][mailbox.mailbox] = mailbox.dbhome
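        # Illustration with hypothetical values: after adding one mailbox the
        # nested structure looks roughly like
        #   self.mailboxes = {
        #       'imap.example.com': {
        #           'protocol': 'imaps',
        #           'users': {
        #               'alice': {
        #                   'password': 'secret',
        #                   'mailboxes': {'INBOX': '/home/roundup/trackers/test'},
        #               },
        #           },
        #       },
        #   }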
def _process(self, message, dbhome):
"""Actually process one of the email messages"""
child = os.popen('roundup-mailgw %s' % dbhome, 'wb')
child.write(message)
child.close()
#print message
def _getMessages(self, serv, count, dbhome):
"""This assumes that you currently have a mailbox open, and want to
process all messages that are inside.
"""
for n in range(1, count+1):
(t, data) = serv.fetch(n, '(RFC822)')
if t == 'OK':
self._process(data[0][1], dbhome)
serv.store(n, '+FLAGS', r'(\Deleted)')
def checkBoxes(self):
"""This actually goes out and does all the checking.
Returns False if there were any errors, otherwise returns true.
"""
noErrors = True
for server in self.mailboxes:
log.info('Connecting to server: %s', server)
s_vals = self.mailboxes[server]
try:
for user in s_vals['users']:
u_vals = s_vals['users'][user]
# TODO: As near as I can tell, you can only
# login with 1 username for each connection to a server.
protocol = s_vals['protocol'].lower()
if protocol == 'imaps':
serv = imaplib.IMAP4_SSL(server)
elif protocol == 'imap':
serv = imaplib.IMAP4(server)
else:
raise ValueError('Unknown protocol %s' % protocol)
password = u_vals['password']
try:
log.info('Connecting as user: %s', user)
serv.login(user, password)
for mbox in u_vals['mailboxes']:
dbhome = u_vals['mailboxes'][mbox]
log.info('Using mailbox: %s, home: %s',
mbox, dbhome)
#access a specific mailbox
if mbox:
(t, data) = serv.select(mbox)
else:
# Select the default mailbox (INBOX)
(t, data) = serv.select()
try:
nMessages = int(data[0])
except ValueError:
nMessages = 0
log.info('Found %s messages', nMessages)
if nMessages:
self._getMessages(serv, nMessages, dbhome)
serv.expunge()
# We are done with this mailbox
serv.close()
except:
log.exception('Exception with server %s user %s',
server, user)
noErrors = False
serv.logout()
serv.shutdown()
del serv
except:
log.exception('Exception while connecting to %s', server)
noErrors = False
return noErrors
def makeDaemon(self):
"""Turn this process into a daemon.
- make our parent PID 1
Write our new PID to the pidfile.
From A.M. Kuuchling (possibly originally Greg Ward) with
modification from Oren Tirosh, and finally a small mod from me.
Originally taken from roundup.scripts.roundup_server.py
"""
log.info('Running as Daemon')
# Fork once
if os.fork() != 0:
os._exit(0)
# Create new session
os.setsid()
# Second fork to force PPID=1
pid = os.fork()
if pid:
if self.pidfile:
pidfile = open(self.pidfile, 'w')
pidfile.write(str(pid))
pidfile.close()
os._exit(0)
def run(self):
"""Run email gathering daemon.
This spawns itself as a daemon, and then runs continually, just
sleeping inbetween checks. It is recommended that you run
checkBoxes once first before you select run. That way you can
know if there were any failures.
"""
if self.daemon:
self.makeDaemon()
while True:
time.sleep(self.delay * 60.0)
log.info('Time: %s', time.strftime('%Y-%m-%d %H:%M:%S'))
self.checkBoxes()
def getItems(s):
"""Parse a string looking for userame@server"""
myRE = re.compile(
r'((?P<protocol>[^:]+)://)?'#You can supply a protocol if you like
r'(' #The username part is optional
r'(?P<username>[^:]+)' #You can supply the password as
r'(:(?P<password>.+))?' #username:password@server
r'@)?'
r'(?P<server>[^/]+)'
r'(/(?P<mailbox>.+))?$'
)
m = myRE.match(s)
if m:
return m.groupdict()
else:
return None
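# Illustration with a hypothetical input:
#   getItems('imaps://alice:secret@imap.example.com/INBOX')
# returns {'protocol': 'imaps', 'username': 'alice', 'password': 'secret',
# 'server': 'imap.example.com', 'mailbox': 'INBOX'}, while a bare
# getItems('imap.example.com') leaves protocol, username, password and
# mailbox as None.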
def main():
"""This is what is called if run at the prompt"""
parser = optparse.OptionParser(
version=('%prog ' + version),
usage="""usage: %prog [options] (home server)...
So each entry has a home, and then the server configuration. Home is just
a path to the roundup issue tracker. The server is something of the form:
imaps://user:password@server/mailbox
If you don't supply the protocol, imaps is assumed. Without user or
password, you will be prompted for them. The server must be supplied.
Without mailbox the INBOX is used.
Examples:
%prog /home/roundup/trackers/test imaps://test@imap.example.com/test
%prog /home/roundup/trackers/test imap.example.com \
/home/roundup/trackers/test2 imap.example.com/test2
"""
)
parser.add_option('-d', '--delay', dest='delay', type='float',
metavar='<sec>', default=5,
help="Set the delay between checks in minutes. (default 5)"
)
parser.add_option('-p', '--pid-file', dest='pidfile',
metavar='<file>', default=None,
help="The pid of the server process will be written to <file>"
)
parser.add_option('-n', '--no-daemon', dest='daemon',
action='store_false', default=True,
help="Do not fork into the background after running the first check."
)
parser.add_option('-v', '--verbose', dest='verbose',
action='store_const', const=logging.INFO,
help="Be more verbose in letting you know what is going on."
" Enables informational messages."
)
parser.add_option('-V', '--very-verbose', dest='verbose',
action='store_const', const=logging.DEBUG,
help="Be very verbose in letting you know what is going on."
" Enables debugging messages."
)
parser.add_option('-q', '--quiet', dest='verbose',
action='store_const', const=logging.ERROR,
help="Be less verbose. Ignores warnings, only prints errors."
)
parser.add_option('-Q', '--very-quiet', dest='verbose',
action='store_const', const=logging.CRITICAL,
help="Be much less verbose. Ignores warnings and errors."
" Only print CRITICAL messages."
)
(opts, args) = parser.parse_args()
if (len(args) == 0) or (len(args) % 2 == 1):
parser.error('Invalid number of arguments. '
'Each site needs a home and a server.')
if opts.verbose == None:
opts.verbose = logging.WARNING
log.setLevel(opts.verbose)
myServer = IMAPServer(delay=opts.delay, pidfile=opts.pidfile,
daemon=opts.daemon)
for i in range(0,len(args),2):
home = args[i]
server = args[i+1]
if not os.path.exists(home):
parser.error('Home: "%s" does not exist' % home)
info = getItems(server)
if not info:
parser.error('Invalid server string: "%s"' % server)
myServer.addMailbox(
RoundupMailbox(dbhome=home, mailbox=info['mailbox']
, username=info['username'], password=info['password']
, server=info['server'], protocol=info['protocol']
)
)
if myServer.checkBoxes():
myServer.run()
if __name__ == '__main__':
main()
# vim: et ft=python si sts=4 sw=4
| 36.658974
| 79
| 0.576345
|